--- crash/unwind_x86_64.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/unwind_x86_64.h	2006-10-20 14:58:14.000000000 -0400
@@ -0,0 +1,92 @@
+#define CONFIG_64BIT 1
+#define NULL ((void *)0)
+
+typedef unsigned long size_t;
+typedef unsigned char u8;
+typedef signed short s16;
+typedef unsigned short u16;
+typedef signed int s32;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+struct pt_regs {
+        unsigned long r15;
+        unsigned long r14;
+        unsigned long r13;
+        unsigned long r12;
+        unsigned long rbp;
+        unsigned long rbx;
+/* arguments: non interrupts/non tracing syscalls only save up to here */
+        unsigned long r11;
+        unsigned long r10;
+        unsigned long r9;
+        unsigned long r8;
+        unsigned long rax;
+        unsigned long rcx;
+        unsigned long rdx;
+        unsigned long rsi;
+        unsigned long rdi;
+        unsigned long orig_rax;
+/* end of arguments */
+/* cpu exception frame or undefined */
+        unsigned long rip;
+        unsigned long cs;
+        unsigned long eflags;
+        unsigned long rsp;
+        unsigned long ss;
+/* top of stack page */
+};
+
+struct unwind_frame_info
+{
+        struct pt_regs regs;
+};
+
+extern int unwind(struct unwind_frame_info *);
+extern void init_unwind_table(void);
+extern void free_unwind_table(void);
+
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define get_unaligned(ptr) (*(ptr))
+//#define __get_user(x,ptr)  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define THREAD_ORDER 1
+#define THREAD_SIZE  (PAGE_SIZE << THREAD_ORDER)
+
+#define UNW_PC(frame)        (frame)->regs.rip
+#define UNW_SP(frame)        (frame)->regs.rsp
+#ifdef CONFIG_FRAME_POINTER
+	#define UNW_FP(frame)        (frame)->regs.rbp
+	#define FRAME_RETADDR_OFFSET 8
+	#define FRAME_LINK_OFFSET    0
+	#define STACK_BOTTOM(tsk)    (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1))
+	#define STACK_TOP(tsk)       ((tsk)->thread.rsp0)
+#endif
+
+
+#define EXTRA_INFO(f) { BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) % FIELD_SIZEOF(struct unwind_frame_info, f)) + offsetof(struct unwind_frame_info, f)/ FIELD_SIZEOF(struct unwind_frame_info, f), FIELD_SIZEOF(struct unwind_frame_info, f) }
+
+#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
+
+#define UNW_REGISTER_INFO \
+	PTREGS_INFO(rax),\
+	PTREGS_INFO(rdx),\
+	PTREGS_INFO(rcx),\
+	PTREGS_INFO(rbx), \
+	PTREGS_INFO(rsi), \
+	PTREGS_INFO(rdi), \
+	PTREGS_INFO(rbp), \
+	PTREGS_INFO(rsp), \
+	PTREGS_INFO(r8), \
+	PTREGS_INFO(r9), \
+	PTREGS_INFO(r10),\
+	PTREGS_INFO(r11), \
+	PTREGS_INFO(r12), \
+	PTREGS_INFO(r13), \
+	PTREGS_INFO(r14), \
+	PTREGS_INFO(r15), \
+	PTREGS_INFO(rip)
+
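The EXTRA_INFO()/PTREGS_INFO() macros above describe each saved register to the unwinder as a pair { index, size }: the index is the member's byte offset within struct unwind_frame_info divided by the member's size, and BUILD_BUG_ON_ZERO() turns any offset that is not a multiple of that size into a compile-time error. The standalone C sketch below illustrates the same offsetof() arithmetic; it is not part of the patch, and the struct and table names in it are invented for the example.

#include <stdio.h>
#include <stddef.h>

struct demo_regs { unsigned long rax, rdx, rcx; };
struct demo_frame { struct demo_regs regs; };

/* same shape as EXTRA_INFO(): { slot index, member size } */
#define DEMO_INFO(f) \
	{ offsetof(struct demo_frame, f) / sizeof(((struct demo_frame *)0)->f), \
	  sizeof(((struct demo_frame *)0)->f) }

static const struct { unsigned long idx, size; } demo_table[] = {
	DEMO_INFO(regs.rax), DEMO_INFO(regs.rdx), DEMO_INFO(regs.rcx),
};

int main(void)
{
	unsigned int i;

	/* on LP64 this prints "0 8", "1 8", "2 8": consecutive register slots */
	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
		printf("%lu %lu\n", demo_table[i].idx, demo_table[i].size);
	return 0;
}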
--- crash/s390dbf.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/s390dbf.c	2006-08-14 13:58:56.000000000 -0400
@@ -0,0 +1,1340 @@
+/*
+ *    s390 debug feature command for crash
+ *
+ *    Copyright (C) IBM Corp. 2006
+ *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#if defined(S390) || defined(S390X)
+
+#include "defs.h"
+#include <iconv.h>
+#include <ctype.h>
+
+/*
+ * Compat layer to integrate lcrash commands into crash
+ * Maps lcrash API to crash functions
+ */
+
+#define KL_NBPW sizeof(long)
+#define KL_ERRORFP stderr
+#define MAX_ARGS 128
+#define MAX_CMDLINE 256
+
+#define C_FALSE         0x00000001   /* Command takes no arguments */
+#define C_TRUE          0x00000002   /* Command requires arguments */
+#define C_ALL           0x00000004   /* All elements */
+#define C_PERM          0x00000008   /* Allocate permanent blocks */
+#define C_TEMP          0x00000000   /* For completeness */
+#define C_FULL          0x00000010   /* Full output */
+#define C_LIST          0x00000020   /* List items */
+#define C_NEXT          0x00000040   /* Follow links */
+#define C_WRITE         0x00000080   /* Write output to file */
+#define C_NO_OPCHECK    0x00000100   /* Don't reject bad cmd line options */
+#define C_ITER          0x00000200   /* set iteration threshold */
+
+#define C_LFLG_SHFT 12
+
+#define KL_ARCH_S390 0
+#define KL_ARCH_S390X 1
+#ifdef __s390x__
+#define KL_ARCH KL_ARCH_S390X
+#define FMTPTR "l"
+#define KL_PTRSZ 8
+#else
+#define KL_ARCH KL_ARCH_S390
+#define FMTPTR "ll"
+#define KL_PTRSZ 4
+#endif
+
+typedef unsigned long uaddr_t;
+typedef unsigned long kaddr_t;
+
+typedef struct _syment {
+	char *s_name;
+	kaddr_t s_addr;
+} syment_t;
+
+typedef struct option_s {
+	struct option_s	*op_next;
+	char		op_char;
+	char		*op_arg;
+} option_t;
+
+typedef struct command_s {
+	int		flags;
+	char		cmdstr[MAX_CMDLINE];
+	char		*command;
+	char		*cmdline;
+	option_t	*options;
+	int		nargs;
+	char		*args[MAX_ARGS];
+	char		*pipe_cmd;
+	FILE		*ofp;
+	FILE		*efp;
+} command_t;
+
+static inline syment_t* kl_lkup_symaddr(kaddr_t addr)
+{
+	static syment_t sym;
+	struct syment *crash_sym;
+
+	crash_sym = value_search(addr, &sym.s_addr);
+	if (!crash_sym)
+		return NULL;
+	sym.s_name = crash_sym->name;
+	return &sym;
+}
+
+static inline syment_t* kl_lkup_symname(char* name)
+{
+	static syment_t sym;
+	sym.s_addr = symbol_value(name);
+	sym.s_name = NULL;
+	if(!sym.s_addr)
+		return NULL;
+	else
+		return &sym;
+}
+
+static inline void GET_BLOCK(kaddr_t addr, int size, void* ptr)
+{
+	readmem(addr, KVADDR,ptr,size,"GET_BLOCK",FAULT_ON_ERROR);
+}
+
+static inline kaddr_t KL_VREAD_PTR(kaddr_t addr)
+{
+	unsigned long ptr;
+	readmem(addr, KVADDR,&ptr,sizeof(ptr),"GET_BLOCK",FAULT_ON_ERROR);
+	return (kaddr_t)ptr;
+}
+
+static inline uint32_t KL_GET_UINT32(void* ptr)
+{
+	return *((uint32_t*)ptr);
+}
+
+static inline uint64_t KL_GET_UINT64(void* ptr)
+{
+	return *((uint64_t*)ptr);
+}
+
+static inline kaddr_t KL_GET_PTR(void* ptr)
+{
+	return *((kaddr_t*)ptr);
+}
+
+static inline void* K_PTR(void* addr, char* struct_name, char* member_name)
+{
+	return addr+MEMBER_OFFSET(struct_name,member_name);
+}
+
+static inline uint32_t KL_UINT(void* ptr, char* struct_name, char* member_name)
+{
+	return (uint32_t) ULONG(ptr+MEMBER_OFFSET(struct_name,member_name));
+}
+
+static inline uint32_t KL_VREAD_UINT32(kaddr_t addr)
+{
+	uint32_t rc;
+	readmem(addr, KVADDR,&rc,sizeof(rc),"KL_VREAD_UINT32",FAULT_ON_ERROR);
+	return rc;
+}
+
+static inline uint32_t KL_INT(void* ptr, char* struct_name, char* member_name)
+{
+	return UINT(ptr+MEMBER_OFFSET(struct_name,member_name));
+}
+
+static inline int set_cmd_flags(command_t *cmd, int flags, char *extraops)
+{
+	return 0;
+}
+
+static inline void kl_s390tod_to_timeval(uint64_t todval, struct timeval *xtime)
+{
+	todval -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
+
+	todval >>= 12;
+	xtime->tv_sec  = todval / 1000000;
+	xtime->tv_usec = todval % 1000000;
+}
+
+static inline int kl_struct_len(char* struct_name)
+{
+	return STRUCT_SIZE(struct_name);
+}
+
+static inline kaddr_t kl_funcaddr(kaddr_t addr)
+{
+	struct syment *crash_sym;
+
+	crash_sym = value_search(addr, &addr);
+	if (!crash_sym)
+		return -1;
+	else
+		return crash_sym->value;
+}
+
+#define CMD_USAGE(cmd, s) \
+	fprintf(cmd->ofp, "Usage: %s %s\n", cmd->command, s); \
+	fprintf(cmd->ofp, "Enter \"help %s\" for details.\n",cmd->command);
+
+/*
+ * s390 debug feature implementation
+ */
+
+#ifdef DBF_DYNAMIC_VIEWS	/* views defined in shared libs */
+#include <dlfcn.h>
+#endif
+
+/* Local flags
+ */
+
+#define LOAD_FLAG (1 << C_LFLG_SHFT)
+#define VIEWS_FLAG (2 << C_LFLG_SHFT)
+
+#ifndef MIN
+#define MIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+/* Stuff which has to match with include/asm-s390/debug.h */
+
+#define DBF_VERSION_V1 1
+#define DBF_VERSION_V2 2
+#define PAGE_SIZE 4096
+#define DEBUG_MAX_VIEWS	    10 /* max number of views in proc fs */
+#define DEBUG_MAX_PROCF_LEN	16 /* max length for a proc file name */
+#define DEBUG_SPRINTF_MAX_ARGS 10
+
+/* define debug-structures for lcrash */
+#define DEBUG_DATA(entry) (char*)(entry + 1)
+
+typedef struct debug_view_s debug_view_t;
+
+/* struct to hold contents of struct __debug_entry from dump
+ */
+typedef struct debug_entry_s{
+	union {
+		struct {
+			unsigned long long clock:52;
+			unsigned long long exception:1;
+			unsigned long long level:3;
+			unsigned long long cpuid:8;
+		} fields;
+
+		unsigned long long stck;
+	} id;
+	kaddr_t caller; /* changed from void* to kaddr_t */
+} __attribute__((packed)) debug_entry_t;
+/* typedef struct __debug_entry debug_entry_t; */
+
+
+static unsigned int dbf_version;
+
+/* struct is used to manage contents of structs debug_info from dump
+ * in lcrash
+ */
+typedef struct debug_info_s {
+	struct debug_info_s *next;
+	struct debug_info_s *prev;
+	kaddr_t next_dbi;   /* store next ptr of struct in dump */
+	kaddr_t prev_dbi;   /* store prev ptr of struct in dump */
+	int level;
+	int nr_areas;
+	int page_order;
+	int buf_size;
+	int entry_size;
+	void **areas; /* contents of debug areas from dump */
+	int active_area;
+	int *active_entry; /* change to uint32_t ? */
+	debug_view_t *views[DEBUG_MAX_VIEWS];
+	char name[DEBUG_MAX_PROCF_LEN];
+	kaddr_t addr;
+	int pages_per_area_v2;
+	void ***areas_v2;
+} debug_info_t;
+
+
+/* functions to generate dbf output
+ */
+typedef int (debug_header_proc_t) (debug_info_t* id, debug_view_t* view,
+				   int area, debug_entry_t* entry,
+				   char* out_buf);
+typedef int (debug_format_proc_t) (debug_info_t* id, debug_view_t* view,
+				   char* out_buf, const char* in_buf);
+typedef int (debug_prolog_proc_t) (debug_info_t* id, debug_view_t* view,
+				   char* out_buf);
+
+struct debug_view_s {
+	char name[DEBUG_MAX_PROCF_LEN];
+	debug_prolog_proc_t* prolog_proc;
+	debug_header_proc_t* header_proc;
+	debug_format_proc_t* format_proc;
+	void*		private_data;
+};
+
+#define LCRASH_DB_VIEWS 1000
+
+static debug_info_t *debug_area_first = NULL;
+static debug_info_t *debug_area_last  = NULL;
+static debug_view_t *debug_views[LCRASH_DB_VIEWS];
+static int initialized = 0;
+static iconv_t ebcdic_ascii_conv = 0;
+
+void s390dbf_usage(command_t * cmd);
+static int add_lcrash_debug_view(debug_view_t *);
+static int dbe_size = 0;
+
+static void
+EBCASC(char *inout, size_t len)
+{
+	iconv(ebcdic_ascii_conv, &inout, &len, &inout, &len);
+}
+
+/*
+ * prints header for debug entry
+ */
+static int
+dflt_header_fn(debug_info_t * id, debug_view_t *view,
+	       int area, debug_entry_t * entry, char *out_buf)
+{
+	struct timeval time_val;
+	unsigned long long time;
+	char *except_str;
+	kaddr_t caller;
+	int rc = 0;
+	char *caller_name;
+	int offset;
+	char caller_buf[30];
+	unsigned int level;
+	syment_t *caller_sym;
+	debug_entry_t lentry; /* store byte swapped values of entry */
+
+	lentry.id.stck = KL_GET_UINT64(&entry->id);
+	lentry.caller = KL_GET_PTR(&entry->caller);
+	level = lentry.id.fields.level;
+	time = lentry.id.stck;
+
+	kl_s390tod_to_timeval(time, &time_val);
+
+	if (lentry.id.fields.exception)
+		except_str = "*";
+	else
+		except_str = "-";
+	caller = lentry.caller;
+	if(KL_ARCH == KL_ARCH_S390){
+		caller &= 0x7fffffff;
+	}
+	caller_sym = kl_lkup_symaddr(caller);
+	if(caller_sym){
+		caller_name = caller_sym->s_name;
+		offset = caller - kl_funcaddr(caller);
+	}
+	else {
+		sprintf(caller_buf, "%llx", (unsigned long long)caller);
+		caller_name = caller_buf;
+		offset = 0;
+	}
+
+	if(KL_ARCH == KL_ARCH_S390X){
+		rc += sprintf(out_buf, 
+			      "%02i %011lu:%06lu %1u %1s %02i <%20s+%04i>  ",
+			      area, time_val.tv_sec, time_val.tv_usec, level,
+			      except_str, entry->id.fields.cpuid, caller_name,
+			      offset);
+	} else {
+		rc += sprintf(out_buf,
+			      "%02i %011lu:%06lu %1u %1s %02i <%-20s+%04i>  ",
+			      area, time_val.tv_sec, time_val.tv_usec, level,
+			      except_str, lentry.id.fields.cpuid, caller_name,
+			      offset);
+	}
+	return rc;
+}
+
+/*
+ * prints debug header in raw format
+ */
+static int
+raw_header_fn(debug_info_t * id, debug_view_t *view,
+	      int area, debug_entry_t * entry, char *out_buf)
+{
+	int rc;
+
+	rc = sizeof(debug_entry_t);
+	if (out_buf == NULL)
+		goto out;
+	memcpy(out_buf,entry,sizeof(debug_entry_t));
+      out:
+	return rc;
+}
+
+/*
+ * prints debug data in raw format
+ */
+static int
+raw_format_fn(debug_info_t * id, debug_view_t *view,
+	      char *out_buf, const char *in_buf)
+{
+	int rc;
+
+	rc = id->buf_size;
+	if (out_buf == NULL || in_buf == NULL)
+		goto out;
+	memcpy(out_buf, in_buf, id->buf_size);
+      out:
+	return rc;
+}
+
+/*
+ * prints debug data in hex/ascii format
+ */
+static int
+hex_ascii_format_fn(debug_info_t * id, debug_view_t *view,
+		    char *out_buf, const char *in_buf)
+{
+	int i, rc = 0;
+
+	if (out_buf == NULL || in_buf == NULL) {
+		rc = id->buf_size * 4 + 3;
+		goto out;
+	}
+	for (i = 0; i < id->buf_size; i++) {
+		rc += sprintf(out_buf + rc, "%02x ",
+			      ((unsigned char *) in_buf)[i]);
+	}
+	rc += sprintf(out_buf + rc, "| ");
+	for (i = 0; i < id->buf_size; i++) {
+		unsigned char c = in_buf[i];
+		if (!isprint(c))
+			rc += sprintf(out_buf + rc, ".");
+		else
+			rc += sprintf(out_buf + rc, "%c", c);
+	}
+	rc += sprintf(out_buf + rc, "\n");
+      out:
+	return rc;
+}
+
+/*
+ * prints debug data in sprintf format
+ */
+static int
+sprintf_format_fn(debug_info_t * id, debug_view_t *view,
+		  char *out_buf, const char *in_buf)
+{
+#define _BUFSIZE 1024
+	char buf[_BUFSIZE];
+	int i, k, rc = 0, num_longs = 0, num_used_args = 0, num_strings = 0;
+	/* use kaddr_t to store long values of 32bit and 64bit archs here */
+	kaddr_t inbuf_cpy[DEBUG_SPRINTF_MAX_ARGS];
+	/* store ptrs to strings to be deallocated at end of this function */
+	uaddr_t to_dealloc[DEBUG_SPRINTF_MAX_ARGS];
+	kaddr_t addr;
+
+	memset(buf, 0, sizeof(buf));
+	memset(inbuf_cpy, 0, sizeof(inbuf_cpy));
+	memset(to_dealloc, 0, sizeof(to_dealloc));
+
+	if (out_buf == NULL || in_buf == NULL) {
+	      rc = id->buf_size * 4 + 3;
+	      goto out;
+	}
+
+	/* get the format string into buf */
+	addr = KL_GET_PTR((void*)in_buf);
+	GET_BLOCK(addr, _BUFSIZE, buf);
+
+	k = 0;
+	for (i = 0; buf[i] && (buf[i] != '\n'); i++) {
+		if (buf[i] != '%')
+			continue;
+		if (k == DEBUG_SPRINTF_MAX_ARGS) {
+			fprintf(KL_ERRORFP,
+				"\nToo much parameters in sprinf view (%i)\n"
+				,k + 1);
+			fprintf(KL_ERRORFP, "Format String: %s)\n", buf);
+			break;
+		}
+		/* for sprintf we have only unsigned long values ... */
+		if (buf[i+1] != 's'){
+			/* we use KL_GET_PTR here to read ulong value */
+			addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW));
+			inbuf_cpy[k] = addr;
+		} else { /* ... or ptrs to strings in debug areas */
+			inbuf_cpy[k] = (uaddr_t) malloc(_BUFSIZE);
+			to_dealloc[num_strings++] = inbuf_cpy[k];
+			addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW));
+			GET_BLOCK(addr, _BUFSIZE,
+				  (void*)(uaddr_t)(inbuf_cpy[k]));
+		}
+		k++;
+	}
+
+	/* count of longs fit into one entry */
+	num_longs = id->buf_size /  KL_NBPW; /* sizeof(long); */
+	if(num_longs < 1)	  /* bufsize of entry too small */
+		goto out;
+	if(num_longs == 1) {	  /* no args, just print the format string */
+		rc = sprintf(out_buf + rc, "%s", buf);
+		goto out;
+	}
+
+	/* number of arguments used for sprintf (without the format string) */
+	num_used_args = MIN(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1));
+
+	rc = sprintf(out_buf + rc, buf, (uaddr_t)(inbuf_cpy[0]),
+		     (uaddr_t)(inbuf_cpy[1]), (uaddr_t)(inbuf_cpy[2]),
+		     (uaddr_t)(inbuf_cpy[3]), (uaddr_t)(inbuf_cpy[4]),
+		     (uaddr_t)(inbuf_cpy[5]), (uaddr_t)(inbuf_cpy[6]),
+		     (uaddr_t)(inbuf_cpy[7]), (uaddr_t)(inbuf_cpy[8]),
+		     (uaddr_t)(inbuf_cpy[9]));
+ out:
+	while (num_strings--){
+		free((char*)(to_dealloc[num_strings]));
+	}
+	return rc;
+}
+
+
+/***********************************
+ * functions for debug-views
+ ***********************************/
+
+/*
+ * prints out actual debug level
+ */
+static int
+prolog_level_fn(debug_info_t * id,
+		debug_view_t *view, char *out_buf)
+{
+	int rc = 0;
+
+	if (out_buf == NULL) {
+		rc = 2;
+		goto out;
+	}
+	rc = sprintf(out_buf, "%i\n", id->level);
+      out:
+	return rc;
+}
+
+/*
+ * prints out actual pages_per_area
+ */
+static int
+prolog_pages_fn(debug_info_t * id,
+		debug_view_t *view, char *out_buf)
+{
+	int rc = 0;
+
+	if (out_buf == NULL) {
+		rc = 2;
+		goto out;
+	}
+	rc = sprintf(out_buf, "%i\n", id->pages_per_area_v2);
+      out:
+	return rc;
+}
+
+/*
+ * prints out prolog
+ */
+static int
+prolog_fn(debug_info_t * id,
+	  debug_view_t *view, char *out_buf)
+{
+	int rc = 0;
+
+	rc = sprintf(out_buf, "AREA TIME LEVEL EXCEPTION CP CALLING FUNCTION"
+		     "   + OFFSET  DATA\n==================================="
+		     "=======================================\n");
+	return rc;
+}
+
+/*
+ * prints debug data in hex format
+ */
+static int
+hex_format_fn(debug_info_t * id, debug_view_t *view,
+	      char *out_buf, const char *in_buf)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < id->buf_size; i++) {
+		rc += sprintf(out_buf + rc, "%02x ",
+			      ((unsigned char *) in_buf)[i]);
+	}
+	rc += sprintf(out_buf + rc, "\n");
+	return rc;
+}
+
+/*
+ * prints debug data in ascii format
+ */
+static int
+ascii_format_fn(debug_info_t * id, debug_view_t *view,
+		char *out_buf, const char *in_buf)
+{
+	int i, rc = 0;
+
+	if (out_buf == NULL || in_buf == NULL) {
+		rc = id->buf_size + 1;
+		goto out;
+	}
+	for (i = 0; i < id->buf_size; i++) {
+		unsigned char c = in_buf[i];
+		if (!isprint(c))
+			rc += sprintf(out_buf + rc, ".");
+		else
+			rc += sprintf(out_buf + rc, "%c", c);
+	}
+	rc += sprintf(out_buf + rc, "\n");
+      out:
+	return rc;
+}
+
+/*
+ * prints debug data in ebcdic format
+ */
+static int
+ebcdic_format_fn(debug_info_t * id, debug_view_t *view,
+		 char *out_buf, const char *in_buf)
+{
+	int i, rc = 0;
+
+	if (out_buf == NULL || in_buf == NULL) {
+		rc = id->buf_size + 1;
+		goto out;
+	}
+	for (i = 0; i < id->buf_size; i++) {
+		char c = in_buf[i];
+		EBCASC(&c, 1);
+		if (!isprint(c))
+			rc += sprintf(out_buf + rc, ".");
+		else
+			rc += sprintf(out_buf + rc, "%c", c);
+	}
+	rc += sprintf(out_buf + rc, "\n");
+      out:
+	return rc;
+}
+
+debug_view_t ascii_view = {
+	"ascii",
+	&prolog_fn,
+	&dflt_header_fn,
+	&ascii_format_fn,
+};
+
+debug_view_t ebcdic_view = {
+	"ebcdic",
+	&prolog_fn,
+	&dflt_header_fn,
+	&ebcdic_format_fn,
+};
+
+debug_view_t hex_view = {
+	"hex",
+	&prolog_fn,
+	&dflt_header_fn,
+	&hex_format_fn,
+};
+
+debug_view_t level_view = {
+	"level",
+	&prolog_level_fn,
+	NULL,
+	NULL,
+};
+
+debug_view_t pages_view = {
+	"pages",
+	&prolog_pages_fn,
+	NULL,
+	NULL,
+};
+
+debug_view_t raw_view = {
+	"raw",
+	NULL,
+	&raw_header_fn,
+	&raw_format_fn,
+};
+
+debug_view_t hex_ascii_view = {
+	"hex_ascii",
+	&prolog_fn,
+	&dflt_header_fn,
+	&hex_ascii_format_fn,
+};
+
+debug_view_t sprintf_view = {
+	"sprintf",
+	&prolog_fn,
+	&dflt_header_fn,
+	&sprintf_format_fn,
+};
+
+
+static debug_entry_t *
+debug_find_oldest_entry(debug_entry_t *entries, int num, int entry_size)
+{
+	debug_entry_t *result, *current;
+	int i;
+	uint64_t clock1, clock2;
+
+	result = entries;
+	current = entries;
+	for (i=0; i < num; i++) {
+		if (current->id.stck == 0)
+			break;
+		clock1 = current->id.fields.clock;
+		clock2 = result->id.fields.clock;
+		clock1 = KL_GET_UINT64(&clock1);
+		clock2 = KL_GET_UINT64(&clock2);
+		if (clock1 < clock2)
+			result = current;
+		current = (debug_entry_t *) ((char *) current + entry_size);
+	}
+	return result;
+}
+
+
+/*
+ * debug_format_output:
+ * - calls prolog, header and format functions of view to format output
+ */
+static int
+debug_format_output_v1(debug_info_t * debug_area, debug_view_t *view, 
+			FILE * ofp)
+{
+	int i, j, len;
+	int nr_of_entries;
+	debug_entry_t *act_entry, *last_entry;
+	char *act_entry_data;
+	char buf[2048];
+
+	/* print prolog */
+	if (view->prolog_proc) {
+		len = view->prolog_proc(debug_area, view, buf);
+		fwrite(buf,len, 1, ofp);
+		memset(buf, 0, 2048);
+	}
+	/* print debug records */
+	if (!(view->format_proc) && !(view->header_proc))
+		goto out;
+	if(debug_area->entry_size <= 0){
+		fprintf(ofp, "Invalid entry_size: %i\n",debug_area->entry_size);
+		goto out;
+	}
+	nr_of_entries = (PAGE_SIZE << debug_area->page_order) / debug_area->entry_size;
+	for (i = 0; i < debug_area->nr_areas; i++) {
+		act_entry = debug_find_oldest_entry(debug_area->areas[i],
+						    nr_of_entries,
+						    debug_area->entry_size);
+		last_entry = (debug_entry_t *) ((char *) debug_area->areas[i] +
+			     (PAGE_SIZE << debug_area->page_order) -
+			     debug_area->entry_size);
+		for (j = 0; j < nr_of_entries; j++) {
+			act_entry_data = (char*)act_entry + dbe_size;
+			if (act_entry->id.stck == 0)
+				break;	/* empty entry */
+			if (view->header_proc) {
+				len = view->header_proc(debug_area, view, i,
+						  act_entry, buf);
+				fwrite(buf,len, 1, ofp);
+				memset(buf, 0, 2048);
+			}
+			if (view->format_proc) {
+				len = view->format_proc(debug_area, view,
+						  buf, act_entry_data);
+				fwrite(buf,len, 1, ofp);
+				memset(buf, 0, 2048); 
+			}
+			act_entry =
+			    (debug_entry_t *) (((char *) act_entry) +
+					       debug_area->entry_size);
+			if (act_entry > last_entry)
+				act_entry = debug_area->areas[i];
+		}
+	}
+      out:
+	return 1;
+}
+
+/*
+ * debug_format_output_v2:
+ * - calls prolog, header and format functions of view to format output
+ */
+static int
+debug_format_output_v2(debug_info_t * debug_area,
+		    debug_view_t *view, FILE * ofp)
+{
+	int i, j, k, len;
+	debug_entry_t *act_entry;
+	char *act_entry_data;
+	char buf[2048];
+
+	/* print prolog */
+	if (view->prolog_proc) {
+		len = view->prolog_proc(debug_area, view, buf);
+		fwrite(buf,len, 1, ofp);
+		memset(buf, 0, 2048);
+	}
+	/* print debug records */
+	if (!(view->format_proc) && !(view->header_proc))
+		goto out;
+	if(debug_area->entry_size <= 0){
+		fprintf(ofp, "Invalid entry_size: %i\n",debug_area->entry_size);
+		goto out;
+	}
+	for (i = 0; i < debug_area->nr_areas; i++) {
+		int nr_entries_per_page = PAGE_SIZE/debug_area->entry_size;
+		for (j = 0; j < debug_area->pages_per_area_v2; j++) {
+			act_entry = debug_area->areas_v2[i][j];
+			for (k = 0; k < nr_entries_per_page; k++) {
+				act_entry_data = (char*)act_entry + dbe_size;
+				if (act_entry->id.stck == 0)
+					break;	/* empty entry */
+				if (view->header_proc) {
+					len = view->header_proc(debug_area, 
+						view, i, act_entry, buf);
+					fwrite(buf,len, 1, ofp);
+					memset(buf, 0, 2048);
+				}
+				if (view->format_proc) {
+					len = view->format_proc(debug_area, 
+						view, buf, act_entry_data);
+					fwrite(buf,len, 1, ofp);
+					memset(buf, 0, 2048); 
+				}
+				act_entry = (debug_entry_t *) (((char *) 
+					act_entry) + debug_area->entry_size);
+			}
+		}
+	}
+out:
+	return 1;
+}
+
+static debug_info_t *
+find_debug_area(const char *area_name)
+{
+	debug_info_t* act_debug_info = debug_area_first;
+	while(act_debug_info != NULL){
+		if (strcmp(act_debug_info->name, area_name) == 0)
+				return act_debug_info;
+		act_debug_info = act_debug_info->next;
+	}
+	return NULL;
+}
+
+static void
+dbf_init(void)
+{
+	if (!initialized) {
+		if(dbf_version >= DBF_VERSION_V2)
+			add_lcrash_debug_view(&pages_view);
+		add_lcrash_debug_view(&ascii_view);
+		add_lcrash_debug_view(&level_view);
+		add_lcrash_debug_view(&ebcdic_view);
+		add_lcrash_debug_view(&hex_view);
+		add_lcrash_debug_view(&hex_ascii_view);
+		add_lcrash_debug_view(&sprintf_view);
+		add_lcrash_debug_view(&raw_view);
+		ebcdic_ascii_conv = iconv_open("ISO-8859-1", "EBCDIC-US");
+		initialized = 1;
+	}
+}
+
+static debug_view_t*
+get_debug_view(kaddr_t addr)
+{
+	void* k_debug_view;
+	int   k_debug_view_size;
+	debug_view_t* rc;
+
+	rc = (debug_view_t*)malloc(sizeof(debug_view_t));
+	memset(rc, 0, sizeof(debug_view_t));
+
+	k_debug_view_size = kl_struct_len("debug_view");
+	k_debug_view      = malloc(k_debug_view_size);
+	GET_BLOCK(addr, k_debug_view_size, k_debug_view);		
+	strncpy(rc->name,K_PTR(k_debug_view,"debug_view","name"),
+		DEBUG_MAX_PROCF_LEN);
+
+	free(k_debug_view);
+	return rc;
+}
+
+static void
+free_debug_view(debug_view_t* view)
+{
+	if(view) 
+		free(view);
+}
+
+static void
+debug_get_areas_v1(debug_info_t* db_info, void* k_dbi)
+{
+	kaddr_t mem_pos;
+	kaddr_t dbe_addr;
+	int area_size, i;
+
+       	/* get areas */
+	/* place to hold ptrs to debug areas in lcrash */
+	area_size = PAGE_SIZE << db_info->page_order;
+       	db_info->areas = (void**)malloc(db_info->nr_areas * sizeof(void *));
+	memset(db_info->areas, 0, db_info->nr_areas * sizeof(void *));
+       	mem_pos = (kaddr_t) KL_UINT(k_dbi,"debug_info","areas");
+       	for (i = 0; i < db_info->nr_areas; i++) {
+		dbe_addr = KL_VREAD_PTR(mem_pos);
+	       	db_info->areas[i] = (debug_entry_t *) malloc(area_size);
+		/* read raw data for debug area */
+	       	GET_BLOCK(dbe_addr, area_size, db_info->areas[i]);
+		mem_pos += KL_NBPW;
+	}
+}
+
+static void
+debug_get_areas_v2(debug_info_t* db_info, void* k_dbi)
+{
+	kaddr_t area_ptr;
+	kaddr_t page_array_ptr;
+	kaddr_t page_ptr;
+	int i,j;
+       	db_info->areas_v2=(void***)malloc(db_info->nr_areas * sizeof(void **));
+       	area_ptr = (kaddr_t) KL_UINT(k_dbi,"debug_info","areas");
+       	for (i = 0; i < db_info->nr_areas; i++) {
+		db_info->areas_v2[i] = (void**)malloc(db_info->pages_per_area_v2
+							* sizeof(void*));
+		page_array_ptr = KL_VREAD_PTR(area_ptr);
+		for(j=0; j < db_info->pages_per_area_v2; j++) {
+			page_ptr = KL_VREAD_PTR(page_array_ptr);
+			db_info->areas_v2[i][j] = (void*)malloc(PAGE_SIZE);
+			/* read raw data for debug area */
+	       		GET_BLOCK(page_ptr, PAGE_SIZE, db_info->areas_v2[i][j]);
+			page_array_ptr += KL_NBPW;
+		}
+		area_ptr += KL_NBPW;
+	}
+}
+
+static debug_info_t*
+get_debug_info(kaddr_t addr,int get_areas)
+{
+	void *k_dbi;
+	kaddr_t mem_pos;
+	kaddr_t view_addr;
+	debug_info_t* db_info;
+	int i;
+	int dbi_size;
+
+	/* get sizes of kernel structures */
+	if(!(dbi_size = kl_struct_len("debug_info"))){
+		fprintf (KL_ERRORFP,
+			 "Could not determine sizeof(struct debug_info)\n");
+		return(NULL);
+	}
+	if(!(dbe_size = kl_struct_len("__debug_entry"))){
+		fprintf(KL_ERRORFP,
+			"Could not determine sizeof(struct __debug_entry)\n");
+		return(NULL);
+	}
+
+	/* get kernel debug_info structure */
+	k_dbi = malloc(dbi_size);
+	GET_BLOCK(addr, dbi_size, k_dbi);
+
+	db_info = (debug_info_t*)malloc(sizeof(debug_info_t));
+	memset(db_info, 0, sizeof(debug_info_t));
+
+	/* copy members */
+	db_info->level	    = KL_INT(k_dbi,"debug_info","level");
+	db_info->nr_areas	 = KL_INT(k_dbi,"debug_info","nr_areas");
+	db_info->pages_per_area_v2= KL_INT(k_dbi,"debug_info","pages_per_area");
+	db_info->page_order       = KL_INT(k_dbi,"debug_info","page_order");
+	db_info->buf_size	 = KL_INT(k_dbi,"debug_info","buf_size");
+	db_info->entry_size       = KL_INT(k_dbi,"debug_info","entry_size");
+	db_info->next_dbi	 = KL_UINT(k_dbi,"debug_info","next");
+	db_info->prev_dbi	 = KL_UINT(k_dbi,"debug_info","prev");
+	db_info->addr	     = addr;
+	strncpy(db_info->name,K_PTR(k_dbi,"debug_info","name"),
+		DEBUG_MAX_PROCF_LEN);
+
+
+	if(get_areas){
+		if(dbf_version == DBF_VERSION_V1)
+			debug_get_areas_v1(db_info,k_dbi);
+		else
+			debug_get_areas_v2(db_info,k_dbi);
+	} else {
+		db_info->areas = NULL;
+	}
+
+	/* get views */
+	mem_pos = (uaddr_t) K_PTR(k_dbi,"debug_info","views");
+	memset(&db_info->views, 0, DEBUG_MAX_VIEWS * sizeof(void*));
+	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+		view_addr = KL_GET_PTR((void*)(uaddr_t)mem_pos);
+		if(view_addr == 0){
+			break;
+		} else {
+			db_info->views[i] = get_debug_view(view_addr);
+		}
+		mem_pos += KL_NBPW;
+	}
+	free(k_dbi);
+	return db_info;
+}
+
+static void
+free_debug_info_v1(debug_info_t * db_info)
+{
+	int i;
+	if(db_info->areas){
+		for (i = 0; i < db_info->nr_areas; i++) {
+			free(db_info->areas[i]);
+		}
+	}
+	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+		free_debug_view(db_info->views[i]);
+	}
+	free(db_info->areas);
+	free(db_info);
+}
+
+static void
+free_debug_info_v2(debug_info_t * db_info)
+{
+	int i,j;
+	if(db_info->areas) {
+		for (i = 0; i < db_info->nr_areas; i++) {
+			for(j = 0; j < db_info->pages_per_area_v2; j++) {
+				free(db_info->areas_v2[i][j]);
+			}
+			free(db_info->areas[i]);
+		}
+		free(db_info->areas);
+		db_info->areas = NULL;
+	}
+	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+		free_debug_view(db_info->views[i]);
+	}
+	free(db_info);
+}
+
+static int
+get_debug_areas(void)
+{
+	kaddr_t act_debug_area;
+	syment_t *debug_sym;
+	debug_info_t *act_debug_area_cpy;
+
+	if(!(debug_sym = kl_lkup_symname("debug_area_first"))){
+		printf("Did not find debug_areas");
+		return -1;
+	}
+	act_debug_area = KL_VREAD_PTR(debug_sym->s_addr);
+	while(act_debug_area != 0){
+		act_debug_area_cpy = get_debug_info(act_debug_area,0);
+		act_debug_area     = act_debug_area_cpy->next_dbi;
+	 	if(debug_area_first == NULL){
+			debug_area_first = act_debug_area_cpy;
+		} else {
+			debug_area_last->next = act_debug_area_cpy;
+		}
+		debug_area_last = act_debug_area_cpy;
+	}
+	return 0;
+}
+
+static void
+free_debug_areas(void)
+{
+	debug_info_t* next;
+	debug_info_t* act_debug_info = debug_area_first;
+
+	while(act_debug_info != NULL){
+		next = act_debug_info->next;
+		if(dbf_version == DBF_VERSION_V1)
+			free_debug_info_v1(act_debug_info);
+		else
+			free_debug_info_v2(act_debug_info);
+		act_debug_info = next;
+	}
+
+	debug_area_first = NULL;
+	debug_area_last  = NULL;
+}
+
+static debug_view_t *
+find_lcrash_debug_view(const char *name)
+{
+	int i;
+	for (i = 0; (i < LCRASH_DB_VIEWS) && (debug_views[i] != NULL); i++) {
+		if (strcmp(debug_views[i]->name, name) == 0)
+			return debug_views[i];
+	}
+	return NULL;
+}
+
+static void
+print_lcrash_debug_views(FILE * ofp)
+{
+	int i;
+	fprintf(ofp, "REGISTERED VIEWS\n");
+	fprintf(ofp, "=====================\n");
+	for (i = 0; i < LCRASH_DB_VIEWS; i++) {
+		if (debug_views[i] == NULL) {
+			return;
+		}
+		fprintf(ofp, " - %s\n", debug_views[i]->name);
+	}
+}
+
+static int
+add_lcrash_debug_view(debug_view_t *view)
+{
+	int i;
+	for (i = 0; i < LCRASH_DB_VIEWS; i++) {
+		if (debug_views[i] == NULL) {
+			debug_views[i] = view;
+			return 0;
+		}
+		if (strcmp(debug_views[i]->name, view->name) == 0)
+			return -1;
+	}
+	return -1;
+}
+
+static int
+list_one_view(char *area_name, char *view_name, command_t * cmd)
+{
+	debug_info_t *db_info;
+	debug_view_t *db_view;
+
+	if ((db_info = find_debug_area(area_name)) == NULL) {
+		fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name);
+		return -1;
+	}
+
+	db_info = get_debug_info(db_info->addr,1);
+
+	if ((db_view = find_lcrash_debug_view(view_name)) == NULL) {
+		fprintf(cmd->efp, "View '%s' not registered!\n", view_name);
+		return -1;
+	}
+	if(dbf_version == DBF_VERSION_V1){
+		debug_format_output_v1(db_info, db_view, cmd->ofp);
+		free_debug_info_v1(db_info);
+	} else {
+		debug_format_output_v2(db_info, db_view, cmd->ofp);
+		free_debug_info_v2(db_info);
+	}
+	return 0;
+}
+
+static int
+list_areas(FILE * ofp)
+{
+	debug_info_t* act_debug_info = debug_area_first;
+	fprintf(ofp, "Debug Logs:\n");
+	fprintf(ofp, "==================\n");
+	while(act_debug_info != NULL){
+		fprintf(ofp, " - %s\n", act_debug_info->name);
+		act_debug_info = act_debug_info->next;
+	}
+	return 0;
+}
+
+static int
+list_one_area(const char *area_name, command_t * cmd)
+{
+	debug_info_t *db_info;
+	int i;
+	if ((db_info = find_debug_area(area_name)) == NULL) {
+		fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name);
+		return -1;
+	}
+	fprintf(cmd->ofp, "INSTALLED VIEWS FOR '%s':\n", area_name);
+	fprintf(cmd->ofp, "================================================"
+		"==============================\n");
+	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+		if (db_info->views[i] != NULL) {
+			fprintf(cmd->ofp, " - %s ", db_info->views[i]->name);
+			if (find_lcrash_debug_view(db_info->views[i]->name))
+				fprintf(cmd->ofp, "(available)\n");
+			else
+				fprintf(cmd->ofp, "(not available)\n");
+		}
+	}
+	fprintf(cmd->ofp, "================================================="
+		"=============================\n");
+	return 0;
+}
+
+#ifdef DBF_DYNAMIC_VIEWS
+static int
+load_debug_view(const char *path, command_t * cmd)
+{
+	void *library;
+	const char *error;
+	debug_view_t *(*view_init_func) (void);
+
+	library = dlopen(path, RTLD_LAZY);
+	if (library == NULL) {
+		fprintf(cmd->efp, "Could not open %s: %s\n", path, dlerror());
+		return (1);
+	}
+
+	dlerror();
+
+	view_init_func = dlsym(library, "debug_view_init");
+	error = dlerror();
+
+	if (error) {
+		fprintf(stderr, "could not find debug_view_init(): %s\n",
+			error);
+		exit(1);
+	}
+
+	add_lcrash_debug_view((*view_init_func) ());
+
+	fprintf(cmd->ofp, "view %s loaded\n", path);
+	fflush(stdout);
+	return 0;
+}
+#endif
+
+/* 
+ * s390dbf_cmd() -- Run the 's390dbf' command.
+ */
+static int
+s390dbf_cmd(command_t * cmd)
+{
+	syment_t *dbf_version_sym;
+	int rc = 0;
+
+	/* check version */
+ 
+	if(!(dbf_version_sym = kl_lkup_symname("debug_feature_version"))){
+		fprintf(KL_ERRORFP,
+			"Could not determine debug_feature_version\n");
+		return -1;
+	}
+
+	dbf_version = KL_VREAD_UINT32(dbf_version_sym->s_addr);
+
+	if ((dbf_version != DBF_VERSION_V1) && (dbf_version != DBF_VERSION_V2)){
+		fprintf(cmd->efp,"lcrash does not support the"
+			" debug feature version of the dump kernel:\n");
+		fprintf(cmd->efp,"DUMP: %i SUPPORTED: %i and %i\n",
+			dbf_version, DBF_VERSION_V1, DBF_VERSION_V2);
+		return -1;
+	}
+
+	dbf_init();
+
+	if (cmd->flags & C_ALL) {
+		return (0);
+	}
+#ifdef DBF_DYNAMIC_VIEWS
+	if (cmd->flags & LOAD_FLAG) {
+		printf("loading: %s\n", cmd->args[0]);
+		return (load_debug_view(cmd->args[0], cmd));
+	}
+#endif
+	if (cmd->flags & VIEWS_FLAG) {
+		print_lcrash_debug_views(cmd->ofp);
+		return (0);
+	}
+	if (cmd->nargs > 2) {
+		s390dbf_usage(cmd);
+		return (1);
+	}
+
+	if(get_debug_areas() == -1) 
+		return -1;
+
+	switch (cmd->nargs) {
+	case 0:
+		rc = list_areas(cmd->ofp);
+		break;
+	case 1:
+		rc = list_one_area(cmd->args[0], cmd);
+		break;
+	case 2:
+		rc = list_one_view(cmd->args[0], cmd->args[1], cmd);
+		break;	
+	}
+
+	free_debug_areas();
+
+	return rc;
+}
+
+#define _S390DBF_USAGE " [-v] [debug log] [debug view]"
+
+/*
+ * s390dbf_usage() -- Print the usage string for the 's390dbf' command.
+ */
+void
+s390dbf_usage(command_t * cmd)
+{
+	CMD_USAGE(cmd, _S390DBF_USAGE);
+}
+
+/*
+ * s390 debug feature command for crash
+ */
+
+char *help_s390dbf[] = {
+	"s390dbf",
+	"s390dbf prints out debug feature logs",
+	"[-v] [debug_log] [debug_log view]",
+	"",
+	"Display Debug logs:",
+	" + If called without parameters, all active debug logs are listed.",
+	" + If called with '-v', all debug views which are available to",
+	"   'crash' are listed",
+	" + If called with the name of a debug log, all debug-views for which",
+	"   the debug-log has registered are listed. It is possible thatsome",
+	"   of the debug views are not available to 'crash'.",
+	" + If called with the name of a debug-log and an available viewname,",
+	"   the specified view is printed.",
+	NULL
+};
+
+void cmd_s390dbf()
+{
+	int i,c;
+
+	command_t cmd = {
+		.ofp = stdout,
+		.efp = stderr,
+		.cmdstr = "s390dbf",
+		.command = "s390dbf",
+	};
+
+	cmd.nargs=argcnt - 1;
+	for (i=1; i < argcnt; i++)
+		cmd.args[i-1] = args[i];
+	
+	while ((c = getopt(argcnt, args, "v")) != EOF) {
+		switch(c) {
+		case 'v':
+			cmd.flags |= VIEWS_FLAG;
+			break;
+		default:
+			s390dbf_usage(&cmd);
+			return;
+		}
+	}
+	s390dbf_cmd(&cmd);
+}
+
+#endif
+
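The kl_s390tod_to_timeval() helper above converts an s390 TOD (store clock) value into seconds and microseconds: bit 51 of the TOD clock ticks once per microsecond, so after rebasing to the Unix epoch the value is shifted right by 12 bits and split with / and % 1000000. The constant it subtracts works out to the TOD value at 1970-01-01, written as the TOD value at 1972-01-01 minus two years (0x3c26700 seconds) of ticks. The standalone sketch below repeats that arithmetic; it is not part of the patch, and the macro and function names in it are local to the example.

#include <stdio.h>
#include <stdint.h>

/* TOD value at the Unix epoch, expressed the same way as in the patch above */
#define DEMO_TOD_UNIX_EPOCH (0x8126d60e46000000ULL - (0x3c26700ULL * 1000000 * 4096))

static void demo_tod_to_sec_usec(uint64_t tod, uint64_t *sec, uint64_t *usec)
{
	tod -= DEMO_TOD_UNIX_EPOCH;	/* rebase from the TOD epoch (1900) to 1970 */
	tod >>= 12;			/* 4096 TOD units == 1 microsecond */
	*sec  = tod / 1000000;
	*usec = tod % 1000000;
}

int main(void)
{
	uint64_t sec, usec;

	/* one second past the Unix epoch, i.e. 1,000,000 microseconds in TOD units */
	demo_tod_to_sec_usec(DEMO_TOD_UNIX_EPOCH + (1000000ULL << 12), &sec, &usec);
	printf("%llu.%06llu\n", (unsigned long long)sec,
	       (unsigned long long)usec);	/* prints 1.000000 */
	return 0;
}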
--- crash/vas_crash.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/vas_crash.h	2006-10-11 09:14:36.000000000 -0400
@@ -1,8 +1,8 @@
 /* vas_crash.h - kernel crash dump file format (on swap)
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -19,7 +19,7 @@
  */
 
 #include <sys/types.h>
-#include <asm/page.h>
+//#include <asm/page.h>
 
 void save_core(void);
 
--- crash/global_data.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/global_data.c	2006-11-21 11:32:04.000000000 -0500
@@ -68,7 +68,7 @@
  *  To add a new command, declare it in defs.h and enter it in this table.
  */
 
-struct command_table_entry base_command_table[] = {
+struct command_table_entry linux_command_table[] = {
 	{"*", 	    cmd_pointer, help_pointer, 0},
 	{"alias",   cmd_alias,   help_alias,   0},
         {"ascii",   cmd_ascii,   help_ascii,   0},
@@ -117,6 +117,9 @@
 	{"waitq",   cmd_waitq,   help_waitq,   REFRESH_TASK_TABLE},
 	{"whatis",  cmd_whatis,  help_whatis,  0},
 	{"wr",      cmd_wr,      help_wr,      0},
+#if defined(S390) || defined(S390X)
+        {"s390dbf", cmd_s390dbf, help_s390dbf, 0},
+#endif
 	{(char *)NULL}
 };
 
--- crash/crash.8.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/crash.8	2008-04-16 10:21:08.000000000 -0400
@@ -5,7 +5,7 @@
 .TH CRASH 8
 .SH NAME
 crash \- Analyze Linux crash data or a live system
-.SH SYNAPSIS
+.SH SYNOPSIS
 .B crash
 [
 .B -h
@@ -42,9 +42,13 @@
 is a tool for interactively analyzing the state of the Linux system
 while it is running, or after a kernel crash has occurred and a 
 core dump has been created by the Red Hat 
-.I netdump
-facility.  It is loosely based on the SVR4 UNIX crash 
-command, but has been signficantly enhanced
+.I netdump,
+.I diskdump,
+.I kdump,
+or
+.I xendump
+facilities.  It is loosely based on the SVR4 UNIX crash 
+command, but has been significantly enhanced
 by completely merging it with the 
 .I gdb
 debugger. The marriage of the two effectively combines the 
@@ -207,15 +211,15 @@
 .I dis
 disassembles memory, either entire kernel functions, from a
 location for a specified number of instructions, or from the start of a
-fuction up to a specified memory location.
+function up to a specified memory location.
 .TP
 .I eval
 evalues an expression or numeric type and displays the result
-in hexidecimal, decimal, octal and binary.
+in hexadecimal, decimal, octal and binary.
 .TP
 .I exit
 causes
-.I crash
+.B crash
 to exit.
 .TP
 .I extend
@@ -230,7 +234,7 @@
 in the system.
 .TP
 .I fuser
-displays the tasks using the specifed file or socket.
+displays the tasks using the specified file or socket.
 .TP
 .I gdb
 passes its argument to the underlying
@@ -274,7 +278,7 @@
 display various network related data.
 .TP
 .I p
-passes its argumnts to the
+passes its arguments to the
 .I gdb
 "print" command for evaluation and display.
 .TP
@@ -358,14 +362,89 @@
 .I whatis
 displays the definition of structures, unions, typedefs or
 text/data symbols.
+.TP
 .I wr
 modifies the contents of memory.  When writing to memory on
 a live system, this command should obviously be used with great care.
+.SH FILES
+.TP
+.I .crashrc
+Initialization commands.  The file can be located in the user's
+.B HOME 
+directory and/or the current directory.  Commands found in the
+.I .crashrc
+file in the 
+.B HOME
+directory are executed before those in the current directory's 
+.I .crashrc
+file.
+.SH ENVIRONMENT
+.TP
+.B EDITOR
+Command input is read using
+.BR readline(3).
+If
+.B EDITOR
+is set to
+.I emacs
+or
+.I vi
+then suitable keybindings are used.  If 
+.B EDITOR
+is not set, then
+.I vi
+is used.  This can be overridden by
+.B set vi
+or 
+.B set emacs
+commands located in a
+.IR .crashrc 
+file, or by entering
+.B -e emacs
+on the
+.B crash
+command line.
+.TP
+.B CRASHPAGER
+If
+.B CRASHPAGER
+is set, its value is used as the name of the program to which command output will be sent. 
+If not, then command output is sent to 
+.B /usr/bin/less -E -X 
+by default.
+.SH NOTES
+.PP
+If
+.B crash
+does not work, look for a newer version: kernel evolution frequently makes
+.B crash
+updates necessary.
+.PP
+The command
+.B set scroll off
+will cause output to be sent directly to
+the terminal rather than through a paging program.  This is useful,
+for example, if you are running
+.B crash
+in a window of
+.BR emacs .
 .SH AUTHOR
 Dave Anderson <anderson@redhat.com> wrote
-.B Crash
+.B crash
 .TP
 Jay Fenlason <fenlason@redhat.com> wrote this man page.
 .SH "SEE ALSO"
-netdump(8)
-gdb(1)
+.PP
+The
+.I help
+command within
+.B crash
+provides more complete and accurate documentation than this man page.
+.PP
+.I http://people.redhat.com/anderson
+- the home page of the
+.B crash
+utility.
+.PP
+.BR netdump (8),
+.BR gdb (1)
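The FILES and ENVIRONMENT sections added above are concrete enough for a short example: commands placed in a .crashrc file (in $HOME and/or the current directory) are executed at startup, and keybindings and scrolling behavior can be set there. An illustrative $HOME/.crashrc built only from commands named in the text above (assumed, not taken from a real system):

	set emacs
	set scroll off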
--- crash/va_server.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/va_server.c	2006-10-11 09:14:36.000000000 -0400
@@ -1,8 +1,8 @@
 /* va_server.c - kernel crash dump file translation library
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -57,13 +57,15 @@
 
 extern int monitor_memory(long *, long *, long *, long *);
 
-int Page_Size = PAGE_SIZE;  /* temporary setting until disk header is read */
+int Page_Size;  
 ulong vas_debug = 0;
 
 extern void *malloc(size_t);
 
 int va_server_init(char *crash_file, u_long *start, u_long *end, u_long *stride)
 {
+	Page_Size = getpagesize();  /* temporary setting until disk header is read */
+
 	if(read_map(crash_file)) {
 		if(va_server_init_v1(crash_file, start, end, stride))
 			return -1;
--- crash/symbols.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/symbols.c	2009-01-29 15:33:13.000000000 -0500
@@ -1,8 +1,8 @@
 /* symbols.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -21,6 +21,8 @@
 
 static void store_symbols(bfd *, int, void *, long, unsigned int);
 static void store_sysmap_symbols(void);
+static ulong relocate(ulong, char *, int);
+static int relocate_force(ulong, char *);
 static void strip_module_symbol_end(char *s);
 static int compare_syms(const void *, const void *);
 static int compare_mods(const void *, const void *);
@@ -36,7 +38,9 @@
 static int load_module_index(struct syment *);
 static void section_header_info(bfd *, asection *, void *);
 static void store_section_data(struct load_module *, bfd *, asection *);
-static void calculate_load_order(struct load_module *, bfd *);
+static void calculate_load_order_v1(struct load_module *, bfd *);
+static void calculate_load_order_v2(struct load_module *, bfd *, int,
+        void *, long, unsigned int);
 static void check_insmod_builtin(struct load_module *, int, ulong *);
 static int is_insmod_builtin(struct load_module *, struct syment *);
 struct load_module;
@@ -45,6 +49,7 @@
 static long rodata_search(ulong *, ulong);
 static int ascii_long(ulong word);
 static int is_bfd_format(char *); 
+static int is_binary_stripped(char *);
 static int namespace_ctl(int, struct namespace *, void *, void *);
 static void symval_hash_init(void);
 static struct syment *symval_hash_search(ulong);
@@ -61,12 +66,16 @@
 struct elf_common;
 static void Elf32_Sym_to_common(Elf32_Sym *, struct elf_common *); 
 static void Elf64_Sym_to_common(Elf64_Sym *, struct elf_common *); 
+static void cmd_datatype_common(ulong);
+static int display_per_cpu_info(struct syment *);
 
 
 #define KERNEL_SECTIONS  (void *)(1)
 #define MODULE_SECTIONS  (void *)(2) 
 #define VERIFY_SECTIONS  (void *)(3)
 
+#define EV_DWARFEXTRACT  101010101
+
 #define PARSE_FOR_DATA        (1)
 #define PARSE_FOR_DECLARATION (2)
 static void parse_for_member(struct datatype_member *, ulong);
@@ -96,6 +105,7 @@
 #define SHOW_OFFSET    (0x10000)
 #define IN_UNION       (0x20000)
 #define IN_STRUCT      (0x40000)
+#define DATATYPE_QUERY (0x80000)
 
 #define INTEGER_TYPE    (UINT8|INT8|UINT16|INT16|UINT32|INT32|UINT64|INT64)
 
@@ -110,6 +120,7 @@
 static void dump_datatype_member(FILE *, struct datatype_member *);
 static void dump_datatype_flags(ulong, FILE *);
 static void dump_enumerator_list(char *);
+static long anon_member_offset(char *, char *);
 static int gdb_whatis(char *);
 static void do_datatype_declaration(struct datatype_member *, ulong);
 
@@ -139,6 +150,12 @@
   	if (!bfd_check_format_matches(st->bfd, bfd_object, &matching))
 		error(FATAL, "cannot determine object file format: %s\n",
 			pc->namelist);
+	/*
+	 *  Check whether the namelist is a kerntypes file built by
+	 *  dwarfextract, which places a magic number in e_version.
+	 */
+	if (file_elf_version(pc->namelist) == EV_DWARFEXTRACT)
+		pc->flags |= KERNTYPES;
 
 	if (pc->flags & SYSMAP) {
 		bfd_map_over_sections(st->bfd, section_header_info, 
@@ -153,13 +170,16 @@
 		}
 		store_sysmap_symbols();
 		return;
-	} 
+	} else if (LKCD_KERNTYPES())
+		error(FATAL, "%s: use of kerntypes requires a system map\n",
+			pc->namelist);
 
 	/*
 	 *  Pull a bait-and-switch on st->bfd if we've got a separate
-         *  .gnu_debuglink file that matches the CRC.
+         *  .gnu_debuglink file that matches the CRC. Not done for kerntypes.
 	 */
-	if (!(bfd_get_file_flags(st->bfd) & HAS_SYMS)) {
+	if (!(LKCD_KERNTYPES()) &&
+	    !(bfd_get_file_flags(st->bfd) & HAS_SYMS)) {
 		if (!check_gnu_debuglink(st->bfd))
 			no_debugging_data(FATAL);
 	}
@@ -471,6 +491,11 @@
         kt->stext_init = (ulong)bfd_get_section_vma(st->bfd, section);
         kt->etext_init = kt->stext_init +
         	(ulong)bfd_section_size(st->bfd, section);
+
+	if (kt->relocate) {
+		kt->stext_init -= kt->relocate;
+		kt->etext_init -= kt->relocate;
+	}
 }
 
 /*
@@ -486,6 +511,7 @@
   	bfd_byte *from, *fromend;
         symbol_info syminfo;
 	struct syment *sp;
+	int first;
 
   	if ((store = bfd_make_empty_symbol(abfd)) == NULL)
 		error(FATAL, "bfd_make_empty_symbol() failed\n");
@@ -505,6 +531,13 @@
 	st->symcnt = 0;
 	sp = st->symtable;
 
+	if (machine_type("X86")) {
+		if (!(kt->flags & RELOC_SET))
+			kt->flags |= RELOC_FORCE;
+	} else
+		kt->flags &= ~RELOC_SET;
+
+	first = 0;
   	from = (bfd_byte *) minisyms;
   	fromend = from + symcount * size;
   	for (; from < fromend; from += size)
@@ -516,7 +549,11 @@
       		bfd_get_symbol_info(abfd, sym, &syminfo);
 		if (machdep->verify_symbol(syminfo.name, syminfo.value, 
 		    syminfo.type)) {
-			sp->value = syminfo.value;
+			if (kt->flags & (RELOC_SET|RELOC_FORCE))
+				sp->value = relocate(syminfo.value,
+					(char *)syminfo.name, !(first++));
+			else
+				sp->value = syminfo.value;
 			sp->type = syminfo.type;
 			namespace_ctl(NAMESPACE_INSTALL, &st->namespace,
 				sp, (char *)syminfo.name); 
@@ -540,7 +577,7 @@
 static void
 store_sysmap_symbols(void)
 {
-	int c;
+	int c, first;
 	long symcount;
 	char buf[BUFSIZE];
 	FILE *map;
@@ -564,6 +601,10 @@
                 error(FATAL, "symbol table namespace malloc: %s\n",
                         strerror(errno));
 
+	if (!machine_type("X86"))
+		kt->flags &= ~RELOC_SET;
+
+	first = 0;
         st->syment_size = symcount * sizeof(struct syment);
         st->symcnt = 0;
         sp = st->symtable;
@@ -580,7 +621,11 @@
 
                 if (machdep->verify_symbol(syment.name, syment.value, 
 		    syment.type)) {
-                        sp->value = syment.value;
+			if (kt->flags & RELOC_SET)
+				sp->value = relocate(syment.value,
+					syment.name, !(first++));
+			else
+				sp->value = syment.value;
                         sp->type = syment.type;
                         namespace_ctl(NAMESPACE_INSTALL, &st->namespace,
                                 sp, syment.name);
@@ -603,6 +648,96 @@
 }
 
 /*
+ *  Handle x86 kernels configured such that the vmlinux symbol values
+ *  do not match the kernel's load addresses (not unity-mapped).
+ */
+static ulong
+relocate(ulong symval, char *symname, int first_symbol)
+{
+	switch (kt->flags & (RELOC_SET|RELOC_FORCE))
+	{
+	case RELOC_SET: 
+		break;
+
+	case RELOC_FORCE:
+		if (first_symbol && !relocate_force(symval, symname))
+			kt->flags &= ~RELOC_FORCE;
+		break;
+	}
+
+	return (symval - kt->relocate);
+}
+
+/*
+ *  If no --reloc argument was passed, try to figure it out
+ *  by comparing the first vmlinux kernel symbol with the
+ *  first /proc/kallsyms symbol.  (should be "_text")
+ *
+ *  Live system only (at least for now).
+ */
+static int
+relocate_force(ulong symval, char *symname)
+{
+        FILE *kp;
+	char buf[BUFSIZE];
+        char *kallsyms[MAXARGS];
+	ulong first;
+
+	if (!ACTIVE() || !file_exists("/proc/kallsyms", NULL)) {
+		if (CRASHDEBUG(1))
+			fprintf(fp, 
+			    "cannot determine relocation value: %s\n",
+				!ACTIVE() ? "not a live system" : 
+				"/proc/kallsyms does not exist");
+		return FALSE;
+	}
+
+ 	if ((kp = fopen("/proc/kallsyms", "r")) == NULL) {
+		if (CRASHDEBUG(1))
+                	fprintf(fp, 
+			    "cannot open /proc/kallsyms to determine relocation\n");
+                return FALSE;
+        }
+
+	if (!fgets(buf, BUFSIZE, kp) ||
+	    (parse_line(buf, kallsyms) != 3) ||
+	    !hexadecimal(kallsyms[0], 0)) {
+		fclose(kp);
+		if (CRASHDEBUG(1))
+			fprintf(fp, 
+			    "malformed /proc/kallsyms: cannot determine relocation value\n");
+		return FALSE;
+	}
+	fclose(kp);
+
+	first = htol(kallsyms[0], RETURN_ON_ERROR, NULL);
+
+	if (CRASHDEBUG(1))
+		fprintf(fp, 
+		    "RELOCATE: %s @ %lx %s\n"
+		    "          %s @ %lx /proc/kallsyms\n",
+			symname, symval, pc->namelist,
+			kallsyms[2], first);
+
+	/*
+	 *  If the symbols match and have different values,
+	 *  force the relocation.
+	 */
+	if (STREQ(symname, kallsyms[2])) {
+		if (symval > first) {
+			kt->relocate = symval - first;
+			return TRUE;
+		}
+	}
+
+	if (CRASHDEBUG(1))
+		fprintf(fp, 
+		    "cannot determine relocation value from first symbol\n");
+
+	return FALSE;
+}
+
+/*
  *  Install all static kernel symbol values into the symval_hash.
  */
 static void
@@ -1142,8 +1277,15 @@
 		gpl_syms = ULONG(modbuf + OFFSET(module_gpl_syms));
                 nsyms = UINT(modbuf + OFFSET(module_num_syms));
                 ngplsyms = UINT(modbuf + OFFSET(module_num_gpl_syms));
-                nksyms = ULONG(modbuf + OFFSET(module_num_symtab));
-		size = ULONG(modbuf + OFFSET(module_core_size));
+
+		if (THIS_KERNEL_VERSION >= LINUX(2,6,27)) {
+			nksyms = UINT(modbuf + OFFSET(module_num_symtab));
+			size = UINT(modbuf + OFFSET(module_core_size));
+		} else {
+			nksyms = ULONG(modbuf + OFFSET(module_num_symtab));
+			size = ULONG(modbuf + OFFSET(module_core_size));
+		}
+
 		mod_name = modbuf + OFFSET(module_name);
 
 		lm = &st->load_modules[m++];
@@ -1159,15 +1301,19 @@
 				mod_name);
                 	strncpy(lm->mod_name, mod_name, MAX_MOD_NAME-1);
 		}
-		if (CRASHDEBUG(1))
+		if (CRASHDEBUG(3))
 			fprintf(fp, 
 			    "%lx (%lx): %s syms: %d gplsyms: %d ksyms: %ld\n", 
 				mod, lm->mod_base, lm->mod_name, nsyms, 
 				ngplsyms, nksyms);
 		lm->mod_flags = MOD_EXT_SYMS;
 		lm->mod_ext_symcnt = mcnt;
-		lm->mod_etext_guess = lm->mod_base + 
-			ULONG(modbuf + OFFSET(module_core_text_size));
+		if (THIS_KERNEL_VERSION >= LINUX(2,6,27))
+			lm->mod_etext_guess = lm->mod_base +
+				UINT(modbuf + OFFSET(module_core_text_size));
+		else
+			lm->mod_etext_guess = lm->mod_base +
+				ULONG(modbuf + OFFSET(module_core_text_size));
 		lm->mod_text_start = lm->mod_base;
 
 		st->ext_module_symtable[mcnt].value = lm->mod_base;
@@ -1178,6 +1324,14 @@
 		lm_mcnt = mcnt;
 		mcnt++;
 
+		if (nsyms && !IN_MODULE(syms, lm)) {
+			error(WARNING, 
+			    "[%s] module.syms outside of module "
+			    "address space (%lx)\n\n",
+				lm->mod_name, syms);
+			nsyms = 0;
+		}
+
 		if (nsyms) {
 			modsymbuf = GETBUF(sizeof(struct kernel_symbol)*nsyms);
 			readmem((ulong)syms, KVADDR, modsymbuf,
@@ -1597,7 +1751,10 @@
                 return 0;
         }
 
-	nksyms = ULONG(modbuf + OFFSET(module_num_symtab));
+	if (THIS_KERNEL_VERSION >= LINUX(2,6,27))
+		nksyms = UINT(modbuf + OFFSET(module_num_symtab));
+	else
+		nksyms = ULONG(modbuf + OFFSET(module_num_symtab));
 	ksymtab = ULONG(modbuf + OFFSET(module_symtab));
 	locsymtab = module_buf + (ksymtab - lm->mod_base);
 	kstrtab = ULONG(modbuf + OFFSET(module_strtab));
@@ -1623,6 +1780,19 @@
 		if (ec->st_shndx == SHN_UNDEF)
                         continue;
 
+		if (!IN_MODULE(kstrtab + ec->st_name, lm)) {
+			if (CRASHDEBUG(3)) {
+				error(WARNING, 
+				   "%s: bad st_name index: %lx -> %lx\n        "
+				   " st_value: %lx st_shndx: %ld st_info: %c\n",
+					lm->mod_name,
+					ec->st_name, (kstrtab + ec->st_name),
+					ec->st_value, ec->st_shndx, 
+					ec->st_info);
+			}
+			continue;
+		}
+
 		nameptr = locstrtab + ec->st_name;
 		if (*nameptr == '\0')
 			continue;
@@ -2121,22 +2291,13 @@
                 fprintf(fp, "%sFORCE_DEBUGINFO", others++ ? "|" : "");
         if (st->flags & CRC_MATCHES)
                 fprintf(fp, "%sCRC_MATCHES", others++ ? "|" : "");
+        if (st->flags & ADD_SYMBOL_FILE)
+                fprintf(fp, "%sADD_SYMBOL_FILE", others++ ? "|" : "");
+        if (st->flags & USE_OLD_ADD_SYM)
+                fprintf(fp, "%sUSE_OLD_ADD_SYM", others++ ? "|" : "");
         fprintf(fp, ")\n");
 
 	fprintf(fp, "                 bfd: %lx\n", (ulong)st->bfd);
-
-	sec = (asection **)st->sections;
-	fprintf(fp, "            sections: %s\n", sec ? "" : "(not in use)");
-	for (i = 0; sec && (i < st->bfd->section_count); i++, sec++) {
-		asection *section;
-
-		section = *sec;
-		fprintf(fp, "%25s  vma: %.*lx  size: %ld\n", 
-			section->name, VADDR_PRLEN,
-			(ulong)bfd_get_section_vma(st->bfd, section),
-			(ulong)bfd_section_size(st->bfd, section));
-	}
-
 	fprintf(fp, "            symtable: %lx\n", (ulong)st->symtable);
 	fprintf(fp, "              symend: %lx\n", (ulong)st->symend);
 	fprintf(fp, "              symcnt: %ld\n", st->symcnt);
@@ -2320,6 +2481,24 @@
 			}
                 }
 	}
+
+	fprintf(fp, "\n");
+	fprintf(fp, "dwarf_eh_frame_file_offset: %llx\n", 
+		(unsigned long long)st->dwarf_eh_frame_file_offset);
+        fprintf(fp, "       dwarf_eh_frame_size: %ld\n", st->dwarf_eh_frame_size);
+	fprintf(fp, "\n");
+
+	sec = (asection **)st->sections;
+	fprintf(fp, "            sections: %s\n", sec ? "" : "(not in use)");
+	for (i = 0; sec && (i < st->bfd->section_count); i++, sec++) {
+		asection *section;
+
+		section = *sec;
+		fprintf(fp, "%25s  vma: %.*lx  size: %ld\n", 
+			section->name, VADDR_PRLEN,
+			(ulong)bfd_get_section_vma(st->bfd, section),
+			(ulong)bfd_section_size(st->bfd, section));
+	}
 }
 
 
@@ -2354,6 +2533,106 @@
 }
 
 /*
+ *  Verify a vmlinux file, issuing a warning for processor and endianness
+ *  mismatches.
+ */
+int
+is_kernel(char *file)
+{
+	int fd, swap;
+	char eheader[BUFSIZE];
+	Elf32_Ehdr *elf32;
+	Elf64_Ehdr *elf64;
+
+	if ((fd = open(file, O_RDONLY)) < 0) {
+		error(INFO, "%s: %s\n", file, strerror(errno));
+		return FALSE;
+	}
+	if (read(fd, eheader, BUFSIZE) != BUFSIZE) {
+                /* error(INFO, "%s: %s\n", file, strerror(errno)); */
+		close(fd);
+		return FALSE;
+	}  
+	close(fd);
+
+	if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT)
+		return FALSE;
+
+	elf32 = (Elf32_Ehdr *)&eheader[0];
+	elf64 = (Elf64_Ehdr *)&eheader[0];
+
+	swap = (((eheader[EI_DATA] == ELFDATA2LSB) && 
+	     (__BYTE_ORDER == __BIG_ENDIAN)) ||
+	    ((eheader[EI_DATA] == ELFDATA2MSB) && 
+	     (__BYTE_ORDER == __LITTLE_ENDIAN)));
+
+        if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
+	    (swap16(elf32->e_type, swap) == ET_EXEC) &&
+	    (swap32(elf32->e_version, swap) == EV_CURRENT)) {
+		switch (swap16(elf32->e_machine, swap))
+		{
+		case EM_386:
+			if (machine_type_mismatch(file, "X86", NULL, 0))
+				goto bailout;
+			break;
+
+		case EM_S390:
+			if (machine_type_mismatch(file, "S390", NULL, 0))
+				goto bailout;
+			break;
+
+		default:
+			if (machine_type_mismatch(file, "(unknown)", NULL, 0))
+				goto bailout;
+		}
+
+		if (endian_mismatch(file, elf32->e_ident[EI_DATA], 0))
+			goto bailout;
+
+	} else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
+	    (swap16(elf64->e_type, swap) == ET_EXEC) &&
+	    (swap32(elf64->e_version, swap) == EV_CURRENT)) {
+		switch (swap16(elf64->e_machine, swap))
+		{
+		case EM_IA_64:
+			if (machine_type_mismatch(file, "IA64", NULL, 0))
+				goto bailout;
+			break;
+
+		case EM_PPC64:
+			if (machine_type_mismatch(file, "PPC64", NULL, 0))
+				goto bailout;
+			break;
+
+		case EM_X86_64:
+			if (machine_type_mismatch(file, "X86_64", NULL, 0)) 
+				goto bailout;
+			break;
+
+		case EM_386:
+			if (machine_type_mismatch(file, "X86", NULL, 0))
+				goto bailout;
+			break;
+
+		case EM_S390:
+			if (machine_type_mismatch(file, "S390X", NULL, 0))
+				goto bailout;
+			break;
+
+		default:
+			if (machine_type_mismatch(file, "(unknown)", NULL, 0))
+				goto bailout;
+		}
+
+		if (endian_mismatch(file, elf64->e_ident[EI_DATA], 0))
+			goto bailout;
+	}
+
+bailout:
+	return(is_bfd_format(file));
+}
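
For reference, the e_ident checks above boil down to: the ELF magic plus EV_CURRENT, then a
byte-swap decision based on EI_DATA versus the host byte order, then a dispatch on ELFCLASS
and e_machine.  A minimal standalone sketch of just that header triage (my own example
program, not part of the patch):

#include <elf.h>
#include <endian.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	unsigned char ident[EI_NIDENT];
	FILE *f;

	if (argc != 2 || (f = fopen(argv[1], "rb")) == NULL)
		return 1;
	if (fread(ident, 1, EI_NIDENT, f) != EI_NIDENT) {
		fclose(f);
		return 1;
	}
	fclose(f);

	if (memcmp(ident, ELFMAG, SELFMAG) || ident[EI_VERSION] != EV_CURRENT) {
		printf("not an ELF file\n");
		return 1;
	}

	/* Same swap decision as is_kernel(): swap multi-byte header fields
	 * when the file's byte order differs from the host's. */
	printf("class: %s  swap needed: %s\n",
	    ident[EI_CLASS] == ELFCLASS64 ? "64-bit" : "32-bit",
	    ((ident[EI_DATA] == ELFDATA2LSB && __BYTE_ORDER == __BIG_ENDIAN) ||
	     (ident[EI_DATA] == ELFDATA2MSB && __BYTE_ORDER == __LITTLE_ENDIAN)) ?
	    "yes" : "no");

	return 0;
}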
+
+/*
  *  Given a choice between two namelists, pick the one for gdb to use.
  *  For now, just get their stats and check their sizes; the larger 
  *  one presumably has debug data.
@@ -2427,7 +2706,7 @@
 			goto not_system_map;
 		if (parse_line(buf, mapitems) != 3)
                         goto not_system_map;
-		if ((strlen(mapitems[0]) != MAX_HEXADDR_STRLEN) ||
+		if ((strlen(mapitems[0]) > MAX_HEXADDR_STRLEN) ||
 		    !hexadecimal(mapitems[0], 0) || (strlen(mapitems[1]) > 1))
 			goto not_system_map;
 	}
@@ -2468,6 +2747,33 @@
         return TRUE;
 }
 
+static int
+is_binary_stripped(char *filename)
+{
+#if defined(GDB_6_0) || defined(GDB_6_1)
+        struct bfd *bfd;
+#else
+        struct _bfd *bfd;
+#endif
+	int number_of_symbols;
+
+	if ((bfd = bfd_openr(filename, NULL)) == NULL) {
+		error(INFO, "cannot open ELF file: %s\n", filename);
+		return FALSE;
+	}
+
+	if (!bfd_check_format(bfd, bfd_object)) {
+		error(INFO, "invalid ELF file: %s\n", filename);
+		bfd_close(bfd);
+		return FALSE;
+	}
+
+	number_of_symbols = bfd_canonicalize_symtab(bfd, NULL);
+
+	bfd_close(bfd);
+	
+	return (number_of_symbols == 0);
+}
 
 /*
  *  This command may be used to:
@@ -3463,6 +3769,22 @@
 }
 
 /*
+ *  Same as above, but allow for failure.
+ */
+int
+try_get_symbol_data(char *symbol, long size, void *local)
+{
+        struct syment *sp;
+
+        if ((sp = symbol_search(symbol)) &&
+            readmem(sp->value, KVADDR, local,
+            size, symbol, RETURN_ON_ERROR|QUIET))
+			return TRUE;
+
+	return FALSE;
+}
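
try_get_symbol_data() is the forgiving counterpart of get_symbol_data(): passing
RETURN_ON_ERROR|QUIET means a missing or unreadable symbol yields FALSE instead of aborting
the current command.  A hedged usage fragment (assumes crash's usual globals such as fp;
the symbol name is only an example):

	ulong jiffies_value = 0;

	/* Fall back gracefully if the symbol is absent or unreadable. */
	if (try_get_symbol_data("jiffies", sizeof(ulong), &jiffies_value))
		fprintf(fp, "jiffies: %lu\n", jiffies_value);
	else
		fprintf(fp, "jiffies: (not available)\n");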
+
+/*
  *  Return the value of a given symbol.
  */
 ulong
@@ -3477,6 +3799,34 @@
 }
 
 /*
+ *  Return the value of a symbol from a specific module.
+ */
+ulong
+symbol_value_module(char *symbol, char *module)
+{
+	int i;
+	struct syment *sp, *sp_end;
+	struct load_module *lm;
+
+	for (i = 0; i < st->mods_installed; i++) {
+		lm = &st->load_modules[i];
+
+		if (!STREQ(module, lm->mod_name))
+			continue;
+
+		sp = lm->mod_symtable;
+		sp_end = lm->mod_symend;
+
+		for ( ; sp < sp_end; sp++) {
+			if (STREQ(symbol, sp->name))
+				return(sp->value);
+		}
+	}
+
+	return 0;
+}
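
symbol_value_module() restricts the lookup to one module's symbol table, which matters when
several modules (or the kernel itself) define the same name.  A usage fragment (the module
and symbol names are only examples):

	ulong addr;

	/* Only consider symbols owned by the "ext3" module. */
	if ((addr = symbol_value_module("ext3_inode_cachep", "ext3")))
		fprintf(fp, "ext3_inode_cachep: %lx\n", addr);
	else
		fprintf(fp, "symbol not found in module\n");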
+
+/*
  *  Return the symbol name of a given value, with no allowance for offsets.
  *  Returns NULL on failure to allow for testing of a value.
  */
@@ -3608,6 +3958,8 @@
  *   #define STRUCT_EXISTS(X)    (datatype_info((X), NULL, NULL) >= 0)
  *   #define MEMBER_EXISTS(X,Y)  (datatype_info((X), (Y), NULL) >= 0)
  *   #define MEMBER_SIZE(X,Y)    datatype_info((X), (Y), MEMBER_SIZE_REQUEST)
+ *   #define MEMBER_TYPE(X,Y)    datatype_info((X), (Y), MEMBER_TYPE_REQUEST)
+ *   #define ANON_MEMBER_OFFSET(X,Y)    datatype_info((X), (Y), ANON_MEMBER_OFFSET_REQUEST)
  *
  *  to determine structure or union sizes, or member offsets.
  */
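
Hypothetical uses of the two new request types listed above (the struct/member pairs are
examples only):

	long typecode = MEMBER_TYPE("task_struct", "pid");
	long slab_off = ANON_MEMBER_OFFSET("page", "slab");

	/* typecode is a gdb type code; slab_off is a byte offset, or -1
	 * if the member cannot be resolved. */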
@@ -3620,6 +3972,9 @@
         ulong type_found;
 	char buf[BUFSIZE];
 
+        if (dm == ANON_MEMBER_OFFSET_REQUEST)
+		return anon_member_offset(name, member);
+
 	strcpy(buf, name);
 
 	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
@@ -3743,11 +4098,12 @@
 
 	FREEBUF(req);
 
-        if (dm && (dm != MEMBER_SIZE_REQUEST)) {
+        if (dm && (dm != MEMBER_SIZE_REQUEST) && (dm != MEMBER_TYPE_REQUEST)) {
                 dm->type = type_found;
                 dm->size = size;
 		dm->member_size = member_size;
 		dm->member_typecode = member_typecode;
+		dm->member_offset = offset;
 		if (req->is_typedef) {
 			dm->flags |= TYPEDEF;
 		}
@@ -3762,13 +4118,42 @@
 
 	if (dm == MEMBER_SIZE_REQUEST)
 		return member_size;
-        else if (member) 
+	else if (dm == MEMBER_TYPE_REQUEST)
+		return member_typecode;
+        else if (member)
 		return offset;
 	else
                 return size;
 }
 
 /*
+ *  Determine the offset of a member in an anonymous union
+ *  in a structure.
+ */
+static long
+anon_member_offset(char *name, char *member)
+{
+	int c;
+	char buf[BUFSIZE];
+	char *arglist[MAXARGS];
+	ulong value;
+
+	value = -1;
+	sprintf(buf, "print &((struct %s *)0x0)->%s", name, member);
+
+	open_tmpfile();
+	if (gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) {
+		rewind(pc->tmpfile);
+		if (fgets(buf, BUFSIZE, pc->tmpfile) &&
+	    	    (c = parse_line(strip_linefeeds(buf), arglist)))
+			value = stol(arglist[c-1], RETURN_ON_ERROR|QUIET, NULL);
+	}
+	close_tmpfile();
+
+	return value;
+}
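
The gdb expression built here is the classic zero-pointer offset trick; gdb is used because
crash's own member lookup cannot see through an anonymous union.  A standalone illustration
of the same idiom, evaluated by the compiler instead of gdb (hypothetical struct):

#include <stdio.h>
#include <stddef.h>

struct demo {
	long a;
	union {			/* anonymous union, as in the kernel */
		long b;
		char pad[16];
	};
};

int main(void)
{
	/* Both print 8 on LP64: members of the anonymous union begin
	 * immediately after 'a'. */
	printf("%zu\n", (size_t)&((struct demo *)0)->b);
	printf("%zu\n", offsetof(struct demo, b));
	return 0;
}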
+
+/*
  *  Get the basic type info for a symbol.  Let the caller pass in the 
  *  gnu_request structure to have access to the full response; in either
  *  case, return the type code.  The member field can be used for structures
@@ -3906,276 +4291,31 @@
                 restore_radix = output_radix;
                 output_radix = radix;
 		output_format = (output_radix == 10) ? 0 : 'x';
-        }
-
-        print_union(s, addr);
-
-        if (radix) {
-                output_radix = restore_radix;
-		output_format = (output_radix == 10) ? 0 : 'x';
-	}
-}
-
-/*
- * This command displays either a structure definition, or a formatted display
- * of the contents of a structure at a specified address.  If no address is
- * specified, the structure size and the file in which the structure is defined
- * are also displayed.  A structure member may be appended to the structure 
- * name (in a "struct.member" format) in order to limit the scope of the data
- * displayed to that particular member.  Structure data is shown in hexadecimal
- * format.  The raw data in a structure may be dumped with the -r flag.
- */
-void
-cmd_struct(void)
-{
-	int c;
-	ulong addr, aflag;
-	struct syment *sp;
-	int rawdata;
-	long len;
-	ulong flags;
-	ulong list_head_offset;
-	int count;
-        struct datatype_member struct_member, *sm;
-
-        sm = &struct_member;
-	count = 1;
-	rawdata = 0;
-	aflag = 0;
-	list_head_offset = 0;
-	flags = STRUCT_REQUEST;
-
-        while ((c = getopt(argcnt, args, "c:rvol:")) != EOF) {
-                switch(c)
-		{
-		case 'c':
-			count = atoi(optarg);
-			break;
-
-		case 'r':
-			rawdata = 1;
-			break;
-
-		case 'v':
-			flags |= STRUCT_VERBOSE;
-			break;
-
-		case 'o':
-			flags |= SHOW_OFFSET;
-			break;
-
-		case 'l':
-                        if (IS_A_NUMBER(optarg))
-                                list_head_offset = stol(optarg,
-                                        FAULT_ON_ERROR, NULL);
-                        else if (arg_to_datatype(optarg,
-                                sm, RETURN_ON_ERROR) > 1)
-                                list_head_offset = sm->member_offset;
-			break;
-
-		default:
-			argerrs++;
-			break;
-		}
-	}
-
-	if (argerrs || !args[optind])
-		cmd_usage(pc->curcmd, SYNOPSIS);
-
-	if ((arg_to_datatype(args[optind++], sm, FAULT_ON_ERROR) > 1) && 
-	    rawdata)
-        	error(FATAL, "member-specific output not allowed with -r\n");
-
-	if ((len = sm->size) < 0) {
-		error(INFO, "structure not found: %s\n", sm->name);
-		cmd_usage(pc->curcmd, SYNOPSIS); 
-	}
-	
-	if (!args[optind]) {
-		do_datatype_declaration(sm, flags | (sm->flags & TYPEDEF));
-		return;
-	}
-
-	while (args[optind]) {
-		if (clean_arg() && IS_A_NUMBER(args[optind])) { 
-			if (aflag) 
-				count = stol(args[optind], 
-					FAULT_ON_ERROR, NULL);
-			else {
-				if (!IS_KVADDR(addr = htol(args[optind], 
-                                    FAULT_ON_ERROR, NULL)))
-					error(FATAL, 
-					"invalid kernel virtual address: %s\n",
-						args[optind]);
-				aflag++;
-			}
-		}
-	        else if ((sp = symbol_search(args[optind]))) {
-	                addr = sp->value;
-			aflag++;
-	        } else {
-			fprintf(fp, "symbol not found: %s\n", args[optind]);
-	                fprintf(fp, "possible aternatives:\n");
-	                if (!symbol_query(args[optind], "  ", NULL))
-	                   	fprintf(fp, "  (none found)\n");
-			return;
-		}
-		optind++;
-	}
-
-	if (!aflag)
-		error(FATAL, "no kernel virtual address argument entered\n");
-
-	if (list_head_offset)
-		addr -= list_head_offset;
-
-	if (count < 0) {
-		addr -= len * abs(count);
-		addr += len;
-	}
-
-	for (c =  0; c < abs(count); c++, addr += len) {
-		if (rawdata) 
-			raw_data_dump(addr, len, flags & STRUCT_VERBOSE);
-		else {
-			if (sm->member) 
-				open_tmpfile();
-
-			print_struct(sm->name, addr);
-
-			if (sm->member) {
-				parse_for_member(sm, PARSE_FOR_DATA);
-				close_tmpfile();
-			}
-		}
-	}
-}
-
-/*
- *  After determining what type of data type follows the *, this routine
- *  has the identical functionality as cmd_struct() or cmd_union().
- */
-void 
-cmd_pointer(void)
-{
-	int c;
-	ulong addr, aflag;
-	struct syment *sp;
-	int rawdata;
-	long len;
-	ulong flags;
-	int count;
-        struct datatype_member datatype_member, *dm;
-
-        dm = &datatype_member;
-	rawdata = 0;
-	flags = 0;
-	aflag = 0;
-	count = 1;
-
-        while ((c = getopt(argcnt, args, "c:rvo")) != EOF) {
-                switch(c)
-		{
-                case 'c':
-                        count = atoi(optarg);
-                        break;
-
-		case 'r':
-			rawdata = 1;
-			break;
-
-		case 'v':
-			flags |= STRUCT_VERBOSE;
-			break;
-
-		case 'o':
-			flags |= SHOW_OFFSET;
-			break;
-
-		default:
-			argerrs++;
-			break;
-		}
-	}
-
-	if (argerrs || !args[optind])
-		cmd_usage(pc->curcmd, SYNOPSIS);
-
-	if ((arg_to_datatype(args[optind++], dm, FAULT_ON_ERROR) > 1) && 
-	     rawdata)
-        	error(FATAL, "member-specific output not allowed with -r\n");
-
-	if ((len = dm->size) < 0) {
-		error(INFO, "structure or union not found: %s\n", dm->name);
-		cmd_usage(pc->curcmd, SYNOPSIS);
-	}
-
-	flags |= dm->type;
-
-	if (!args[optind]) {
-		do_datatype_declaration(dm, flags | (dm->flags & TYPEDEF));
-                return;
-	}
-
-	while (args[optind]) {
-		if (clean_arg() && IS_A_NUMBER(args[optind])) { 
-                        if (aflag)
-                                count = stol(args[optind],
-                                        FAULT_ON_ERROR, NULL);
-                        else {
-                                if (!IS_KVADDR(addr = htol(args[optind],
-                                    FAULT_ON_ERROR, NULL)))
-                                        error(FATAL,
-                                        "invalid kernel virtual address: %s\n",
-                                                args[optind]);
-                                aflag++;
-                        }
-		}
-	        else if ((sp = symbol_search(args[optind]))) {
-	                addr = sp->value;
-			aflag++;
-	        } else {
-			fprintf(fp, "symbol not found: %s\n", args[optind]);
-	                fprintf(fp, "possible aternatives:\n");
-	                if (!symbol_query(args[optind], "  ", NULL))
-	                   	fprintf(fp, "  (none found)\n");
-			return;
-		}
-		optind++;
-	}
-
-	if (!(flags & (UNION_REQUEST|STRUCT_REQUEST)))
-		error(FATAL, "invalid argument!");
+        }
 
-        if (!aflag) 
-                error(FATAL, "no kernel virtual address argument entered\n");
+        print_union(s, addr);
 
-	if (count < 0) {
-		addr -= len * abs(count);
-		addr += len;
+        if (radix) {
+                output_radix = restore_radix;
+		output_format = (output_radix == 10) ? 0 : 'x';
 	}
-
-        for (c =  0; c < abs(count); c++, addr += len) {
-                if (rawdata)
-                        raw_data_dump(addr, len, flags & STRUCT_VERBOSE);
-                else {
-                        if (dm->member)
-                                open_tmpfile();
-
-        		if (flags & UNION_REQUEST)
-                		print_union(dm->name, addr);
-        		else if (flags & STRUCT_REQUEST)
-                		print_struct(dm->name, addr);
-
-                        if (dm->member) {
-                                parse_for_member(dm, PARSE_FOR_DATA);
-                                close_tmpfile();
-                        }
-                }
-        }
 }
 
 /*
+ * This command displays either a structure definition, or a formatted display
+ * of the contents of a structure at a specified address.  If no address is
+ * specified, the structure size and the file in which the structure is defined
+ * are also displayed.  A structure member may be appended to the structure 
+ * name (in a "struct.member" format) in order to limit the scope of the data
+ * displayed to that particular member.  Structure data is shown in hexadecimal
+ * format.  The raw data in a structure may be dumped with the -r flag.
+ */
+void
+cmd_struct(void)
+{
+	cmd_datatype_common(STRUCT_REQUEST);
+}
+/*
  * This command displays either a union definition, or a formatted display
  * of the contents of a union at a specified address.  If no address is
  * specified, the union size and the file in which the union is defined
@@ -4187,25 +4327,45 @@
 void
 cmd_union(void)
 {
-	int c;
+	cmd_datatype_common(UNION_REQUEST);
+}
+
+/*
+ *  After determining what type of data type follows the *, this routine
+ *  has the identical functionality as cmd_struct() or cmd_union().
+ */
+void
+cmd_pointer(void)
+{
+	cmd_datatype_common(0);
+}
+
+static void 
+cmd_datatype_common(ulong flags)
+{
+	int i, c;
 	ulong addr, aflag;
 	struct syment *sp;
 	int rawdata;
 	long len;
-	ulong flags;
-	int count;
-        struct datatype_member union_member, *um;
 	ulong list_head_offset;
+	int count;
+	int argc_members;
+	int optind_save;
+        struct datatype_member datatype_member, *dm;
+        char *separator;
+        char *structname, *members;
+        char *memberlist[MAXARGS];
 
-        um = &union_member;
-	count = 1;
+        dm = &datatype_member;
+	count = 0xdeadbeef;
 	rawdata = 0;
 	aflag = 0;
-	list_head_offset = 0;
-	flags = UNION_REQUEST;
+        list_head_offset = 0;
+        argc_members = 0;
 
-        while ((c = getopt(argcnt, args, "c:rvol:")) != EOF) {
-                switch(c)
+        while ((c = getopt(argcnt, args, "fuc:rvol:")) != EOF) {
+                switch (c)
 		{
 		case 'c':
 			count = atoi(optarg);
@@ -4223,14 +4383,28 @@
 			flags |= SHOW_OFFSET;
 			break;
 
-                case 'l':
+		case 'l':
                         if (IS_A_NUMBER(optarg))
                                 list_head_offset = stol(optarg,
                                         FAULT_ON_ERROR, NULL);
                         else if (arg_to_datatype(optarg,
-                                um, RETURN_ON_ERROR) > 1)
-                                list_head_offset = um->member_offset;
-                        break;
+                                dm, RETURN_ON_ERROR) > 1)
+                                list_head_offset = dm->member_offset;
+			else
+				error(FATAL, "invalid -l option: %s\n", 
+					optarg);
+			break;
+
+		case 'f':
+			if (!pc->dumpfile)
+				error(FATAL,
+				   	"-f option requires a dumpfile\n");
+			pc->curcmd_flags |= MEMTYPE_FILEADDR;
+			break;
+
+		case 'u':
+			pc->curcmd_flags |= MEMTYPE_UVADDR;
+			break;
 
 		default:
 			argerrs++;
@@ -4241,75 +4415,177 @@
 	if (argerrs || !args[optind])
 		cmd_usage(pc->curcmd, SYNOPSIS);
 
-	if ((arg_to_datatype(args[optind++], um, FAULT_ON_ERROR) > 1) && 
-	     rawdata)
-        	error(FATAL, "member-specific output not allowed with -r\n");
+        if ((count_chars(args[optind], ',')+1) > MAXARGS)
+                error(FATAL, "too many members in comma-separated list!\n");
 
-	if ((len = um->size) < 0)  {
-		error(INFO, "union not found: %s\n", um->name);
-		cmd_usage(pc->curcmd, SYNOPSIS);
-	}
-	
-	if (!args[optind]) {
-		do_datatype_declaration(um, flags | (um->flags & TYPEDEF));
-                return;
-	}
+	if ((count_chars(args[optind], '.') > 1) ||
+	    (LASTCHAR(args[optind]) == ',') ||
+	    (LASTCHAR(args[optind]) == '.'))
+		error(FATAL, "invalid format: %s\n", args[optind]);
+
+	optind_save = optind;
+
+        /*
+         *  Take care of address and count (array).
+         */
+	while (args[++optind]) {
+		if (aflag && (count != 0xdeadbeef))
+			error(FATAL, "too many arguments!\n");
 
-	while (args[optind]) {
 		if (clean_arg() && IS_A_NUMBER(args[optind])) { 
-                        if (aflag)
-                                count = stol(args[optind],
-                                        FAULT_ON_ERROR, NULL);
-                        else {
-                                if (!IS_KVADDR(addr = htol(args[optind],
+			if (aflag) 
+				count = stol(args[optind], 
+					FAULT_ON_ERROR, NULL);
+			else {
+				if (pc->curcmd_flags & MEMTYPE_FILEADDR)
+					pc->curcmd_private = stoll(args[optind], 
+						FAULT_ON_ERROR, NULL);
+				else if (pc->curcmd_flags & MEMTYPE_UVADDR) {
+					addr = htol(args[optind], FAULT_ON_ERROR,
+						NULL);
+				} else if (!IS_KVADDR(addr = htol(args[optind], 
                                     FAULT_ON_ERROR, NULL)))
-                                        error(FATAL,
-                                        "invalid kernel virtual address: %s\n",
-                                                args[optind]);
-                                aflag++;
-                        }
-		}
-	        else if ((sp = symbol_search(args[optind]))) {
+					error(FATAL, 
+					"invalid kernel virtual address: %s\n",
+						args[optind]);
+				aflag++;
+			}
+		} else if ((sp = symbol_search(args[optind]))) {
 	                addr = sp->value;
 			aflag++;
-		} else {
+	        } else {
 			fprintf(fp, "symbol not found: %s\n", args[optind]);
 	                fprintf(fp, "possible alternatives:\n");
 	                if (!symbol_query(args[optind], "  ", NULL))
 	                   	fprintf(fp, "  (none found)\n");
-			return;
+			goto freebuf;
 		}
-		optind++;
 	}
 
-        if (!aflag) 
-                error(FATAL, "no kernel virtual address argument entered\n");
+	optind = optind_save;
+
+	if (count == 0xdeadbeef)
+		count = 1;
+	else if (!aflag)
+		error(FATAL, "no kernel virtual address argument entered\n");
+
+	if ((flags & SHOW_OFFSET) && aflag) {
+		error(INFO, "-o option not valid with an address argument\n");
+		flags &= ~SHOW_OFFSET;
+	}
 
 	if (list_head_offset)
 		addr -= list_head_offset;
 
+	/*
+	 *  Handle struct.member[,member] argument format.
+	 */
+	if (strstr(args[optind], ".")) {
+                structname = GETBUF(strlen(args[optind])+1);
+                strcpy(structname, args[optind]);
+		separator = strstr(structname, ".");
+
+                members = GETBUF(strlen(args[optind])+1);
+                strcpy(members, separator+1);
+                replace_string(members, ",", ' ');
+                argc_members = parse_line(members, memberlist);
+        } else
+                structname = args[optind];
+
+	if ((arg_to_datatype(structname, dm, DATATYPE_QUERY|RETURN_ON_ERROR) < 1))
+		error(FATAL, "invalid data structure reference: %s\n", structname);
+
+        if ((argc_members > 1) && !aflag) {
+                error(INFO, flags & SHOW_OFFSET ? 
+		    "-o option not valid with multiple member format\n" :
+		    "multiple member format not supported in this syntax\n");
+		*separator = NULLCHAR;
+		argc_members = 0;
+		flags |= SHOW_OFFSET;
+	}
+
+	len = dm->size;
+
 	if (count < 0) {
 		addr -= len * abs(count);
 		addr += len;
 	}
 
-	for (c = 0; c < abs(count); c++, addr += len) {
-		if (rawdata) 
-			raw_data_dump(addr, len, flags & STRUCT_VERBOSE);
-		else {
-			if (um->member)
-				open_tmpfile();
-
-			print_union(um->name, addr);
-
-			if (um->member) {
-				parse_for_member(um, PARSE_FOR_DATA);
-				close_tmpfile();
+	if (pc->curcmd_flags & MEMTYPE_FILEADDR)
+		addr = 0;  /* unused, but parsed by gdb */
+
+       	for (c = 0; c < abs(count); c++, addr += len, pc->curcmd_private += len) {
+		if (c) 
+			fprintf(fp,"\n");
+
+		i = 0;
+        	do {
+                	if (argc_members) {
+                        	*separator = '.';
+                        	strcpy(separator+1, memberlist[i]);
 			}
-		}
+
+			switch (arg_to_datatype(structname, dm, RETURN_ON_ERROR))
+			{
+			case 0: error(FATAL, "invalid data structure reference: %s\n", 
+					structname);
+				break;
+			case 1: break;
+			case 2: if (rawdata)
+        				error(FATAL, 
+					    "member-specific output not allowed with -r\n");
+				break;
+			}
+
+			if (!(dm->flags & TYPEDEF)) {
+			if (flags & (STRUCT_REQUEST|UNION_REQUEST)) {
+					if ((flags & (STRUCT_REQUEST|UNION_REQUEST)) != dm->type) 
+						goto freebuf;
+				} else
+					flags |= dm->type;
+			}
+
+			/* 
+	 		 *  No address was passed -- dump the structure/member declaration.
+	 		 */
+			if (!aflag) {
+				do_datatype_declaration(dm, flags | (dm->flags & TYPEDEF));
+				goto freebuf;
+			}
+
+			if (!(flags & (UNION_REQUEST|STRUCT_REQUEST)))
+				error(FATAL, "invalid argument");
+
+			/*
+		 	 *  Display data.
+		 	 */
+                	if (rawdata)
+                        	raw_data_dump(addr, len, flags & STRUCT_VERBOSE);
+                	else {
+	                        if (dm->member)
+	                                open_tmpfile();
+	
+	        		if (flags & UNION_REQUEST)
+	                		print_union(dm->name, addr);
+	        		else if (flags & STRUCT_REQUEST)
+	                		print_struct(dm->name, addr);
+	
+	                        if (dm->member) {
+	                                parse_for_member(dm, PARSE_FOR_DATA);
+	                                close_tmpfile();
+	                        }
+                	}
+		} while (++i < argc_members);
+        }
+
+freebuf:
+        if (argc_members) {
+                FREEBUF(structname);
+                FREEBUF(members);
 	}
 }
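
The new "struct.member[,member]" syntax is split by rewriting the commas and tokenizing the
member list.  A standalone sketch of that splitting step, with plain strchr()/strtok()
standing in for the replace_string()/parse_line() helpers used above:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char arg[] = "task_struct.pid,comm";	/* e.g. "struct.member[,member]" */
	char *dot, *member;

	if ((dot = strchr(arg, '.')) != NULL) {
		*dot = '\0';
		printf("struct: %s\n", arg);
		for (member = strtok(dot + 1, ","); member != NULL;
		     member = strtok(NULL, ","))
			printf("member: %s\n", member);
	}
	return 0;
}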
 
+
 /*
  *  Generic function for dumping data structure declarations, with a small
  *  fixup for typedefs, sizes and member offsets.
@@ -4405,7 +4681,10 @@
 
 	if (!(p1 = strstr(s, "."))) 
 		both = FALSE;
-	else {
+	else if (flags & DATATYPE_QUERY) {
+        	*p1 = NULLCHAR;
+		both = FALSE;
+	} else {
 		if ((p1 == s) || !strlen(p1+1))
         		goto datatype_member_fatal;
         	*p1 = NULLCHAR;
@@ -4634,6 +4913,27 @@
 }
 
 /*
+ *  Given the name of an enum, return its value.
+ */
+int 
+enumerator_value(char *e, long *value) 
+{
+	struct datatype_member datatype_member, *dm;
+
+	dm = &datatype_member;
+
+        if (arg_to_datatype(e, dm, RETURN_ON_ERROR)) {
+                if ((dm->size >= 0) && 
+		    (dm->type == ENUM) && dm->tagname) {
+			*value = dm->value;
+			return TRUE;
+		}
+	}
+
+	return FALSE;
+}
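
A usage fragment for enumerator_value() (assumes crash's globals; the enumerator name is
only an example):

	long nr_free_pages = 0;

	/* Read the enumerator from the debuginfo rather than hard-coding it. */
	if (enumerator_value("NR_FREE_PAGES", &nr_free_pages))
		fprintf(fp, "NR_FREE_PAGES = %ld\n", nr_free_pages);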
+
+/*
  *  Verify that a datatype exists, but return on error.
  */
 int
@@ -4679,7 +4979,7 @@
 
 	leader = do_load_module_filter = restore_radix = 0;
 
-        while ((c = getopt(argcnt, args, "dhx")) != EOF) {
+        while ((c = getopt(argcnt, args, "dhxu")) != EOF) {
                 switch(c)
                 {
 		case 'd':
@@ -4695,6 +4995,10 @@
                         output_format = (output_radix == 10) ? 0 : 'x';
                         break;
 
+		case 'u':
+			pc->curcmd_flags |= MEMTYPE_UVADDR;
+			break;
+
                 default:
                         argerrs++;
                         break;
@@ -4705,6 +5009,8 @@
                 cmd_usage(pc->curcmd, SYNOPSIS);
 
 	if ((sp = symbol_search(args[optind])) && !args[optind+1]) {
+		if (STRNEQ(sp->name, "per_cpu__") && display_per_cpu_info(sp))
+			return;
 		sprintf(buf2, "%s = ", args[optind]);
 		leader = strlen(buf2);
 		if (module_symbol(sp->value, NULL, NULL, NULL, output_radix))
@@ -4758,6 +5064,39 @@
 }
 
 /*
+ *  Display the datatype of the per_cpu__xxx symbol and 
+ *  the addresses of each of its per-cpu instances.
+ */
+static int
+display_per_cpu_info(struct syment *sp)
+{
+	int c;
+	ulong addr;
+	char buf[BUFSIZE];
+
+	if (((kt->flags & (SMP|PER_CPU_OFF)) != (SMP|PER_CPU_OFF)) ||
+	    (sp->value < symbol_value("__per_cpu_start")) || 
+	    (sp->value >= symbol_value("__per_cpu_end")) ||
+	    !((sp->type == 'd') || (sp->type == 'D')))
+		return FALSE;
+
+	fprintf(fp, "PER-CPU DATA TYPE:\n  ");
+        sprintf(buf, "whatis %s", sp->name);
+        if (!gdb_pass_through(buf, pc->nullfp, GNU_RETURN_ON_ERROR))
+                fprintf(fp, "[undetermined type] %s;\n", sp->name);
+	else
+        	whatis_variable(sp);
+
+	fprintf(fp, "PER-CPU ADDRESSES:\n");
+	for (c = 0; c < kt->cpus; c++) {
+		addr = sp->value + kt->__per_cpu_offset[c];
+		fprintf(fp, "  [%d]: %lx\n", c, addr);
+	}
+
+	return TRUE;
+}
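
Each per-cpu instance lives at the symbol's value plus that CPU's entry in
__per_cpu_offset[].  A standalone sketch of the arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Values below are invented purely for illustration. */
	unsigned long sym_value = 0xffffffff80512340UL;	/* per_cpu__foo */
	unsigned long per_cpu_offset[2] = { 0x7000UL, 0x10e000UL };
	int cpu;

	for (cpu = 0; cpu < 2; cpu++)
		printf("  [%d]: %lx\n", cpu, sym_value + per_cpu_offset[cpu]);

	return 0;
}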
+
+/*
  *  As a last-ditch effort before a command is thrown away by exec_command(),
  *  args[0] is checked to see whether it's the name of a variable, structure, 
  *  union, or typedef.  If so, args[0] is changed to the appropriate command, 
@@ -4793,9 +5132,9 @@
 		command = "whatis";
 	else if (!datatype_exists(args[0]))
 		return FALSE;
-	else if (!arg_to_datatype(buf, dm, RETURN_ON_ERROR)) {
+	else if (!arg_to_datatype(buf, dm, RETURN_ON_ERROR|DATATYPE_QUERY))
 		return FALSE;
-	} else {
+	else {
                 if (is_gdb_command(FALSE, RETURN_ON_ERROR)) {
 			pc->curcmd = pc->program_name;
                 	error(FATAL, 
@@ -5056,6 +5395,8 @@
 		fprintf(ofp, "%sSTRUCT_VERBOSE", others++ ? "|" : "");
 	if (flags & SHOW_OFFSET)
 		fprintf(ofp, "%sSHOW_OFFSET", others++ ? "|" : "");
+	if (flags & DATATYPE_QUERY)
+		fprintf(ofp, "%sDATATYPE_QUERY", others++ ? "|" : "");
 	fprintf(ofp, ")\n");
 }
 
@@ -5079,7 +5420,8 @@
 
 	s = dm->member;
 	indent = 0;
-	on = array = FALSE;
+	array = FALSE;
+	on = 0;
 	rewind(pc->tmpfile);
 
 	switch (flag)  
@@ -5090,7 +5432,7 @@
 next_item:
 		while (fgets(buf, BUFSIZE, pc->tmpfile)) {
 			if (STRNEQ(buf, lookfor1) || STRNEQ(buf, lookfor2)) {
-				on = TRUE;
+				on++;
 				if (strstr(buf, "= {")) 
 					indent = count_leading_spaces(buf);
 				if (strstr(buf, "["))
@@ -5098,16 +5440,22 @@
 			}
 	
 			if (on) {
+				if ((indent && (on > 1) && (count_leading_spaces(buf) == indent) &&
+				    !strstr(buf, "}")) || (buf[0] == '}')) {
+					break;
+				}
 				fprintf(pc->saved_fp, buf);
 				if (!indent)
 					break;
 				if (strstr(buf, "}") && 
 				    (count_leading_spaces(buf) == indent))
 					break;
+				on++;
 			}
 		}
 		if (array) {
 			on = array = FALSE;
+			on = 0;
 			goto next_item; 
 		}
 		break;
@@ -5174,7 +5522,7 @@
 {
 	int i, c, len;
 	long offset;
-	char *target;
+	char *t1, *target;
 	char *arglist[MAXARGS];
 	char buf1[BUFSIZE];
 	char fmt[BUFSIZE];
@@ -5186,6 +5534,9 @@
 		return FALSE;
 	}
 
+	if (STRNEQ(inbuf, "        "))
+		goto do_empty_offset;
+
 	if (STRNEQ(inbuf, "    union {")) 
 		dm->flags |= IN_UNION;
 	if (STRNEQ(inbuf, "    struct {")) 
@@ -5215,9 +5566,20 @@
 			}
 		}
 	} else if (c) { 
-		target = arglist[c-1];
-		if (!strstr(target, ";"))
-			target = NULL;
+		for (i = 0; i < c; i++) {
+			if (STRNEQ(arglist[i], "(*")) {
+				target = arglist[i]+2;
+				if (!(t1 = strstr(target, ")")))
+					continue;
+				*t1 = NULLCHAR;
+				break;
+			}
+		}
+		if (i == c) {
+			target = arglist[c-1];
+			if (!strstr(target, ";"))
+				target = NULL;
+		}
 	}
 
 	if (!target) 
@@ -5307,7 +5669,8 @@
 	if ((retval = builtin_array_length(s, 0, two_dim)))
 		return retval;
 
-	if (symbol_search(s)) {
+	/* symbol_search cannot be done with just kernel type information */
+	if (!(LKCD_KERNTYPES()) && symbol_search(s)) {
 		if (!two_dim) {
 			req = &gnu_request;
 			if ((get_symbol_type(copy, NULL, req) == 
@@ -5417,6 +5780,23 @@
 }
 
 /*
+ *   Get and store the size of a "known" array.
+ *   A wrapper for get_array_length(), for cases in which
+ *   the name of the result to be stored is different from the
+ *   structure.member to be evaluated.
+ */
+int
+get_array_length_alt(char *name, char *s, int *two_dim, long entry_size)
+{
+	int retval;
+
+	retval = get_array_length(s, two_dim, entry_size);
+	if (retval)
+		retval = builtin_array_length(name, retval, two_dim);
+	return retval;
+}
+
+/*
  *  Designed for use by non-debug kernels, but used by all.
  */
 int
@@ -5433,6 +5813,8 @@
                 lenptr = &array_table.kmem_cache_s_c_name;
         else if (STREQ(s, "kmem_cache_s.array"))
                 lenptr = &array_table.kmem_cache_s_array;
+        else if (STREQ(s, "kmem_cache.array"))
+                lenptr = &array_table.kmem_cache_s_array;
         else if (STREQ(s, "kmem_cache_s.cpudata"))
                 lenptr = &array_table.kmem_cache_s_cpudata;
 	else if (STREQ(s, "log_buf")) 
@@ -5469,11 +5851,16 @@
                 lenptr = &array_table.prio_array_queue;
 	else if (STREQ(s, "height_to_maxindex"))
 		lenptr = &array_table.height_to_maxindex;
+	else if (STREQ(s, "pid_hash"))
+		lenptr = &array_table.pid_hash;
         else if (STREQ(s, "free_area")) {
                 lenptr = &array_table.free_area;
 		if (two_dim)
 			dimptr = &array_table.free_area_DIMENSION;
-	} 
+	} else if (STREQ(s, "kmem_cache.node"))
+		lenptr = &array_table.kmem_cache_node;
+	else if (STREQ(s, "kmem_cache.cpu_slab"))
+		lenptr = &array_table.kmem_cache_cpu_slab;
 
 	if (!lenptr)                /* not stored */
 		return(len);        
@@ -5606,8 +5993,16 @@
                 OFFSET(task_struct_last_run));
         fprintf(fp, "         task_struct_timestamp: %ld\n",
                 OFFSET(task_struct_timestamp));
+        fprintf(fp, "        task_struct_sched_info: %ld\n",
+                OFFSET(task_struct_sched_info));
+	fprintf(fp, "       sched_info_last_arrival: %ld\n",
+                OFFSET(sched_info_last_arrival));
         fprintf(fp, "       task_struct_thread_info: %ld\n",
                 OFFSET(task_struct_thread_info));
+        fprintf(fp, "           task_struct_nsproxy: %ld\n",
+                OFFSET(task_struct_nsproxy));
+        fprintf(fp, "              task_struct_rlim: %ld\n",
+                OFFSET(task_struct_rlim));
 
 	fprintf(fp, "              thread_info_task: %ld\n",
                 OFFSET(thread_info_task));
@@ -5618,11 +6013,31 @@
 	fprintf(fp, "      thread_info_previous_esp: %ld\n",
                 OFFSET(thread_info_previous_esp));
 
+	fprintf(fp, "                nsproxy_mnt_ns: %ld\n",
+		OFFSET(nsproxy_mnt_ns));
+	fprintf(fp, "            mnt_namespace_root: %ld\n",
+		OFFSET(mnt_namespace_root));
+	fprintf(fp, "            mnt_namespace_list: %ld\n",
+		OFFSET(mnt_namespace_list));
+
         fprintf(fp, "                  pid_link_pid: %ld\n",
                 OFFSET(pid_link_pid));
         fprintf(fp, "                pid_hash_chain: %ld\n",
                 OFFSET(pid_hash_chain));
 
+	fprintf(fp, "                   pid_numbers: %ld\n",
+		OFFSET(pid_numbers));
+
+	fprintf(fp, "                       upid_nr: %ld\n",
+		OFFSET(upid_nr));
+	fprintf(fp, "                       upid_ns: %ld\n",
+		OFFSET(upid_ns));
+	fprintf(fp, "                upid_pid_chain: %ld\n",
+		OFFSET(upid_pid_chain));
+
+	fprintf(fp, "                     pid_tasks: %ld\n",
+		OFFSET(pid_tasks));
+
         fprintf(fp, "               hlist_node_next: %ld\n",
 		OFFSET(hlist_node_next));
         fprintf(fp, "              hlist_node_pprev: %ld\n",
@@ -5647,6 +6062,11 @@
         	OFFSET(signal_struct_count));
 	fprintf(fp, "          signal_struct_action: %ld\n",
         	OFFSET(signal_struct_action));
+	fprintf(fp, "  signal_struct_shared_pending: %ld\n",
+        	OFFSET(signal_struct_shared_pending));
+	fprintf(fp, "            signal_struct_rlim: %ld\n",
+        	OFFSET(signal_struct_rlim));
+
         fprintf(fp, "        task_struct_start_time: %ld\n",
                 OFFSET(task_struct_start_time));
         fprintf(fp, "             task_struct_times: %ld\n",
@@ -5766,10 +6186,22 @@
 		OFFSET(mm_struct_pgd));
 	fprintf(fp, "                 mm_struct_rss: %ld\n", 
 		OFFSET(mm_struct_rss));
+	fprintf(fp, "            mm_struct_anon_rss: %ld\n", 
+		OFFSET(mm_struct_anon_rss));
+	fprintf(fp, "            mm_struct_file_rss: %ld\n", 
+		OFFSET(mm_struct_file_rss));
 	fprintf(fp, "            mm_struct_total_vm: %ld\n", 
 		OFFSET(mm_struct_total_vm));
 	fprintf(fp, "          mm_struct_start_code: %ld\n", 
 		OFFSET(mm_struct_start_code));
+	fprintf(fp, "           mm_struct_arg_start: %ld\n", 
+		OFFSET(mm_struct_arg_start));
+	fprintf(fp, "             mm_struct_arg_end: %ld\n", 
+		OFFSET(mm_struct_arg_end));
+	fprintf(fp, "           mm_struct_env_start: %ld\n", 
+		OFFSET(mm_struct_env_start));
+	fprintf(fp, "             mm_struct_env_end: %ld\n", 
+		OFFSET(mm_struct_env_end));
 
 	fprintf(fp, "          vm_area_struct_vm_mm: %ld\n", 
 		OFFSET(vm_area_struct_vm_mm));
@@ -5885,6 +6317,17 @@
         fprintf(fp, "                      page_pte: %ld\n",
                 OFFSET(page_pte));
 
+        fprintf(fp, "                    page_inuse: %ld\n",
+                OFFSET(page_inuse));
+        fprintf(fp, "                  page_objects: %ld\n",
+                OFFSET(page_objects));
+        fprintf(fp, "                     page_slab: %ld\n",
+                OFFSET(page_slab));
+        fprintf(fp, "               page_first_page: %ld\n",
+                OFFSET(page_first_page));
+        fprintf(fp, "                 page_freelist: %ld\n",
+                OFFSET(page_freelist));
+
         fprintf(fp, "    swap_info_struct_swap_file: %ld\n",
 		OFFSET(swap_info_struct_swap_file));
         fprintf(fp, "  swap_info_struct_swap_vfsmnt: %ld\n",
@@ -5922,6 +6365,8 @@
 		OFFSET(irq_desc_t_status));
 	fprintf(fp, "            irq_desc_t_handler: %ld\n",
 		OFFSET(irq_desc_t_handler));
+	fprintf(fp, "               irq_desc_t_chip: %ld\n",
+		OFFSET(irq_desc_t_chip));
 	fprintf(fp, "             irq_desc_t_action: %ld\n",
 		OFFSET(irq_desc_t_action));
 	fprintf(fp, "              irq_desc_t_depth: %ld\n",
@@ -5967,11 +6412,52 @@
 	fprintf(fp, "hw_interrupt_type_set_affinity: %ld\n",
 		OFFSET(hw_interrupt_type_set_affinity));
 
+	fprintf(fp, "             irq_chip_typename: %ld\n",
+		OFFSET(irq_chip_typename));
+	fprintf(fp, "              irq_chip_startup: %ld\n",
+		OFFSET(irq_chip_startup));
+	fprintf(fp, "             irq_chip_shutdown: %ld\n",
+		OFFSET(irq_chip_shutdown));
+	fprintf(fp, "               irq_chip_enable: %ld\n",
+		OFFSET(irq_chip_enable));
+	fprintf(fp, "              irq_chip_disable: %ld\n",
+		OFFSET(irq_chip_disable));
+	fprintf(fp, "                  irq_chip_ack: %ld\n",
+		OFFSET(irq_chip_ack));
+	fprintf(fp, "                 irq_chip_mask: %ld\n",
+		OFFSET(irq_chip_mask));
+	fprintf(fp, "             irq_chip_mask_ack: %ld\n",
+		OFFSET(irq_chip_mask_ack));
+	fprintf(fp, "               irq_chip_unmask: %ld\n",
+		OFFSET(irq_chip_unmask));
+	fprintf(fp, "                  irq_chip_eoi: %ld\n",
+		OFFSET(irq_chip_eoi));
+	fprintf(fp, "                  irq_chip_end: %ld\n",
+		OFFSET(irq_chip_end));
+	fprintf(fp, "         irq_chip_set_affinity: %ld\n",
+		OFFSET(irq_chip_set_affinity));
+	fprintf(fp, "            irq_chip_retrigger: %ld\n",
+		OFFSET(irq_chip_retrigger));
+	fprintf(fp, "             irq_chip_set_type: %ld\n",
+		OFFSET(irq_chip_set_type));
+	fprintf(fp, "             irq_chip_set_wake: %ld\n",
+		OFFSET(irq_chip_set_wake));
+
 	fprintf(fp, "irq_cpustat_t___softirq_active: %ld\n",
         	OFFSET(irq_cpustat_t___softirq_active));
 	fprintf(fp, "  irq_cpustat_t___softirq_mask: %ld\n",
         	OFFSET(irq_cpustat_t___softirq_mask));
 	
+        fprintf(fp, "              files_struct_fdt: %ld\n",
+		OFFSET(files_struct_fdt));
+        fprintf(fp, "               fdtable_max_fds: %ld\n",
+		OFFSET(fdtable_max_fds));
+        fprintf(fp, "             fdtable_max_fdset: %ld\n",
+		OFFSET(fdtable_max_fdset));
+        fprintf(fp, "              fdtable_open_fds: %ld\n",
+		OFFSET(fdtable_open_fds));
+        fprintf(fp, "                    fdtable_fd: %ld\n",
+		OFFSET(fdtable_fd));
         fprintf(fp, "          files_struct_max_fds: %ld\n", 
 		OFFSET(files_struct_max_fds));
         fprintf(fp, "        files_struct_max_fdset: %ld\n", 
@@ -5988,6 +6474,12 @@
 		OFFSET(file_f_vfsmnt));
         fprintf(fp, "                  file_f_count: %ld\n", 
 		OFFSET(file_f_count));
+        fprintf(fp, "                   file_f_path: %ld\n", 
+		OFFSET(file_f_path));
+        fprintf(fp, "                      path_mnt: %ld\n", 
+		OFFSET(path_mnt));
+        fprintf(fp, "                   path_dentry: %ld\n", 
+		OFFSET(path_dentry));
 	fprintf(fp, "                fs_struct_root: %ld\n",
 		OFFSET(fs_struct_root));
 	fprintf(fp, "                 fs_struct_pwd: %ld\n",
@@ -6165,6 +6657,51 @@
         fprintf(fp, "                     slab_free: %ld\n",
                 OFFSET(slab_free));
 
+        fprintf(fp, "               kmem_cache_size: %ld\n",
+                OFFSET(kmem_cache_size));
+        fprintf(fp, "            kmem_cache_objsize: %ld\n",
+                OFFSET(kmem_cache_objsize));
+        fprintf(fp, "             kmem_cache_offset: %ld\n",
+                OFFSET(kmem_cache_offset));
+        fprintf(fp, "              kmem_cache_order: %ld\n",
+                OFFSET(kmem_cache_order));
+        fprintf(fp, "         kmem_cache_local_node: %ld\n",
+                OFFSET(kmem_cache_local_node));
+        fprintf(fp, "            kmem_cache_objects: %ld\n",
+                OFFSET(kmem_cache_objects));
+        fprintf(fp, "              kmem_cache_inuse: %ld\n",
+                OFFSET(kmem_cache_inuse));
+        fprintf(fp, "              kmem_cache_align: %ld\n",
+                OFFSET(kmem_cache_align));
+        fprintf(fp, "               kmem_cache_name: %ld\n",
+                OFFSET(kmem_cache_name));
+        fprintf(fp, "               kmem_cache_list: %ld\n",
+                OFFSET(kmem_cache_list));
+        fprintf(fp, "               kmem_cache_node: %ld\n",
+                OFFSET(kmem_cache_node));
+        fprintf(fp, "           kmem_cache_cpu_slab: %ld\n",
+                OFFSET(kmem_cache_cpu_slab));
+        fprintf(fp, "                 kmem_cache_oo: %ld\n",
+                OFFSET(kmem_cache_oo));
+
+        fprintf(fp, "    kmem_cache_node_nr_partial: %ld\n",
+                OFFSET(kmem_cache_node_nr_partial));
+        fprintf(fp, "      kmem_cache_node_nr_slabs: %ld\n",
+                OFFSET(kmem_cache_node_nr_slabs));
+        fprintf(fp, "       kmem_cache_node_partial: %ld\n",
+                OFFSET(kmem_cache_node_partial));
+        fprintf(fp, "          kmem_cache_node_full: %ld\n",
+                OFFSET(kmem_cache_node_full));
+
+        fprintf(fp, "       kmem_cache_cpu_freelist: %ld\n",
+                OFFSET(kmem_cache_cpu_freelist));
+        fprintf(fp, "           kmem_cache_cpu_page: %ld\n",
+                OFFSET(kmem_cache_cpu_page));
+        fprintf(fp, "           kmem_cache_cpu_node: %ld\n",
+                OFFSET(kmem_cache_cpu_node));
+        fprintf(fp, "              kmem_cache_flags: %ld\n",
+                OFFSET(kmem_cache_flags));
+
 	fprintf(fp, "               net_device_next: %ld\n",
         	OFFSET(net_device_next));
 	fprintf(fp, "               net_device_name: %ld\n",
@@ -6175,6 +6712,11 @@
         	OFFSET(net_device_addr_len));
 	fprintf(fp, "             net_device_ip_ptr: %ld\n",
         	OFFSET(net_device_ip_ptr));
+	fprintf(fp, "           net_device_dev_list: %ld\n",
+		OFFSET(net_device_dev_list));
+	fprintf(fp, "             net_dev_base_head: %ld\n",
+		OFFSET(net_dev_base_head));
+
 	fprintf(fp, "                   device_next: %ld\n",
         	OFFSET(device_next));
 	fprintf(fp, "                   device_name: %ld\n",
@@ -6217,6 +6759,11 @@
         fprintf(fp, "                  inet_opt_num: %ld\n", 
 		OFFSET(inet_opt_num));
 
+        fprintf(fp, "          ipv6_pinfo_rcv_saddr: %ld\n", 
+		OFFSET(ipv6_pinfo_rcv_saddr));
+        fprintf(fp, "              ipv6_pinfo_daddr: %ld\n", 
+		OFFSET(ipv6_pinfo_daddr));
+
         fprintf(fp, "               timer_list_list: %ld\n",
                 OFFSET(timer_list_list));
         fprintf(fp, "               timer_list_next: %ld\n", 
@@ -6291,6 +6838,8 @@
                 OFFSET(zone_struct_size));
 	fprintf(fp, "           zone_struct_memsize: %ld\n",
                 OFFSET(zone_struct_memsize));
+	fprintf(fp, "    zone_struct_zone_start_pfn: %ld\n",
+                OFFSET(zone_struct_zone_start_pfn));
 	fprintf(fp, "  zone_struct_zone_start_paddr: %ld\n",
                 OFFSET(zone_struct_zone_start_paddr));
 	fprintf(fp, "  zone_struct_zone_start_mapnr: %ld\n",
@@ -6324,6 +6873,8 @@
                 OFFSET(zone_name));
 	fprintf(fp, "            zone_spanned_pages: %ld\n",
                 OFFSET(zone_spanned_pages));
+	fprintf(fp, "            zone_present_pages: %ld\n",
+                OFFSET(zone_present_pages));
 	fprintf(fp, "           zone_zone_start_pfn: %ld\n",
                 OFFSET(zone_zone_start_pfn));
 	fprintf(fp, "                zone_pages_min: %ld\n",
@@ -6332,6 +6883,18 @@
                 OFFSET(zone_pages_low));
 	fprintf(fp, "               zone_pages_high: %ld\n",
                 OFFSET(zone_pages_high));
+	fprintf(fp, "                  zone_vm_stat: %ld\n",
+                OFFSET(zone_vm_stat));
+	fprintf(fp, "                zone_nr_active: %ld\n",
+                OFFSET(zone_nr_active));
+	fprintf(fp, "              zone_nr_inactive: %ld\n",
+                OFFSET(zone_nr_inactive));
+	fprintf(fp, "        zone_all_unreclaimable: %ld\n",
+                OFFSET(zone_all_unreclaimable));
+	fprintf(fp, "                    zone_flags: %ld\n",
+                OFFSET(zone_flags));
+	fprintf(fp, "            zone_pages_scanned: %ld\n",
+                OFFSET(zone_pages_scanned));
 
         fprintf(fp, "                neighbour_next: %ld\n", 
 		OFFSET(neighbour_next));
@@ -6471,10 +7034,61 @@
 		OFFSET(x8664_pda_irqstackptr));
 	fprintf(fp, "          x8664_pda_level4_pgt: %ld\n",
 		OFFSET(x8664_pda_level4_pgt));
+	fprintf(fp, "                  x8664_pda_me: %ld\n",
+		OFFSET(x8664_pda_me));
 
 	fprintf(fp, "                tss_struct_ist: %ld\n", 
 		OFFSET(tss_struct_ist));
+	fprintf(fp, "   mem_section_section_mem_map: %ld\n",
+		OFFSET(mem_section_section_mem_map));
 
+	fprintf(fp, "  vcpu_guest_context_user_regs: %ld\n",
+		OFFSET(vcpu_guest_context_user_regs));
+	fprintf(fp, "             cpu_user_regs_eip: %ld\n",
+		OFFSET(cpu_user_regs_eip));
+	fprintf(fp, "             cpu_user_regs_esp: %ld\n",
+		OFFSET(cpu_user_regs_esp));
+	fprintf(fp, "             cpu_user_regs_rip: %ld\n",
+		OFFSET(cpu_user_regs_rip));
+	fprintf(fp, "             cpu_user_regs_rsp: %ld\n",
+		OFFSET(cpu_user_regs_rsp));
+	fprintf(fp, "             unwind_table_core: %ld\n",
+		OFFSET(unwind_table_core));
+	fprintf(fp, "             unwind_table_init: %ld\n",
+		OFFSET(unwind_table_init));
+	fprintf(fp, "          unwind_table_address: %ld\n",
+		OFFSET(unwind_table_address));
+	fprintf(fp, "             unwind_table_size: %ld\n",
+		OFFSET(unwind_table_size));
+	fprintf(fp, "             unwind_table_link: %ld\n",
+		OFFSET(unwind_table_link));
+	fprintf(fp, "             unwind_table_name: %ld\n",
+		OFFSET(unwind_table_name));
+
+	fprintf(fp, "                        rq_cfs: %ld\n",
+		OFFSET(rq_cfs));
+	fprintf(fp, "                         rq_rt: %ld\n",
+		OFFSET(rq_rt));
+	fprintf(fp, "                 rq_nr_running: %ld\n",
+		OFFSET(rq_nr_running));
+	fprintf(fp, "                task_struct_se: %ld\n",
+		OFFSET(task_struct_se));
+	fprintf(fp, "         sched_entity_run_node: %ld\n",
+		OFFSET(sched_entity_run_node));
+	fprintf(fp, "             cfs_rq_nr_running: %ld\n",
+		OFFSET(cfs_rq_nr_running));
+	fprintf(fp, "            cfs_rq_rb_leftmost: %ld\n",
+		OFFSET(cfs_rq_rb_leftmost));
+	fprintf(fp, "         cfs_rq_tasks_timeline: %ld\n",
+		OFFSET(cfs_rq_tasks_timeline));
+	fprintf(fp, "                  rt_rq_active: %ld\n",
+		OFFSET(rt_rq_active));
+	fprintf(fp, "                pcpu_info_vcpu: %ld\n",
+		OFFSET(pcpu_info_vcpu));
+	fprintf(fp, "                pcpu_info_idle: %ld\n",
+		OFFSET(pcpu_info_idle));
+	fprintf(fp, "                vcpu_struct_rq: %ld\n",
+		OFFSET(vcpu_struct_rq));
 
 	fprintf(fp, "\n                    size_table:\n");
 	fprintf(fp, "                          page: %ld\n", SIZE(page));
@@ -6493,6 +7107,10 @@
         fprintf(fp, "                   array_cache: %ld\n", SIZE(array_cache));
         fprintf(fp, "                 kmem_bufctl_t: %ld\n", 
 		SIZE(kmem_bufctl_t));
+        fprintf(fp, "                    kmem_cache: %ld\n", SIZE(kmem_cache));
+        fprintf(fp, "               kmem_cache_node: %ld\n", SIZE(kmem_cache_node));
+        fprintf(fp, "                kmem_cache_cpu: %ld\n", SIZE(kmem_cache_cpu));
+
         fprintf(fp, "              swap_info_struct: %ld\n", 
 		SIZE(swap_info_struct));
         fprintf(fp, "                vm_area_struct: %ld\n", 
@@ -6512,6 +7130,7 @@
 	fprintf(fp, "                     fs_struct: %ld\n", SIZE(fs_struct));
 	fprintf(fp, "                  files_struct: %ld\n", 
 		SIZE(files_struct));
+	fprintf(fp, "                       fdtable: %ld\n", SIZE(fdtable));
 	fprintf(fp, "                          file: %ld\n", SIZE(file)); 
 	fprintf(fp, "                         inode: %ld\n", SIZE(inode)); 
 	fprintf(fp, "                      vfsmount: %ld\n", SIZE(vfsmount)); 
@@ -6546,8 +7165,11 @@
 	fprintf(fp, "                          sock: %ld\n", SIZE(sock));
 	fprintf(fp, "                     inet_sock: %ld\n", SIZE(inet_sock));
 	fprintf(fp, "                        socket: %ld\n", SIZE(socket));
+	fprintf(fp, "                      in6_addr: %ld\n", SIZE(in6_addr));
 	fprintf(fp, "                 signal_struct: %ld\n", 
 		SIZE(signal_struct));
+	fprintf(fp, "             sigpending_signal: %ld\n", 
+		SIZE(sigpending_signal));
 	fprintf(fp, "                  signal_queue: %ld\n", 
 		SIZE(signal_queue));
 	fprintf(fp, "                      sigqueue: %ld\n", SIZE(sigqueue));
@@ -6601,6 +7223,8 @@
 
 	fprintf(fp, "                     x8664_pda: %ld\n", 
 		SIZE(x8664_pda));
+	fprintf(fp, "                    ppc64_paca: %ld\n", 
+		SIZE(ppc64_paca));
 	fprintf(fp, "                   gate_struct: %ld\n", 
 		SIZE(gate_struct));
 	fprintf(fp, "                    tss_struct: %ld\n", 
@@ -6609,7 +7233,22 @@
 		SIZE(task_struct_start_time));
 	fprintf(fp, "                     cputime_t: %ld\n", 
 		SIZE(cputime_t));
-
+	fprintf(fp, "                   mem_section: %ld\n", 
+		SIZE(mem_section));
+	fprintf(fp, "                      pid_link: %ld\n", 
+		SIZE(pid_link));
+	fprintf(fp, "                          upid: %ld\n", 
+		SIZE(upid));
+	fprintf(fp, "                  unwind_table: %ld\n", 
+		SIZE(unwind_table));
+	fprintf(fp, "                        rlimit: %ld\n", 
+		SIZE(rlimit));
+	fprintf(fp, "                        cfs_rq: %ld\n", 
+		SIZE(cfs_rq));
+	fprintf(fp, "                     pcpu_info: %ld\n", 
+		SIZE(pcpu_info));
+	fprintf(fp, "                   vcpu_struct: %ld\n", 
+		SIZE(vcpu_struct));
 
         fprintf(fp, "\n                   array_table:\n");
 	/*
@@ -6663,6 +7302,12 @@
                 get_array_length("prio_array.queue", NULL, SIZE(list_head)));
 	fprintf(fp, "            height_to_maxindex: %d\n",
 		ARRAY_LENGTH(height_to_maxindex));
+	fprintf(fp, "                      pid_hash: %d\n",
+		ARRAY_LENGTH(pid_hash));
+	fprintf(fp, "               kmem_cache_node: %d\n",
+		ARRAY_LENGTH(kmem_cache_node));
+	fprintf(fp, "           kmem_cache_cpu_slab: %d\n",
+		ARRAY_LENGTH(kmem_cache_cpu_slab));
 
 	if (spec) {
 		int in_size_table, in_array_table, arrays, offsets, sizes;
@@ -6862,14 +7507,14 @@
 {
 	int i;
 	struct load_module *lm;
-	int request;
+	ulong request;
         asection **sec;
 
-	request = (int)((ulong)reqptr);
+	request = ((ulong)reqptr);
 
 	switch (request)
 	{
-	case (uint)KERNEL_SECTIONS:
+	case (ulong)KERNEL_SECTIONS:
         	sec = (asection **)st->sections;
         	for (i = 0; (i < st->bfd->section_count) && *sec; i++)
 			sec++;
@@ -6890,14 +7535,18 @@
                             SEC_HAS_CONTENTS))
                                 st->flags |= NO_SEC_CONTENTS;
                 }
+                if (STREQ(bfd_get_section_name(bfd, section), ".eh_frame")) {
+			st->dwarf_eh_frame_file_offset = (off_t)section->filepos;
+			st->dwarf_eh_frame_size = (ulong)bfd_section_size(bfd, section);
+		}
 		break;
 
-	case (uint)MODULE_SECTIONS:
+	case (ulong)MODULE_SECTIONS:
 		lm = st->current;
 		store_section_data(lm, bfd, section);
 		break;
 
-	case (uint)VERIFY_SECTIONS:
+	case (ulong)VERIFY_SECTIONS:
 		if (STREQ(bfd_get_section_name(bfd, section), ".text") ||
 		    STREQ(bfd_get_section_name(bfd, section), ".data")) {
 			if (!(bfd_get_section_flags(bfd, section) & SEC_LOAD))
@@ -6906,6 +7555,10 @@
 			    SEC_HAS_CONTENTS))
 				st->flags |= NO_SEC_CONTENTS;
 		}
+                if (STREQ(bfd_get_section_name(bfd, section), ".eh_frame")) {
+			st->dwarf_eh_frame_file_offset = (off_t)section->filepos;
+			st->dwarf_eh_frame_size = (ulong)bfd_section_size(bfd, section);
+		}
 		break;
 
 	default:
@@ -6960,8 +7613,9 @@
 	i = lm->mod_sections;
 	lm->mod_section_data[i].section = section;
 	lm->mod_section_data[i].priority = prio;
-	lm->mod_section_data[i].flags = section->flags;
+	lm->mod_section_data[i].flags = section->flags & ~SEC_FOUND;
 	lm->mod_section_data[i].size = bfd_section_size(bfd, section);
+	lm->mod_section_data[i].offset = 0;
 	if (strlen(name) < MAX_MOD_SEC_NAME)
 		strcpy(lm->mod_section_data[i].name, name);
 	else
@@ -7013,7 +7667,7 @@
  */
 
 static void
-calculate_load_order(struct load_module *lm, bfd *bfd)
+calculate_load_order_v1(struct load_module *lm, bfd *bfd)
 {
 	int i;
 	asection *section;
@@ -7073,6 +7727,134 @@
 }
 
 /*
+ * Later versions of kmod no longer get help from insmod,
+ * and while the heuristics might work, it's relatively
+ * straightforward to just try to match the sections in the object file
+ * with exported symbols.
+ *
+ * This works well if kallsyms is set, but may not work so well in other
+ * instances.
+ */
+static void
+calculate_load_order_v2(struct load_module *lm, bfd *bfd, int dynamic,
+	void *minisyms, long symcount, unsigned int size)
+{
+	struct syment *s1, *s2;
+	ulong sec_start, sec_end;
+	bfd_byte *from, *fromend;
+	asymbol *store;
+	asymbol *sym;
+	symbol_info syminfo;
+	char *secname;
+	int i;
+
+	if ((store = bfd_make_empty_symbol(bfd)) == NULL)
+		error(FATAL, "bfd_make_empty_symbol() failed\n");
+
+	s1 = lm->mod_symtable;
+	s2 = lm->mod_symend;
+	while (s1 < s2) {
+            ulong sym_offset = s1->value - lm->mod_base;
+	    if (MODULE_PSEUDO_SYMBOL(s1)) {
+		    s1++;
+		    continue;
+	    }
+
+            /* Skip over symbols whose sections have been identified. */
+            for (i = 0; i < lm->mod_sections; i++) {
+                    if ((lm->mod_section_data[i].flags & SEC_FOUND) == 0)
+                            continue;
+                    if (sym_offset >= lm->mod_section_data[i].offset
+                        && sym_offset < lm->mod_section_data[i].offset
+                            + lm->mod_section_data[i].size) {
+                            break;
+                    }
+            }
+
+            /* Matched one of the sections. Skip symbol. */
+            if (i < lm->mod_sections) {
+                    if (CRASHDEBUG(2)) {
+                        fprintf(fp, "skip %lx %s %s\n", s1->value, s1->name,
+                            lm->mod_section_data[i].name);
+                    }
+                    s1++;
+                    continue;
+            }
+
+	    /* Find the symbol in the object file. */
+	    from = (bfd_byte *) minisyms;
+	    fromend = from + symcount * size;
+	    secname = NULL;
+	    for (; from < fromend; from += size) {
+		    if ((sym = bfd_minisymbol_to_symbol(bfd, dynamic, from,
+			    store)) == NULL)
+			    error(FATAL,
+				    "bfd_minisymbol_to_symbol() failed\n");
+
+		    bfd_get_symbol_info(bfd, sym, &syminfo);
+                    if (CRASHDEBUG(3)) {
+                            fprintf(fp,"matching sym %s %lx against bfd %s %lx\n",
+                                s1->name, (long) s1->value, syminfo.name,
+                                (long) syminfo.value);
+                    }
+		    if (strcmp(syminfo.name, s1->name) == 0) {
+			    secname = (char *)bfd_get_section_name(bfd, sym->section);
+			    break;
+		    }
+
+	    }
+	    if (secname == NULL) {
+                    if (CRASHDEBUG(1)) {
+                        fprintf(fp, "symbol %s not found in module\n", s1->name);
+                    }
+		    s1++;
+		    continue;
+	    }
+
+	    /* Match the section it came in. */
+	    for (i = 0; i < lm->mod_sections; i++) {
+		    if (STREQ(lm->mod_section_data[i].name, secname)) {
+			    break;
+		    }
+	    }
+
+	    if (i == lm->mod_sections) {
+		    fprintf(fp, "?? Section %s not found for symbol %s\n",
+			secname, s1->name);
+		    s1++;
+		    continue;
+	    }
+
+            /* Update the offset information for the section */
+	    sec_start = s1->value - syminfo.value;
+	    sec_end = sec_start + lm->mod_section_data[i].size;
+	    lm->mod_section_data[i].offset = sec_start - lm->mod_base;
+            lm->mod_section_data[i].flags |= SEC_FOUND;
+
+	    if (CRASHDEBUG(1)) {
+		    fprintf(fp, "update sec offset sym %s @ %lx  val %lx  section %s\n",
+			    s1->name, s1->value, syminfo.value, secname);
+	    }
+
+	    if (strcmp(secname, ".text") == 0)
+		    lm->mod_text_start = sec_start;
+
+	    if (strcmp(secname, ".bss") == 0)
+		    lm->mod_bss_start = sec_start;
+
+	    if (strcmp(secname, ".data") == 0)
+		    lm->mod_data_start = sec_start;
+
+	    if (strcmp(secname, ".rodata") == 0)
+		    lm->mod_rodata_start = sec_start;
+            s1++;
+	}
+}
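
The offset recovery above reduces to two subtractions once a symbol has been matched by
name: the section's load address is the kernel symbol address minus the object-file symbol
value, and the stored offset is that address minus the module base.  A standalone example
with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Made-up numbers, purely to illustrate the v2 arithmetic. */
	unsigned long kernel_sym  = 0xffffffff88021120UL;	/* s1->value      */
	unsigned long objfile_sym = 0x120UL;			/* syminfo.value  */
	unsigned long mod_base    = 0xffffffff88020000UL;	/* lm->mod_base   */

	unsigned long sec_start = kernel_sym - objfile_sym;

	printf(".text loads at %lx (offset %lx into the module)\n",
	    sec_start, sec_start - mod_base);
	return 0;
}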
+
+/*
  *  Later versions of insmod store basic address information of each
  *  module in a format that looks like the following example of the
  *  nfsd module:
@@ -7185,8 +7967,8 @@
 	}
 
 	if (CRASHDEBUG(1))
-		fprintf(fp, "load_module_symbols: %s %s %lx\n",
-			modref, namelist, base_addr);
+		fprintf(fp, "load_module_symbols: %s %s %lx %lx\n",
+			modref, namelist, base_addr, kt->flags);
 
 	switch (kt->flags & (KMOD_V1|KMOD_V2))
 	{
@@ -7199,7 +7981,8 @@
                 	strcpy(lm->mod_namelist, namelist);
         	else
                 	strncpy(lm->mod_namelist, namelist, MAX_MOD_NAMELIST-1);
-		goto add_symbols;
+                if (st->flags & USE_OLD_ADD_SYM)
+                        goto add_symbols;
 	}
 
   	if ((mbfd = bfd_openr(namelist, NULL)) == NULL) 
@@ -7209,8 +7992,11 @@
 		error(FATAL, "cannot determine object file format: %s\n",
 			namelist);
 
-        if (!(bfd_get_file_flags(mbfd) & HAS_SYMS)) 
-          	error(FATAL, "no symbols in object file: %s\n", namelist);
+	if (LKCD_KERNTYPES() && (file_elf_version(namelist) == EV_DWARFEXTRACT))
+		goto add_symbols;   /* no symbols, add the debuginfo */
+
+	if (!(bfd_get_file_flags(mbfd) & HAS_SYMS))
+		error(FATAL, "no symbols in object file: %s\n", namelist);
 
 	symcount = bfd_read_minisymbols(mbfd, FALSE, &minisyms, &size);
 	if (symcount < 0)
@@ -7219,6 +8005,10 @@
 	else if (symcount == 0)
 		error(FATAL, "no symbols in object file: %s\n", namelist);
 
+        if (CRASHDEBUG(1)) {
+                fprintf(fp, "%ld symbols found in obj file %s\n", symcount,
+                    namelist);
+        }
         sort_x = bfd_make_empty_symbol(mbfd);
         sort_y = bfd_make_empty_symbol(mbfd);
         if (sort_x == NULL || sort_y == NULL)
@@ -7251,17 +8041,33 @@
 add_symbol_file(struct load_module *lm)
 {
         struct gnu_request request, *req;
-	char buf[BUFSIZE];
+        char buf[BUFSIZE];
+        int i, len;
+        char *secname;
+
+	for (i = len = 0; i < lm->mod_sections; i++)
+	{
+		secname = lm->mod_section_data[i].name;
+		if ((lm->mod_section_data[i].flags & SEC_FOUND) &&
+		    !STREQ(secname, ".text")) {
+			sprintf(buf, " -s %s 0x%lx", secname, 
+				lm->mod_section_data[i].offset + lm->mod_base);
+			len += strlen(buf);
+		}
+	}
 
 	req = &request;
 	BZERO(req, sizeof(struct gnu_request));
         req->command = GNU_ADD_SYMBOL_FILE;
 	req->addr = (ulong)lm;
-	req->buf = buf;
+	req->buf = GETBUF(len+BUFSIZE);
 	if (!CRASHDEBUG(1))
 		req->fp = pc->nullfp;
 
-	gdb_interface(req); 
+	st->flags |= ADD_SYMBOL_FILE;
+	gdb_interface(req);
+	st->flags &= ~ADD_SYMBOL_FILE;
+	FREEBUF(req->buf);
 
 	sprintf(buf, "set complaints 0");
 	gdb_pass_through(buf, NULL, 0);
@@ -7382,7 +8188,12 @@
 
         bfd_map_over_sections(bfd, section_header_info, MODULE_SECTIONS);
 
-	calculate_load_order(lm, bfd);
+	if (kt->flags & KMOD_V1)
+		calculate_load_order_v1(lm, bfd);
+	else
+		calculate_load_order_v2(lm, bfd, dynamic, minisyms,
+			symcount, size);
+
 
         from = (bfd_byte *) minisyms;
         fromend = from + symcount * size;
@@ -7395,104 +8206,112 @@
                 bfd_get_symbol_info(bfd, sym, &syminfo);
 
 		secname = (char *)bfd_get_section_name(bfd, sym->section);
+                found = 0;
 
-		switch (syminfo.type)
-		{
-		case 'b':
-		case 'B':
-                       if (CRASHDEBUG(2))
-                            fprintf(fp, "%08lx (%c) [%s] %s\n",  
-				(ulong)syminfo.value,
-                                syminfo.type, secname, syminfo.name);
+                if (kt->flags & KMOD_V1) {
+                        switch (syminfo.type)
+                        {
+                        case 'b':
+                        case 'B':
+                               if (CRASHDEBUG(2))
+                                    fprintf(fp, "%08lx (%c) [%s] %s\n",  
+                                        (ulong)syminfo.value,
+                                        syminfo.type, secname, syminfo.name);
 
-                        syminfo.value += lm->mod_bss_start;
-                        strcpy(name, syminfo.name);
-                        strip_module_symbol_end(name);
+                                if (!lm->mod_bss_start)
+                                        break;
 
-                        if (machdep->verify_symbol(name, syminfo.value, 
-			    syminfo.type)) {
-                                sp->value = syminfo.value;
-				sp->type = syminfo.type;
-				
-                                namespace_ctl(NAMESPACE_INSTALL,
-                                        &lm->mod_load_namespace, sp, name); 
+                                syminfo.value += lm->mod_bss_start;
+                                found = 1;
+                                break;
 
-                                if (CRASHDEBUG(1))
-                                    fprintf(fp, "%08lx %s\n",  sp->value,
-                                        name);
+                        case 'd': 
+                        case 'D':
+                                if (CRASHDEBUG(2))
+                                    fprintf(fp, "%08lx (%c) [%s] %s\n",  
+                                        (ulong)syminfo.value,
+                                        syminfo.type, secname, syminfo.name);
+
+                                if (STREQ(secname, ".rodata")) {
+                                        if (!lm->mod_rodata_start)
+                                                break;
+                                        syminfo.value += lm->mod_rodata_start;
+                                } else {
+                                        if (!lm->mod_data_start)
+                                                break;
+                                        syminfo.value += lm->mod_data_start;
+                                }
+                                found = 1;
+                                break;
 
-                                sp++;
-                                lm->mod_load_symcnt++;
-                        }
-			break;
+                        case 't':
+                        case 'T':
+                                if (CRASHDEBUG(2))
+                                    fprintf(fp, "%08lx (%c) [%s] %s\n",  
+                                        (ulong)syminfo.value, 
+                                        syminfo.type, secname, syminfo.name); 
 
-		case 'd': 
-		case 'D':
-                        if (CRASHDEBUG(2))
-                            fprintf(fp, "%08lx (%c) [%s] %s\n",  
-				(ulong)syminfo.value,
-                                syminfo.type, secname, syminfo.name);
+                                if (! lm->mod_text_start) {
+                                        break;
+                                }
 
-			if (STREQ(secname, ".rodata"))
-                        	syminfo.value += lm->mod_rodata_start;
-			else
-                        	syminfo.value += lm->mod_data_start;
+                                if ((st->flags & INSMOD_BUILTIN) &&
+                                    (STREQ(name, "init_module") || 
+                                    STREQ(name, "cleanup_module")))
+                                        break;
+
+                                syminfo.value += lm->mod_text_start;
+                                found = 1;
+                                break;
 
+                        default:
+                                break;
+                        }
+
+                } else {
+                        /* Match the symbol to the section it belongs to. */
+                        for (i = 0; i < lm->mod_sections; i++) {
+                                if (STREQ(lm->mod_section_data[i].name, secname)
+                                    && (lm->mod_section_data[i].flags & SEC_FOUND)) {
+                                        break;
+                                }
+                        }
+                        if (i < lm->mod_sections) {
+                                if (CRASHDEBUG(2))
+                                    fprintf(fp, "%08lx (%c) [%s] %s\n",  
+                                        (ulong)syminfo.value, 
+                                        syminfo.type, secname, syminfo.name); 
+
+                                if ((st->flags & INSMOD_BUILTIN) &&
+                                    (STREQ(name, "init_module") || 
+                                    STREQ(name, "cleanup_module"))) {
+                                        found = 0;
+                                } else {
+                                        syminfo.value += lm->mod_section_data[i].offset + lm->mod_base;
+                                        found = 1;
+                                }
+                        }
+                }
+
+                if (found) {
                         strcpy(name, syminfo.name);
                         strip_module_symbol_end(name);
 
-                        if (machdep->verify_symbol(name, syminfo.value, 
-			    syminfo.type)) {
+                        if (machdep->verify_symbol(name, syminfo.value,
+                            syminfo.type)) {
                                 sp->value = syminfo.value;
-				sp->type = syminfo.type;
+                                sp->type = syminfo.type;
                                 namespace_ctl(NAMESPACE_INSTALL,
-                                        &lm->mod_load_namespace, sp, name); 
+                                        &lm->mod_load_namespace, sp, name);
 
                                 if (CRASHDEBUG(1))
-                                    fprintf(fp, "%08lx %s\n",  sp->value,
+                                    fprintf(fp, "installing %c %08lx %s\n",  syminfo.type, sp->value,
                                         name);
 
                                 sp++;
                                 lm->mod_load_symcnt++;
                         }
-			break;
-
-		case 't':
-		case 'T':
-			if (CRASHDEBUG(2))
-			    fprintf(fp, "%08lx (%c) [%s] %s\n",  
-				(ulong)syminfo.value, 
-				syminfo.type, secname, syminfo.name); 
-
-			syminfo.value += lm->mod_text_start;
-			strcpy(name, syminfo.name);
-			strip_module_symbol_end(name);
-
-			if ((st->flags & INSMOD_BUILTIN) &&
-			    (STREQ(name, "init_module") || 
-			    STREQ(name, "cleanup_module")))
-				break;
-
-                	if (machdep->verify_symbol(name, syminfo.value, 
-			    syminfo.type)) {
-                        	sp->value = syminfo.value;
-				sp->type = syminfo.type;
-                                namespace_ctl(NAMESPACE_INSTALL,
-                                        &lm->mod_load_namespace, sp, name);
-
-                                if (CRASHDEBUG(1))
-                                    fprintf(fp, "%08lx %s\n",  sp->value,
-                                	name);
-
-                        	sp++;
-				lm->mod_load_symcnt++;
-                	} 
-
-			break;
-
-		default:
-			break;
-		}
+                }
 	}
 
 	lm->mod_load_symend = &lm->mod_load_symtable[lm->mod_load_symcnt];
@@ -7713,7 +8532,7 @@
 	ulong start, end;
 	char *modbuf;
 	ulong maxchunk, alloc;
-	long offset;
+	long offset = 0;
 
         start = roundup(lm->mod_size_of_struct, sizeof(long)) + lm->mod_base;
         end = lm->mod_base + lm->mod_size;
@@ -8089,6 +8908,10 @@
 	struct syment *sp_array[200], *sp;
 
 	if (req->name == PATCH_KERNEL_SYMBOLS_START) {
+		if (kt->flags & RELOC_FORCE)
+			error(WARNING, 
+			    "\nkernel relocated [%ldMB]: patching %ld gdb minimal_symbol values\n",
+				kt->relocate >> 20, st->symcnt);
                 fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" :
                  "\nplease wait... (patching %ld gdb minimal_symbol values) ",
 			st->symcnt);
@@ -8164,8 +8987,8 @@
 		return offset2;
 
 	if (pc->flags & DATADEBUG) {
-        	ulong retaddr[4] = { 0 };
-		save_return_address(retaddr);
+        	ulong retaddr[NUMBER_STACKFRAMES] = { 0 };
+		SAVE_RETURN_ADDRESS(retaddr);
 		sprintf(errmsg, 	
 		    "invalid (optional) structure member offsets: %s or %s",
 			item1, item2);
@@ -8187,8 +9010,8 @@
                 return size2;
 
         if (pc->flags & DATADEBUG) {
-        	ulong retaddr[4] = { 0 };
-		save_return_address(retaddr);
+        	ulong retaddr[NUMBER_STACKFRAMES] = { 0 };
+		SAVE_RETURN_ADDRESS(retaddr);
 		sprintf(errmsg, "invalid (optional) structure sizes: %s or %s",
 			item1, item2);
                 datatype_error(retaddr, errmsg, func, file, line);
@@ -8215,8 +9038,8 @@
 		return offset;
 
 	if (offset < 0) {
-        	ulong retaddr[4] = { 0 };
-		save_return_address(retaddr);
+        	ulong retaddr[NUMBER_STACKFRAMES] = { 0 };
+		SAVE_RETURN_ADDRESS(retaddr);
 		sprintf(errmsg, "invalid structure member offset: %s",
 			item);
 		datatype_error(retaddr, errmsg, func, file, line);
@@ -8233,8 +9056,8 @@
                 return size;
 
         if (size < 0) {
-        	ulong retaddr[4] = { 0 };
-		save_return_address(retaddr);
+        	ulong retaddr[NUMBER_STACKFRAMES] = { 0 };
+		SAVE_RETURN_ADDRESS(retaddr);
 		sprintf(errmsg, "invalid structure size: %s", item);
                 datatype_error(retaddr, errmsg, func, file, line);
         }
@@ -8267,7 +9090,8 @@
         if (pc->flags & DROP_CORE)
         	drop_core("DROP_CORE flag set: forcing a segmentation fault\n");
 	
-	gdb_readnow_warning();
+	if (CRASHDEBUG(1))
+		gdb_readnow_warning();
 
 	if (pc->flags & RUNTIME) {
 		sprintf(buf, "%s\n%s  FILE: %s  LINE: %d  FUNCTION: %s()\n",
@@ -8290,9 +9114,10 @@
 	char *arglist[MAXARGS];
 	char buf[BUFSIZE];
 	FILE *pipe;
-	ulong vaddr, lookfor;
-	ulong last_vaddr;
+	ulong vaddr, size, lookfor;
+	ulong last_vaddr, last_size;
 	char symbol[BUFSIZE];
+	const char *nm_call;
 
 	fflush(fp);
 	fflush(stdout);
@@ -8301,7 +9126,7 @@
 	thisfile = get_thisfile();
 
 	fprintf(stderr, "[%s] error trace: ", thisfile);
-        for (i = 3; i >= 0; i--) {
+        for (i = (NUMBER_STACKFRAMES-1); i >= 0; i--) {
                 if (retaddr[i])
                         fprintf(stderr, "%s%lx%s",
                                 i == 3 ? "" : "=> ",
@@ -8315,11 +9140,16 @@
 		return;
 	}
 
-        for (i = 0; i < 4; i++) {
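+	/*
+	 *  A stripped binary keeps only its dynamic symbol table, so nm
+	 *  needs -D to report anything useful in that case.
+	 */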
+	if (is_binary_stripped(thisfile))
+		nm_call = "/usr/bin/nm -DSBn %s";
+	else
+		nm_call = "/usr/bin/nm -BSn %s";
+
+        for (i = 0; i < NUMBER_STACKFRAMES; i++) {
 		if (!(lookfor = retaddr[i]))
 			continue;
 
-		sprintf(buf, "/usr/bin/nm -Bn %s", thisfile);
+		sprintf(buf, nm_call, thisfile);
 	        if (!(pipe = popen(buf, "r"))) {
 			perror("pipe");
 			break;
@@ -8328,20 +9158,27 @@
 		last_vaddr = 0;
 		BZERO(symbol, BUFSIZE);
 
-	        while (fgets(buf, 80, pipe)) {
+	        while (fgets(buf, BUFSIZE, pipe)) {
 			c = parse_line(strip_linefeeds(buf), arglist);
-			if (c != 3)
+			if (c != 4)
 				continue;
 			vaddr = htol(arglist[0], FAULT_ON_ERROR, NULL);
+			size = htol(arglist[1], FAULT_ON_ERROR, NULL);
 			if (vaddr > lookfor) {
-				fprintf(stderr, "%s  %lx: %s+%ld\n",
-					i == 0 ? "\n" : "", 
-					lookfor, symbol, 
-					lookfor-last_vaddr);
+				if ((lookfor - last_vaddr) > last_size)
+					fprintf(stderr, "%s  %lx: (undetermined)\n",
+						i == 0 ? "\n" : "", 
+						lookfor);
+				else
+					fprintf(stderr, "%s  %lx: %s+%ld\n",
+						i == 0 ? "\n" : "", 
+						lookfor, symbol, 
+						lookfor-last_vaddr);
 				break;
 			}
-			strcpy(symbol, arglist[2]);
+			strcpy(symbol, arglist[3]);
 			last_vaddr = vaddr;
+			last_size = size;
 		}
 
 		pclose(pipe);
--- crash/xendump.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/xendump.c	2008-01-02 09:15:13.000000000 -0500
@@ -0,0 +1,2848 @@
+/* 
+ * xendump.c 
+ * 
+ * Copyright (C) 2006, 2007, 2008 David Anderson
+ * Copyright (C) 2006, 2007, 2008 Red Hat, Inc. All rights reserved.
+ *
+ * This software may be freely redistributed under the terms of the
+ * GNU General Public License.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "defs.h"
+#include "xendump.h"
+
+static struct xendump_data xendump_data = { 0 };
+static struct xendump_data *xd = &xendump_data;
+
+static int xc_save_verify(char *);
+static int xc_core_verify(char *, char *);
+static int xc_save_read(void *, int, ulong, physaddr_t);
+static int xc_core_read(void *, int, ulong, physaddr_t);
+static int xc_core_mfns(ulong, FILE *);
+
+static void poc_store(ulong, off_t);
+static off_t poc_get(ulong, int *);
+
+static void xen_dump_vmconfig(FILE *);
+
+static void xc_core_create_pfn_tables(void);
+static ulong xc_core_pfn_to_page_index(ulong);
+static int xc_core_pfn_valid(ulong);
+
+static void xendump_print(char *fmt, ...);
+
+static int xc_core_elf_verify(char *, char *);
+static void xc_core_elf_dump(void);
+static char *xc_core_elf_mfn_to_page(ulong, char *);
+static int xc_core_elf_mfn_to_page_index(ulong);
+static ulong xc_core_elf_pfn_valid(ulong);
+static ulong xc_core_elf_pfn_to_page_index(ulong);
+static void xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *);
+static void xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *);
+static void xc_core_dump_Elf32_Shdr(Elf32_Off offset, int);
+static void xc_core_dump_Elf64_Shdr(Elf64_Off offset, int);
+static char *xc_core_strtab(uint32_t, char *);
+static void xc_core_dump_elfnote(off_t, size_t, int);
+static void xc_core_elf_pfn_init(void);
+
+#define ELFSTORE 1
+#define ELFREAD  0
+
+/*
+ *  Determine whether a file is a xendump creation, and if TRUE,
+ *  initialize the xendump_data structure.
+ */
+int
+is_xendump(char *file)
+{
+	int verified;
+	char buf[BUFSIZE];
+
+        if ((xd->xfd = open(file, O_RDWR)) < 0) {
+                if ((xd->xfd = open(file, O_RDONLY)) < 0) {
+                        sprintf(buf, "%s: open", file);
+                        perror(buf);
+                        return FALSE;
+                }
+        }
+
+	if (read(xd->xfd, buf, BUFSIZE) != BUFSIZE) 
+		return FALSE;
+
+        if (machine_type("X86") || machine_type("X86_64"))
+                xd->page_size = 4096;
+	else if (machine_type("IA64") && !machdep->pagesize)
+		xd->page_size = 16384;
+	else 
+                xd->page_size = machdep->pagesize;
+
+	verified = xc_save_verify(buf) || xc_core_verify(file, buf);
+
+	if (!verified)
+		close(xd->xfd);
+
+	return (verified);
+}
+
+/*
+ *  Verify whether the dump was created by the xc_domain_dumpcore()
+ *  library function in libxc/xc_core.c.
+ */
+static int
+xc_core_verify(char *file, char *buf)
+{
+	struct xc_core_header *xcp;
+
+	xcp = (struct xc_core_header *)buf;
+
+	if (xc_core_elf_verify(file, buf))
+		return TRUE;
+
+	if ((xcp->xch_magic != XC_CORE_MAGIC) && 
+	    (xcp->xch_magic != XC_CORE_MAGIC_HVM))
+		return FALSE;
+
+	if (!xcp->xch_nr_vcpus) {
+		error(INFO, 
+		    "faulty xc_core dump file header: xch_nr_vcpus is 0\n\n");
+
+        	fprintf(stderr, "         xch_magic: %x\n", xcp->xch_magic);
+        	fprintf(stderr, "      xch_nr_vcpus: %d\n", xcp->xch_nr_vcpus);
+        	fprintf(stderr, "      xch_nr_pages: %d\n", xcp->xch_nr_pages);
+        	fprintf(stderr, "   xch_ctxt_offset: %d\n", xcp->xch_ctxt_offset);
+        	fprintf(stderr, "  xch_index_offset: %d\n", xcp->xch_index_offset);
+        	fprintf(stderr, "  xch_pages_offset: %d\n\n", xcp->xch_pages_offset);
+
+		clean_exit(1);
+	}
+
+	BCOPY(xcp, &xd->xc_core.header, 
+		sizeof(struct xc_core_header));
+
+        xd->flags |= (XENDUMP_LOCAL | XC_CORE_ORIG | XC_CORE_P2M_CREATE);
+
+	if (xc_core_mfns(XC_CORE_64BIT_HOST, stderr))
+		xd->flags |= XC_CORE_64BIT_HOST;
+
+	if (!xd->page_size)
+		error(FATAL,
+		    "unknown page size: use -p <pagesize> command line option\n");
+
+	if (!(xd->page = (char *)malloc(xd->page_size)))
+		error(FATAL, "cannot malloc page space.");
+
+        if (!(xd->poc = (struct pfn_offset_cache *)calloc
+            (PFN_TO_OFFSET_CACHE_ENTRIES,
+            sizeof(struct pfn_offset_cache))))
+                error(FATAL, "cannot malloc pfn_offset_cache\n");
+	xd->last_pfn = ~(0UL);
+
+	if (CRASHDEBUG(1)) 
+                xendump_memory_dump(stderr);
+
+	return TRUE;
+}
+
+/*
+ *  Do the work for read_xendump() for the XC_CORE dumpfile format.
+ */
+static int
+xc_core_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr)
+{
+        ulong pfn, page_index;
+	off_t offset;
+	int redundant;
+
+	if (xd->flags & (XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE))
+		xc_core_create_pfn_tables();
+
+        pfn = (ulong)BTOP(paddr);
+
+        if ((offset = poc_get(pfn, &redundant))) {
+                if (!redundant) {
+                        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+                                return SEEK_ERROR;
+                        if (read(xd->xfd, xd->page, xd->page_size) != 
+			    xd->page_size)
+                                return READ_ERROR;
+			xd->last_pfn = pfn;
+                }
+
+                BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt);
+                return cnt;
+        }
+
+	if ((page_index = xc_core_pfn_to_page_index(pfn)) == 
+	    PFN_NOT_FOUND)
+		return READ_ERROR;
+
+	offset = (off_t)xd->xc_core.header.xch_pages_offset +
+		((off_t)(page_index) * (off_t)xd->page_size);
+
+	if (lseek(xd->xfd, offset, SEEK_SET) == -1) 
+ 		return SEEK_ERROR;
+
+	if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size)
+		return READ_ERROR;
+
+	poc_store(pfn, offset);
+
+	BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt);
+
+	return cnt;
+}
+
+/*
+ *  Verify whether the dumpfile was created by the "xm save" facility.
+ *  This gets started by the "save" function in XendCheckpoint.py, and
+ *  then by xc_save.c, with the work done in the xc_linux_save() library
+ *  function in libxc/xc_linux_save.c.
+ */
+
+#define MAX_BATCH_SIZE  1024
+/*
+ *  Number of P2M entries in a page.
+ */
+#define ULPP (xd->page_size/sizeof(unsigned long))
+/*
+ *  Number of P2M entries in the pfn_to_mfn_frame_list.
+ */
+#define P2M_FL_ENTRIES  (((xd->xc_save.nr_pfns)+ULPP-1)/ULPP)
+/*
+ *  Size in bytes of the pfn_to_mfn_frame_list.
+ */
+#define P2M_FL_SIZE     ((P2M_FL_ENTRIES)*sizeof(unsigned long))
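+/*
+ *  Hypothetical sizing, assuming a 32-bit x86 guest and dump (4096-byte
+ *  pages, 4-byte longs): ULPP is 1024, so a 512MB guest with nr_pfns of
+ *  131072 yields P2M_FL_ENTRIES = 128 and P2M_FL_SIZE = 512 bytes; with
+ *  8-byte longs ULPP drops to 512, giving 256 entries of 8 bytes each.
+ */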
+
+#define XTAB  (0xf<<28) /* invalid page */
+#define LTAB_MASK XTAB
+
+static int
+xc_save_verify(char *buf)
+{
+	int i, batch_count, done_batch, *intptr;
+	ulong flags, *ulongptr;
+	ulong batch_index, total_pages_read;
+	ulong N;
+
+	if (!STRNEQ(buf, XC_SAVE_SIGNATURE))
+		return FALSE;
+
+	if (lseek(xd->xfd, strlen(XC_SAVE_SIGNATURE), SEEK_SET) == -1)
+		return FALSE;
+
+	flags = XC_SAVE;
+
+	if (CRASHDEBUG(1)) {
+		fprintf(stderr, "\"%s\"\n", buf); 
+		fprintf(stderr, "endian: %d %s\n", __BYTE_ORDER, 
+		    __BYTE_ORDER == __BIG_ENDIAN ? "__BIG_ENDIAN" :
+		    (__BYTE_ORDER == __LITTLE_ENDIAN ? 
+		    "__LITTLE_ENDIAN" : "???"));
+	}
+		  
+	/*
+	 *  size of vmconfig data structure (big-endian)
+	 */
+	if (read(xd->xfd, buf, sizeof(int)) != sizeof(int))
+		return FALSE;
+
+	intptr = (int *)buf;
+
+	if (CRASHDEBUG(1) && BYTE_SWAP_REQUIRED(__BIG_ENDIAN)) {
+		fprintf(stderr, "byte-swap required for this:\n");
+		for (i = 0; i < sizeof(int); i++) 
+			fprintf(stderr, "[%x]", buf[i] & 0xff);
+		fprintf(stderr, ": %x -> ", *intptr);
+	}
+	
+	xd->xc_save.vmconfig_size = swab32(*intptr);
+
+	if (CRASHDEBUG(1))
+		fprintf(stderr, "%x\n", xd->xc_save.vmconfig_size);
+
+	if (!(xd->xc_save.vmconfig_buf = (char *)malloc
+	    (xd->xc_save.vmconfig_size)))
+		error(FATAL, "cannot malloc xc_save vmconfig space.");
+
+	if (!xd->page_size)
+		error(FATAL, 
+		    "unknown page size: use -p <pagesize> command line option\n");
+
+	if (!(xd->page = (char *)malloc(xd->page_size)))
+		error(FATAL, "cannot malloc page space.");
+
+	if (!(xd->poc = (struct pfn_offset_cache *)calloc
+	    (PFN_TO_OFFSET_CACHE_ENTRIES, 
+	    sizeof(struct pfn_offset_cache))))
+		error(FATAL, "cannot malloc pfn_offset_cache\n");
+	xd->last_pfn = ~(0UL);
+
+	if (!(xd->xc_save.region_pfn_type = (ulong *)calloc
+	    (MAX_BATCH_SIZE, sizeof(ulong))))
+		error(FATAL, "cannot malloc region_pfn_type\n");
+
+	if (read(xd->xfd, xd->xc_save.vmconfig_buf, 
+	    xd->xc_save.vmconfig_size) != xd->xc_save.vmconfig_size)
+		goto xc_save_bailout;
+
+	/*
+	 *  nr_pfns (native byte order)
+	 */
+	if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong))
+		goto xc_save_bailout;
+
+	ulongptr = (ulong *)buf;
+
+	if (CRASHDEBUG(1)) {
+		for (i = 0; i < sizeof(ulong); i++)
+			fprintf(stderr, "[%x]", buf[i] & 0xff);
+		fprintf(stderr, ": %lx (nr_pfns)\n", *ulongptr);
+	}
+
+	xd->xc_save.nr_pfns = *ulongptr;
+
+	if (machine_type("IA64"))
+		goto xc_save_ia64;
+
+    	/* 
+	 *  Get a local copy of the live_P2M_frame_list 
+	 */
+	if (!(xd->xc_save.p2m_frame_list = (unsigned long *)malloc(P2M_FL_SIZE))) 
+        	error(FATAL, "cannot allocate p2m_frame_list array");
+
+	if (!(xd->xc_save.batch_offsets = (off_t *)calloc((size_t)P2M_FL_ENTRIES, 
+	    sizeof(off_t))))
+        	error(FATAL, "cannot allocate batch_offsets array");
+
+	xd->xc_save.batch_count = P2M_FL_ENTRIES;
+		
+	if (read(xd->xfd, xd->xc_save.p2m_frame_list, P2M_FL_SIZE) != 
+	    P2M_FL_SIZE)
+		goto xc_save_bailout;
+
+	if (CRASHDEBUG(1))
+		fprintf(stderr, "pre-batch file pointer: %lld\n", 
+			(ulonglong)lseek(xd->xfd, 0L, SEEK_CUR));
+
+	/*
+	 *  ...
+	 *  int batch_count
+	 *  ulong region pfn_type[batch_count]
+	 *  page 0
+	 *  page 1
+	 *  ...
+	 *  page batch_count-1
+	 *  (repeat)
+	 */
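+	/*
+	 *  The walk below records each batch's file offset in
+	 *  batch_offsets[], reads its batch_count, and then skips over the
+	 *  stored pages (XTAB entries carry no page data) to reach the next
+	 *  batch header; a batch_count of 0 ends the stream and -1 flags
+	 *  page-verify mode.
+	 */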
+
+	total_pages_read = 0;
+	batch_index = 0;
+	done_batch = FALSE;
+
+	while (!done_batch) {
+
+		xd->xc_save.batch_offsets[batch_index] = (off_t)
+			lseek(xd->xfd, 0L, SEEK_CUR);
+
+		if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int))
+			goto xc_save_bailout;
+
+		if (CRASHDEBUG(1))
+			fprintf(stderr, "batch[%ld]: %d ", 
+				batch_index, batch_count); 
+
+		batch_index++;
+
+		if (batch_index >= P2M_FL_ENTRIES) {
+			fprintf(stderr, "more than %ld batches encountered?\n",
+				P2M_FL_ENTRIES);
+			goto xc_save_bailout;
+		}
+
+	 	switch (batch_count)
+	 	{
+	 	case 0:
+			if (CRASHDEBUG(1)) {
+	 		    fprintf(stderr, 
+			        ": Batch work is done: %ld pages read (P2M_FL_ENTRIES: %ld)\n", 
+				    total_pages_read, P2M_FL_ENTRIES);
+			}
+			done_batch = TRUE;
+			continue;
+
+	 	case -1:
+			if (CRASHDEBUG(1))
+	 			fprintf(stderr, ": Entering page verify mode\n");
+			continue;
+
+	 	default:
+	 		if (batch_count > MAX_BATCH_SIZE) {
+				if (CRASHDEBUG(1))
+	             		    fprintf(stderr, 
+					": Max batch size exceeded. Giving up.\n");
+				done_batch = TRUE;
+				continue;
+	 		}
+			if (CRASHDEBUG(1))
+	 			fprintf(stderr, "\n");
+			break;
+		}
+
+		if (read(xd->xfd, xd->xc_save.region_pfn_type, batch_count * sizeof(ulong)) != 
+	    	    batch_count * sizeof(ulong))
+			goto xc_save_bailout;
+
+		for (i = 0; i < batch_count; i++) {
+			unsigned long pagetype;
+			unsigned long pfn;
+	
+	            	pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK;
+	            	pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK;
+	
+		        if (pagetype == XTAB) 
+			    /* a bogus/unmapped page: skip it */
+	                	continue;
+	
+	            	if (pfn > xd->xc_save.nr_pfns) {
+				if (CRASHDEBUG(1))
+	                	    fprintf(stderr, 
+				 	"batch_count: %d pfn %ld out of range",
+						batch_count, pfn);
+	            	}
+
+			if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1)
+				goto xc_save_bailout;
+	
+			total_pages_read++;
+		}
+	}	
+
+	/* 
+	 *  Get the list of PFNs that are not in the pseudo-phys map
+	 */
+	if (read(xd->xfd, &xd->xc_save.pfns_not, 
+	    sizeof(xd->xc_save.pfns_not)) != sizeof(xd->xc_save.pfns_not))
+		goto xc_save_bailout;
+
+	if (CRASHDEBUG(1))
+		fprintf(stderr, "PFNs not in pseudo-phys map: %d\n", 
+			xd->xc_save.pfns_not);
+
+	if ((total_pages_read + xd->xc_save.pfns_not) != 
+	    xd->xc_save.nr_pfns)
+		error(WARNING, 
+		    "nr_pfns: %ld != (total pages: %ld + pages not saved: %d)\n",
+			xd->xc_save.nr_pfns, total_pages_read, 
+			xd->xc_save.pfns_not);
+
+	xd->xc_save.pfns_not_offset = lseek(xd->xfd, 0L, SEEK_CUR);
+
+	if (lseek(xd->xfd, sizeof(ulong) * xd->xc_save.pfns_not, SEEK_CUR) == -1)
+		goto xc_save_bailout;
+
+	xd->xc_save.vcpu_ctxt_offset = lseek(xd->xfd, 0L, SEEK_CUR);
+
+	lseek(xd->xfd, 0, SEEK_END);
+	lseek(xd->xfd,  -((off_t)(xd->page_size)), SEEK_CUR);
+
+	xd->xc_save.shared_info_page_offset = lseek(xd->xfd, 0L, SEEK_CUR);
+
+	xd->flags |= (XENDUMP_LOCAL | flags);
+	kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND);
+
+	if (CRASHDEBUG(1))
+		xendump_memory_dump(stderr);
+
+	return TRUE;
+
+xc_save_ia64:
+
+	/*
+	 *  Completely different format for ia64:
+         *
+         *    ...
+         *    pfn #
+         *    page data
+         *    pfn #
+         *    page data
+         *    ...
+	 */
+	free(xd->poc); 
+	xd->poc = NULL;
+	free(xd->xc_save.region_pfn_type); 
+	xd->xc_save.region_pfn_type = NULL;
+
+	if (!(xd->xc_save.ia64_page_offsets = 
+	    (ulong *)calloc(xd->xc_save.nr_pfns, sizeof(off_t)))) 
+        	error(FATAL, "cannot allocate ia64_page_offsets array");
+
+        /*
+         *  version
+         */
+        if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong))
+                goto xc_save_bailout;
+
+	xd->xc_save.ia64_version = *((ulong *)buf);
+
+	if (CRASHDEBUG(1))
+		fprintf(stderr, "ia64 version: %lx\n", 
+			xd->xc_save.ia64_version);
+
+	/*
+	 *  xen_domctl_arch_setup structure
+	 */
+        if (read(xd->xfd, buf, sizeof(xen_domctl_arch_setup_t)) != 
+	    sizeof(xen_domctl_arch_setup_t))
+                goto xc_save_bailout;
+
+	if (CRASHDEBUG(1)) {
+		xen_domctl_arch_setup_t *setup = 
+			(xen_domctl_arch_setup_t *)buf;
+
+		fprintf(stderr, "xen_domctl_arch_setup:\n");
+		fprintf(stderr, "        flags: %lx\n", (ulong)setup->flags);
+		fprintf(stderr, "           bp: %lx\n", (ulong)setup->bp);
+		fprintf(stderr, "       maxmem: %lx\n", (ulong)setup->maxmem);
+		fprintf(stderr, "       xsi_va: %lx\n", (ulong)setup->xsi_va);
+		fprintf(stderr, "hypercall_imm: %x\n", setup->hypercall_imm);
+	}
+
+	for (i = N = 0; i < xd->xc_save.nr_pfns; i++) {
+        	if (read(xd->xfd, &N, sizeof(N)) != sizeof(N))
+                	goto xc_save_bailout;
+
+		if (N < xd->xc_save.nr_pfns)
+			xd->xc_save.ia64_page_offsets[N] = 
+				lseek(xd->xfd, 0, SEEK_CUR);
+		else
+			error(WARNING, 	
+			    "[%d]: pfn of %lx (0x%lx) in ia64 canonical page list exceeds %ld\n",	
+				i, N, N, xd->xc_save.nr_pfns);
+
+		if (CRASHDEBUG(1)) {
+			if ((i < 10) || (N >= (xd->xc_save.nr_pfns-10))) 
+				fprintf(stderr, "[%d]: %ld\n%s", i, N,
+					i == 9 ? "...\n" : "");	
+		}
+
+		if ((N+1) >= xd->xc_save.nr_pfns)
+			break;
+
+		if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1)
+                	goto xc_save_bailout;
+	}
+
+	if (CRASHDEBUG(1)) {
+		for (i = N = 0; i < xd->xc_save.nr_pfns; i++) {
+			if (!xd->xc_save.ia64_page_offsets[i])
+				N++;
+		}
+		fprintf(stderr, "%ld out of %ld pfns not dumped\n",
+			N,  xd->xc_save.nr_pfns);
+	}
+
+	xd->flags |= (XENDUMP_LOCAL | flags | XC_SAVE_IA64);
+	kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND);
+
+	if (CRASHDEBUG(1))
+		xendump_memory_dump(stderr);
+
+	return TRUE;
+
+xc_save_bailout:
+
+	error(INFO, 
+	    "xc_save_verify: \"LinuxGuestRecord\" file handling/format error\n");
+
+	if (xd->xc_save.p2m_frame_list) {
+		free(xd->xc_save.p2m_frame_list);
+		xd->xc_save.p2m_frame_list = NULL;
+	}
+	if (xd->xc_save.batch_offsets) {
+		free(xd->xc_save.batch_offsets);
+		xd->xc_save.batch_offsets = NULL;
+	}
+	if (xd->xc_save.vmconfig_buf) {
+		free(xd->xc_save.vmconfig_buf);
+		xd->xc_save.vmconfig_buf = NULL;
+	}
+	if (xd->page) {
+		free(xd->page);
+		xd->page = NULL;
+	}
+
+	return FALSE;
+}
+
+/*
+ *  Do the work for read_xendump() for the XC_SAVE dumpfile format.
+ */
+static int
+xc_save_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr)
+{
+	int b, i, redundant;
+	ulong reqpfn;
+	int batch_count;
+	off_t file_offset;
+
+	reqpfn = (ulong)BTOP(paddr);
+
+	if (CRASHDEBUG(8))
+	    fprintf(xd->ofp, 
+	        "xc_save_read(bufptr: %lx cnt: %d addr: %lx paddr: %llx (%ld, 0x%lx)\n",
+		    (ulong)bufptr, cnt, addr, (ulonglong)paddr, reqpfn, reqpfn);
+
+	if (xd->flags & XC_SAVE_IA64) {
+                if (reqpfn >= xd->xc_save.nr_pfns) {
+			if (CRASHDEBUG(1))
+                            	fprintf(xd->ofp,
+				    "xc_save_read: pfn %lx too large: nr_pfns: %lx\n",
+					reqpfn, xd->xc_save.nr_pfns);
+			return SEEK_ERROR;
+		}
+
+        	file_offset = xd->xc_save.ia64_page_offsets[reqpfn];
+		if (!file_offset) {
+			if (CRASHDEBUG(1))
+                            	fprintf(xd->ofp,
+				    "xc_save_read: pfn %lx not stored in xendump\n",
+					reqpfn);
+			return SEEK_ERROR;
+		}	
+
+       		if (reqpfn != xd->last_pfn) {
+	        	if (lseek(xd->xfd, file_offset, SEEK_SET) == -1)
+				return SEEK_ERROR;
+	
+			if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size)
+	               		return READ_ERROR;
+		} else {
+                	xd->redundant++;
+			xd->cache_hits++;
+		}
+
+		xd->accesses++;
+		xd->last_pfn = reqpfn;
+
+                BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt);
+                return cnt;
+	}
+
+	if ((file_offset = poc_get(reqpfn, &redundant))) {
+		if (!redundant) {
+        		if (lseek(xd->xfd, file_offset, SEEK_SET) == -1)
+				return SEEK_ERROR;
+			if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size)
+                		return READ_ERROR;
+			xd->last_pfn = reqpfn;
+		} else if (CRASHDEBUG(1))
+			console("READ %ld (0x%lx) skipped!\n", reqpfn, reqpfn);
+
+		BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt);
+                return cnt;
+	}
+
+        /*
+         *  ...
+         *  int batch_count
+         *  ulong region pfn_type[batch_count]
+         *  page 0
+         *  page 1
+         *  ...
+         *  page batch_count-1
+         *  (repeat)
+         */
+	for (b = 0; b < xd->xc_save.batch_count; b++) {
+
+		if (lseek(xd->xfd, xd->xc_save.batch_offsets[b], SEEK_SET) == -1)
+			return SEEK_ERROR;
+
+		if (CRASHDEBUG(8))
+		    fprintf(xd->ofp, "check batch[%d]: offset: %llx\n",
+			b, (ulonglong)xd->xc_save.batch_offsets[b]);
+
+                if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int))
+                        return READ_ERROR;
+
+                switch (batch_count)
+                {
+                case 0:
+                        if (CRASHDEBUG(1)) {
+                            	fprintf(xd->ofp,
+                                    "batch[%d]: has count of zero -- bailing out on pfn %ld\n",
+					 b, reqpfn);
+                        }
+			return READ_ERROR;
+
+                case -1:
+			return READ_ERROR;
+
+                default:
+			if (CRASHDEBUG(8))
+		    	    fprintf(xd->ofp, 
+				"batch[%d]: offset: %llx batch count: %d\n",
+				    b, (ulonglong)xd->xc_save.batch_offsets[b], 
+				    batch_count);
+                        break;
+                }
+
+                if (read(xd->xfd, xd->xc_save.region_pfn_type, batch_count * sizeof(ulong)) !=
+                    batch_count * sizeof(ulong))
+                        return READ_ERROR;
+
+                for (i = 0; i < batch_count; i++) {
+                        unsigned long pagetype;
+                        unsigned long pfn;
+
+                        pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK;
+                        pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK;
+
+                        if (pagetype == XTAB)
+                            /* a bogus/unmapped page: skip it */
+                                continue;
+
+                        if (pfn > xd->xc_save.nr_pfns) {
+                                if (CRASHDEBUG(1))
+                                    fprintf(stderr,
+                                        "batch_count: %d pfn %ld out of range",
+                                                batch_count, pfn);
+                        }
+
+			if (pfn == reqpfn) {
+				file_offset = lseek(xd->xfd, 0, SEEK_CUR);
+				poc_store(pfn, file_offset);
+
+				if (read(xd->xfd, xd->page, xd->page_size) != 
+				    xd->page_size)
+                			return READ_ERROR;
+
+				BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt);
+				return cnt;
+			}
+
+                        if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1)
+                                return SEEK_ERROR;
+                }
+	}
+
+	return READ_ERROR;
+}
+
+/*
+ *  Stash a pfn's offset.  If they're all in use, put it in the
+ *  least-used slot that's closest to the beginning of the array.
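+ *  In effect a least-frequently-used eviction: cnt counts hits per slot,
+ *  and the lowest-count slot is recycled once the cache fills up.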
+ */
+static void
+poc_store(ulong pfn, off_t file_offset)
+{
+	int i;
+	struct pfn_offset_cache *poc, *plow;
+	ulong curlow;
+
+	curlow = ~(0UL);
+	plow = NULL;
+	poc = xd->poc;
+
+        for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) {
+		if (poc->cnt == 0) {
+			poc->cnt = 1;
+			poc->pfn = pfn;
+			poc->file_offset = file_offset;
+			xd->last_pfn = pfn;
+			return;
+		}
+
+		if (poc->cnt < curlow) {
+			curlow = poc->cnt;
+			plow = poc;
+		}
+	}
+
+	plow->cnt = 1;
+	plow->pfn = pfn;
+	plow->file_offset = file_offset;
+	xd->last_pfn = pfn;
+}
+
+/*
+ *  Check whether a pfn's offset has been cached.
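+ *
+ *  A repeat request for xd->last_pfn returns a dummy non-zero offset (1)
+ *  with *redundant set, so the caller reuses the page already sitting in
+ *  xd->page rather than re-reading the dumpfile.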
+ */
+static off_t
+poc_get(ulong pfn, int *redundant)
+{
+	int i;
+	struct pfn_offset_cache *poc;
+
+	xd->accesses++;
+
+	if (pfn == xd->last_pfn) {
+		xd->redundant++;
+		*redundant = TRUE;
+		return 1;
+	} else
+		*redundant = FALSE;
+
+	poc = xd->poc;
+
+        for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) {
+		if (poc->cnt && (poc->pfn == pfn)) {
+			poc->cnt++;
+			xd->cache_hits++;
+			return poc->file_offset;
+		}
+	}
+
+	return 0;
+}
+
+
+/*
+ *  Perform any post-dumpfile determination stuff here.
+ */
+int
+xendump_init(char *unused, FILE *fptr)
+{
+        if (!XENDUMP_VALID())
+                return FALSE;
+
+        xd->ofp = fptr;
+        return TRUE;
+}
+
+int
+read_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
+{
+	if (pc->curcmd_flags & XEN_MACHINE_ADDR)
+		return READ_ERROR;
+
+	switch (xd->flags & (XC_SAVE|XC_CORE_ORIG|XC_CORE_ELF))
+	{
+	case XC_SAVE:
+		return xc_save_read(bufptr, cnt, addr, paddr);
+
+	case XC_CORE_ORIG:
+	case XC_CORE_ELF:
+		return xc_core_read(bufptr, cnt, addr, paddr);
+
+	default:
+        	return READ_ERROR;
+	}
+}
+
+int
+read_xendump_hyper(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
+{
+        ulong pfn, page_index;
+        off_t offset;
+
+        pfn = (ulong)BTOP(paddr);
+
+	/* ODA: pfn == mfn !!! */
+        if ((page_index = xc_core_mfn_to_page_index(pfn)) == PFN_NOT_FOUND)
+                return READ_ERROR;
+
+        offset = (off_t)xd->xc_core.header.xch_pages_offset +
+                ((off_t)(page_index) * (off_t)xd->page_size);
+
+        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+                return SEEK_ERROR;
+
+        if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size)
+                return READ_ERROR;
+
+        BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt);
+
+        return cnt;
+}
+
+int
+write_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
+{
+        return WRITE_ERROR;
+}
+
+uint
+xendump_page_size(void)
+{
+        if (!XENDUMP_VALID())
+                return 0;
+
+        return xd->page_size;
+}
+
+/*
+ *  xendump_free_memory(), and xendump_memory_used()
+ *  are debug only, and typically unnecessary to implement.
+ */
+int
+xendump_free_memory(void)
+{
+        return 0;
+}
+
+int
+xendump_memory_used(void)
+{
+        return 0;
+}
+
+/*
+ *  This function is dump-type independent, used here to
+ *  dump the xendump_data structure contents.
+ */
+int
+xendump_memory_dump(FILE *fp)
+{
+	int i, linefeed, used, others;
+	ulong *ulongptr;
+	Elf32_Off offset32;
+	Elf64_Off offset64;
+	FILE *fpsave;
+
+	fprintf(fp, "        flags: %lx (", xd->flags);
+	others = 0;
+	if (xd->flags & XENDUMP_LOCAL)
+		fprintf(fp, "%sXENDUMP_LOCAL", others++ ? "|" : "");
+	if (xd->flags & XC_SAVE)
+		fprintf(fp, "%sXC_SAVE", others++ ? "|" : "");
+	if (xd->flags & XC_CORE_ORIG)
+		fprintf(fp, "%sXC_CORE_ORIG", others++ ? "|" : "");
+	if (xd->flags & XC_CORE_ELF)
+		fprintf(fp, "%sXC_CORE_ELF", others++ ? "|" : "");
+	if (xd->flags & XC_CORE_P2M_CREATE)
+		fprintf(fp, "%sXC_CORE_P2M_CREATE", others++ ? "|" : "");
+	if (xd->flags & XC_CORE_PFN_CREATE)
+		fprintf(fp, "%sXC_CORE_PFN_CREATE", others++ ? "|" : "");
+	if (xd->flags & XC_CORE_NO_P2M)
+		fprintf(fp, "%sXC_CORE_NO_P2M", others++ ? "|" : "");
+	if (xd->flags & XC_SAVE_IA64)
+		fprintf(fp, "%sXC_SAVE_IA64", others++ ? "|" : "");
+	if (xd->flags & XC_CORE_64BIT_HOST)
+		fprintf(fp, "%sXC_CORE_64BIT_HOST", others++ ? "|" : "");
+	fprintf(fp, ")\n");
+	fprintf(fp, "          xfd: %d\n", xd->xfd);
+	fprintf(fp, "    page_size: %d\n", xd->page_size);
+	fprintf(fp, "          ofp: %lx\n", (ulong)xd->ofp);
+	fprintf(fp, "         page: %lx\n", (ulong)xd->page);
+	fprintf(fp, "     panic_pc: %lx\n", xd->panic_pc);
+	fprintf(fp, "     panic_sp: %lx\n", xd->panic_sp);
+	fprintf(fp, "     accesses: %ld\n", (ulong)xd->accesses);
+	fprintf(fp, "   cache_hits: %ld ", (ulong)xd->cache_hits);
+	if (xd->accesses)
+ 		fprintf(fp, "(%ld%%)\n", xd->cache_hits * 100 / xd->accesses);
+	else
+		fprintf(fp, "\n");
+	fprintf(fp, "     last_pfn: %ld\n", xd->last_pfn);
+	fprintf(fp, "    redundant: %ld ", (ulong)xd->redundant);
+	if (xd->accesses)
+ 		fprintf(fp, "(%ld%%)\n", xd->redundant * 100 / xd->accesses);
+	else
+		fprintf(fp, "\n");
+	for (i = used = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) 
+		if (xd->poc && xd->poc[i].cnt)
+			used++;
+	if (xd->poc)
+		fprintf(fp, "    poc[%d]: %lx %s", PFN_TO_OFFSET_CACHE_ENTRIES, 
+		(ulong)xd->poc, xd->poc ? "" : "(none)");
+	else
+		fprintf(fp, "       poc[0]: (unused)\n");
+	for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) {
+		if (!xd->poc)
+			break;
+		if (!xd->poc[i].cnt) {
+			if (!i)
+				fprintf(fp, "(none used)\n");
+			break;
+		} else if (!i)
+			fprintf(fp, "(%d used)\n", used);
+		if (CRASHDEBUG(2))
+			fprintf(fp, 
+		  	    "  [%d]: pfn: %ld (0x%lx) count: %ld file_offset: %llx\n",
+			    	i,
+			    	xd->poc[i].pfn,
+				xd->poc[i].pfn,
+				xd->poc[i].cnt,
+				(ulonglong)xd->poc[i].file_offset);
+	}
+	if (!xd->poc)
+		fprintf(fp, "\n");
+
+	fprintf(fp, "\n      xc_save:\n");
+	fprintf(fp, "                  nr_pfns: %ld (0x%lx)\n", 
+		xd->xc_save.nr_pfns, xd->xc_save.nr_pfns); 
+	fprintf(fp, "            vmconfig_size: %d (0x%x)\n", xd->xc_save.vmconfig_size, 
+		xd->xc_save.vmconfig_size);
+	fprintf(fp, "             vmconfig_buf: %lx\n", (ulong)xd->xc_save.vmconfig_buf);
+	if (xd->flags & XC_SAVE) 
+		xen_dump_vmconfig(fp);
+	fprintf(fp, "           p2m_frame_list: %lx ", (ulong)xd->xc_save.p2m_frame_list);
+	if ((xd->flags & XC_SAVE) && xd->xc_save.p2m_frame_list) {
+		fprintf(fp, "\n");
+		ulongptr = xd->xc_save.p2m_frame_list;
+		for (i = 0; i < P2M_FL_ENTRIES; i++, ulongptr++)
+			fprintf(fp, "%ld ", *ulongptr);
+		fprintf(fp, "\n");
+	} else
+		fprintf(fp, "(none)\n");
+	fprintf(fp, "                 pfns_not: %d\n", xd->xc_save.pfns_not);
+	fprintf(fp, "          pfns_not_offset: %lld\n", 
+		(ulonglong)xd->xc_save.pfns_not_offset);
+	fprintf(fp, "         vcpu_ctxt_offset: %lld\n", 
+		(ulonglong)xd->xc_save.vcpu_ctxt_offset);
+	fprintf(fp, "  shared_info_page_offset: %lld\n", 
+		(ulonglong)xd->xc_save.shared_info_page_offset);
+	fprintf(fp, "          region_pfn_type: %lx\n", (ulong)xd->xc_save.region_pfn_type);
+	fprintf(fp, "              batch_count: %ld\n", (ulong)xd->xc_save.batch_count);
+	fprintf(fp, "            batch_offsets: %lx %s\n", 
+		(ulong)xd->xc_save.batch_offsets, 
+		xd->xc_save.batch_offsets ? "" : "(none)");
+	for (i = linefeed = 0; i < xd->xc_save.batch_count; i++) {
+		fprintf(fp, "[%d]: %llx ", i, 
+			(ulonglong)xd->xc_save.batch_offsets[i]);
+		if (((i+1)%4) == 0) {
+			fprintf(fp, "\n");
+			linefeed = FALSE;
+		} else
+			linefeed = TRUE;
+	}
+	if (linefeed)
+		fprintf(fp, "\n");
+	fprintf(fp, "             ia64_version: %ld\n", (ulong)xd->xc_save.ia64_version);
+	fprintf(fp, "        ia64_page_offsets: %lx ", (ulong)xd->xc_save.ia64_page_offsets);
+	if (xd->xc_save.ia64_page_offsets)
+		fprintf(fp, "(%ld entries)\n\n", xd->xc_save.nr_pfns);
+	else
+		fprintf(fp, "(none)\n\n");	
+
+	fprintf(fp, "      xc_core:\n");
+	fprintf(fp, "                   header:\n");
+	fprintf(fp, "                xch_magic: %x ", 
+		xd->xc_core.header.xch_magic);
+	if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC)
+		fprintf(fp, "(XC_CORE_MAGIC)\n");
+	else if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC_HVM)
+		fprintf(fp, "(XC_CORE_MAGIC_HVM)\n");
+	else
+		fprintf(fp, "(unknown)\n");
+	fprintf(fp, "             xch_nr_vcpus: %d\n", 
+		xd->xc_core.header.xch_nr_vcpus);
+	fprintf(fp, "             xch_nr_pages: %d (0x%x)\n",
+		xd->xc_core.header.xch_nr_pages,
+		xd->xc_core.header.xch_nr_pages);
+	fprintf(fp, "          xch_ctxt_offset: %d (0x%x)\n", 
+		xd->xc_core.header.xch_ctxt_offset,
+		xd->xc_core.header.xch_ctxt_offset);
+	fprintf(fp, "         xch_index_offset: %d (0x%x)\n",
+		xd->xc_core.header.xch_index_offset,
+		xd->xc_core.header.xch_index_offset);
+	fprintf(fp, "         xch_pages_offset: %d (0x%x)\n",
+		xd->xc_core.header.xch_pages_offset,
+		xd->xc_core.header.xch_pages_offset);
+
+	fprintf(fp, "                elf_class: %s\n", xd->xc_core.elf_class == ELFCLASS64 ? "ELFCLASS64" :
+		xd->xc_core.elf_class == ELFCLASS32 ? "ELFCLASS32" : "n/a");
+	fprintf(fp, "        elf_strtab_offset: %lld (0x%llx)\n", 
+		(ulonglong)xd->xc_core.elf_strtab_offset,
+		(ulonglong)xd->xc_core.elf_strtab_offset);
+	fprintf(fp, "           format_version: %016llx\n", 
+		(ulonglong)xd->xc_core.format_version);
+	fprintf(fp, "       shared_info_offset: %lld (0x%llx)\n", 
+		(ulonglong)xd->xc_core.shared_info_offset,
+		(ulonglong)xd->xc_core.shared_info_offset);
+	if (machine_type("IA64"))
+		fprintf(fp, "  ia64_mapped_regs_offset: %lld (0x%llx)\n", 
+			(ulonglong)xd->xc_core.ia64_mapped_regs_offset,
+			(ulonglong)xd->xc_core.ia64_mapped_regs_offset);
+	fprintf(fp, "       elf_index_pfn[%d]: %s", INDEX_PFN_COUNT,
+		xd->xc_core.elf_class ? "\n" : "(none used)\n");
+	if (xd->xc_core.elf_class) {
+		for (i = 0; i < INDEX_PFN_COUNT; i++) {
+			fprintf(fp, "%ld:%ld ", 
+			    xd->xc_core.elf_index_pfn[i].index,
+			    xd->xc_core.elf_index_pfn[i].pfn);
+		}
+		fprintf(fp, "\n");
+	}
+	fprintf(fp, "               last_batch:\n");
+	fprintf(fp, "                    index: %ld (%ld - %ld)\n", 
+		xd->xc_core.last_batch.index,
+		xd->xc_core.last_batch.start, xd->xc_core.last_batch.end);
+	fprintf(fp, "                 accesses: %ld\n", 
+		xd->xc_core.last_batch.accesses);
+	fprintf(fp, "               duplicates: %ld ", 
+		xd->xc_core.last_batch.duplicates);
+        if (xd->xc_core.last_batch.accesses)
+                fprintf(fp, "(%ld%%)\n", 
+			xd->xc_core.last_batch.duplicates * 100 / 
+			xd->xc_core.last_batch.accesses);
+        else
+                fprintf(fp, "\n");
+
+	fprintf(fp, "                    elf32: %lx\n", (ulong)xd->xc_core.elf32);
+	fprintf(fp, "                    elf64: %lx\n", (ulong)xd->xc_core.elf64);
+
+	fprintf(fp, "               p2m_frames: %d\n", 
+		xd->xc_core.p2m_frames);
+	fprintf(fp, "     p2m_frame_index_list: %s\n",
+		(xd->flags & (XC_CORE_NO_P2M|XC_SAVE)) ? "(not used)" : "");
+	for (i = 0; i < xd->xc_core.p2m_frames; i++) {
+		fprintf(fp, "%ld ", 
+			xd->xc_core.p2m_frame_index_list[i]);
+	}
+	fprintf(fp, xd->xc_core.p2m_frames ? "\n" : "");
+
+	if ((xd->flags & XC_CORE_ORIG) && CRASHDEBUG(8))
+		xc_core_mfns(XENDUMP_LOCAL, fp);
+
+        switch (xd->xc_core.elf_class)
+        {
+        case ELFCLASS32:
+		fpsave = xd->ofp;
+		xd->ofp = fp;
+		xc_core_elf_dump();
+		offset32 = xd->xc_core.elf32->e_shoff;
+		for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) {
+			xc_core_dump_Elf32_Shdr(offset32, ELFREAD);
+			offset32 += xd->xc_core.elf32->e_shentsize;
+		}
+		xendump_print("\n");
+		xd->ofp = fpsave;
+                break;
+
+        case ELFCLASS64:
+		fpsave = xd->ofp;
+		xd->ofp = fp;
+		xc_core_elf_dump();
+		offset64 = xd->xc_core.elf64->e_shoff;
+		for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) {
+			xc_core_dump_Elf64_Shdr(offset64, ELFREAD);
+			offset64 += xd->xc_core.elf64->e_shentsize;
+		}
+		xendump_print("\n");
+		xd->ofp = fpsave;
+		break;
+	}
+
+	return 0;
+}
+
+static void
+xen_dump_vmconfig(FILE *fp)
+{
+	int i, opens, closes;
+	char *p;
+
+	opens = closes = 0;
+	p = xd->xc_save.vmconfig_buf;
+	for (i = 0; i < xd->xc_save.vmconfig_size; i++, p++) {
+		if (ascii(*p))
+			fprintf(fp, "%c", *p);
+		else
+			fprintf(fp, "<%x>", *p);
+
+		if (*p == '(')
+			opens++;
+		else if (*p == ')')
+			closes++;
+	}
+	fprintf(fp, "\n");
+
+	if (opens != closes)
+		error(WARNING, "invalid vmconfig contents?\n");
+}
+
+/*
+ *  Looking at the active set, try to determine who panicked, 
+ *  or who was the "suspend" kernel thread. 
+ */
+ulong get_xendump_panic_task(void)
+{
+	int i;
+	ulong task;
+	struct task_context *tc;
+
+	switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE))
+	{
+	case XC_CORE_ORIG:
+	case XC_CORE_ELF:
+		if (machdep->xendump_panic_task)
+			return (machdep->xendump_panic_task((void *)xd));
+		break;
+
+	case XC_SAVE:
+        	for (i = 0; i < NR_CPUS; i++) {
+                	if (!(task = tt->active_set[i]))
+                        	continue;
+			tc = task_to_context(task);
+			if (is_kernel_thread(task) &&
+			    STREQ(tc->comm, "suspend")) 
+				return tc->task;
+        	}
+		break;
+	}
+
+	return NO_TASK;
+}
+
+/*
+ *  Figure out the back trace hooks.
+ */
+void get_xendump_regs(struct bt_info *bt, ulong *pc, ulong *sp)
+{
+	int i;
+	ulong *up;
+
+	if ((tt->panic_task == bt->task) &&
+	    (xd->panic_pc && xd->panic_sp)) {
+		*pc = xd->panic_pc;
+		*sp = xd->panic_sp;
+		return;
+	}
+
+	switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE))
+	{
+	case XC_CORE_ORIG:
+	case XC_CORE_ELF:
+		if (machdep->get_xendump_regs)
+			return (machdep->get_xendump_regs(xd, bt, pc, sp));
+		break;
+
+	case XC_SAVE:
+		if (tt->panic_task != bt->task) 
+			break;
+
+                for (i = 0, up = (ulong *)bt->stackbuf;
+                     i < LONGS_PER_STACK; i++, up++) {
+                        if (is_kernel_text(*up) &&
+		       	    (STREQ(closest_symbol(*up), 
+			    "__do_suspend"))) {
+				*pc = *up;
+				*sp = tt->flags & THREAD_INFO ?
+                               		bt->tc->thread_info +
+                                        (i * sizeof(long)) :
+                                        bt->task + 
+					(i * sizeof(long));
+				xd->panic_pc = *pc;
+				xd->panic_sp = *sp;
+				return;
+			}
+		}
+	}
+
+	machdep->get_stack_frame(bt, pc, sp);
+}
+
+/*
+ *  Farm out most of the work to the proper architecture to create
+ *  the p2m table.  For ELF core dumps, create the index:pfn table.
+ */
+static void 
+xc_core_create_pfn_tables(void)
+{
+        if (xd->flags & XC_CORE_P2M_CREATE) {
+		if (!machdep->xendump_p2m_create)
+			error(FATAL, 
+			    "xen xc_core dumpfiles not supported on this architecture");
+	
+		if (!machdep->xendump_p2m_create((void *)xd))
+			error(FATAL,
+			    "cannot create xen pfn-to-mfn mapping\n");
+	}
+
+	if (xd->flags & XC_CORE_ELF)
+		xc_core_elf_pfn_init();
+
+	xd->flags &= ~(XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE);
+
+	if (CRASHDEBUG(1))
+		xendump_memory_dump(xd->ofp);
+}
+
+/*
+ *  Find the page index containing the mfn, and read the
+ *  machine page into the buffer.
+ */
+char *
+xc_core_mfn_to_page(ulong mfn, char *pgbuf)
+{
+	int i, b, idx, done;
+	ulong tmp[MAX_BATCH_SIZE];
+	off_t offset;
+	size_t size;
+	uint nr_pages;
+
+	if (xd->flags & XC_CORE_ELF)
+		return xc_core_elf_mfn_to_page(mfn, pgbuf);
+
+        if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset,
+            SEEK_SET) == -1) {
+                error(INFO, "cannot lseek to page index\n");
+		return NULL;
+	}
+
+	nr_pages = xd->xc_core.header.xch_nr_pages;
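+	/* a 64-bit host writes 8-byte index entries: twice as many words to scan */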
+	if (xd->flags & XC_CORE_64BIT_HOST)
+		nr_pages *= 2;
+
+        for (b = 0, idx = -1, done = FALSE; 
+	     !done && (b < nr_pages); b += MAX_BATCH_SIZE) {
+		size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b);
+                if (read(xd->xfd, tmp, size) != size) {
+                        error(INFO, "cannot read index page %d\n", b);
+			return NULL;
+		}
+
+                for (i = 0; i < MAX_BATCH_SIZE; i++) {
+			if ((b+i) >= nr_pages) {
+				done = TRUE;
+				break;
+			}
+                        if (tmp[i] == mfn) {
+                                idx = i+b;
+                                if (CRASHDEBUG(4))
+                                        fprintf(xd->ofp,
+                                            "page: found mfn 0x%lx (%ld) at index %d\n",
+                                                mfn, mfn, idx);
+				done = TRUE;
+                        }
+                }
+	}
+
+	if (idx == -1) {
+                error(INFO, "cannot find mfn %ld (0x%lx) in page index\n",
+			mfn, mfn);
+		return NULL;
+	}
+
+        if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset,
+            SEEK_SET) == -1) {
+                error(INFO, "cannot lseek to xch_pages_offset\n");
+		return NULL;
+	}
+
+        offset = (off_t)(idx) * (off_t)xd->page_size;
+
+        if (lseek(xd->xfd, offset, SEEK_CUR) == -1) {
+                error(INFO, "cannot lseek to mfn-specified page\n");
+		return NULL;
+	}
+
+        if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) {
+                error(INFO, "cannot read mfn-specified page\n");
+		return NULL;
+	}
+
+	return pgbuf;
+}
+
+/*
+ *  Find the page index containing the mfn, and read the
+ *  machine page into the buffer.
+ */
+static char *
+xc_core_elf_mfn_to_page(ulong mfn, char *pgbuf)
+{
+	int i, b, idx, done;
+	off_t offset;
+	size_t size;
+	uint nr_pages;
+	ulong tmp;
+	struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE];
+
+        offset = xd->xc_core.header.xch_index_offset;
+	nr_pages = xd->xc_core.header.xch_nr_pages;
+
+        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+                error(FATAL, "cannot lseek to page index\n");
+
+        for (b = 0, idx = -1, done = FALSE; 
+	     !done && (b < nr_pages); b += MAX_BATCH_SIZE) {
+		size = sizeof(struct xen_dumpcore_p2m) *
+			MIN(MAX_BATCH_SIZE, nr_pages - b);
+                if (read(xd->xfd, &p2m_batch[0], size) != size) {
+                        error(INFO, "cannot read index page %d\n", b);
+			return NULL;
+		}
+
+                for (i = 0; i < MAX_BATCH_SIZE; i++) {
+			if ((b+i) >= nr_pages) {
+				done = TRUE;
+				break;
+			}
+
+			tmp = (ulong)p2m_batch[i].gmfn;
+
+                        if (tmp == mfn) {
+                                idx = i+b;
+                                if (CRASHDEBUG(4))
+                                        fprintf(xd->ofp,
+                                            "page: found mfn 0x%lx (%ld) at index %d\n",
+                                                mfn, mfn, idx);
+				done = TRUE;
+                        }
+                }
+	}
+
+	if (idx == -1) {
+                error(INFO, "cannot find mfn %ld (0x%lx) in page index\n",
+			mfn, mfn);
+		return NULL;
+	}
+
+        if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset,
+            SEEK_SET) == -1)
+                error(FATAL, "cannot lseek to xch_pages_offset\n");
+
+        offset = (off_t)(idx) * (off_t)xd->page_size;
+
+        if (lseek(xd->xfd, offset, SEEK_CUR) == -1) {
+                error(INFO, "cannot lseek to mfn-specified page\n");
+		return NULL;
+	}
+
+        if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) {
+                error(INFO, "cannot read mfn-specified page\n");
+		return NULL;
+	}
+
+	return pgbuf;
+}
+
+
+/*
+ *  Find and return the page index containing the mfn.
+ */
+int 
+xc_core_mfn_to_page_index(ulong mfn)
+{
+        int i, b;
+        ulong tmp[MAX_BATCH_SIZE];
+	uint nr_pages;
+	size_t size;
+
+	if (xd->flags & XC_CORE_ELF)
+		return xc_core_elf_mfn_to_page_index(mfn);
+
+        if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset,
+            SEEK_SET) == -1) {
+                error(INFO, "cannot lseek to page index\n");
+                return MFN_NOT_FOUND;
+        }
+
+	nr_pages = xd->xc_core.header.xch_nr_pages;
+	if (xd->flags & XC_CORE_64BIT_HOST)
+                nr_pages *= 2;
+
+        for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) {
+		size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b);
+                if (read(xd->xfd, tmp, size) != size) {
+                        error(INFO, "cannot read index page %d\n", b);
+			return MFN_NOT_FOUND;
+		}
+
+		for (i = 0; i < MAX_BATCH_SIZE; i++) {
+			if ((b+i) >= nr_pages)
+				break;
+			
+                	if (tmp[i] == mfn) {
+				if (CRASHDEBUG(4))
+                        		fprintf(xd->ofp, 
+				            "index: batch: %d found mfn %ld (0x%lx) at index %d\n",
+                                		b/MAX_BATCH_SIZE, mfn, mfn, i+b);
+                        	return (i+b);
+                	}
+		}
+        }
+
+        return MFN_NOT_FOUND;
+}
+
+/*
+ *  Find and return the page index containing the mfn.
+ */
+static int
+xc_core_elf_mfn_to_page_index(ulong mfn)
+{
+        int i, b;
+	off_t offset;
+	size_t size;
+	uint nr_pages;
+        ulong tmp;
+        struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE];
+
+        offset = xd->xc_core.header.xch_index_offset;
+	nr_pages = xd->xc_core.header.xch_nr_pages;
+
+        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+                error(FATAL, "cannot lseek to page index\n");
+
+        for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) {
+		size = sizeof(struct xen_dumpcore_p2m) *
+			MIN(MAX_BATCH_SIZE, nr_pages - b);
+                if (read(xd->xfd, &p2m_batch[0], size) != size) {
+                        error(INFO, "cannot read index page %d\n", b);
+			return MFN_NOT_FOUND;
+		}
+
+		for (i = 0; i < MAX_BATCH_SIZE; i++) {
+			if ((b+i) >= nr_pages)
+				break;
+			
+			tmp = (ulong)p2m_batch[i].gmfn;
+
+                	if (tmp == mfn) {
+				if (CRASHDEBUG(4))
+                        		fprintf(xd->ofp, 
+				            "index: batch: %d found mfn %ld (0x%lx) at index %d\n",
+                                		b/MAX_BATCH_SIZE, mfn, mfn, i+b);
+                        	return (i+b);
+                	}
+		}
+        }
+
+        return MFN_NOT_FOUND;
+}
+
+
+/*
+ *  XC_CORE mfn-related utility function.
+ */
+static int
+xc_core_mfns(ulong arg, FILE *ofp)
+{
+        int i, b;
+	uint nr_pages;
+        ulong tmp[MAX_BATCH_SIZE];
+        ulonglong tmp64[MAX_BATCH_SIZE];
+	size_t size;
+
+        if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset,
+            SEEK_SET) == -1) {
+                error(INFO, "cannot lseek to page index\n");
+		return FALSE;
+        }
+
+	switch (arg)
+	{
+	case XC_CORE_64BIT_HOST:
+		/*
+		 *  Determine whether this is a 32-bit guest xendump that
+		 *  was taken on a 64-bit xen host.
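+		 *
+		 *  The index is scanned four 32-bit words at a time: on a
+		 *  64-bit host each little-endian mfn occupies two words
+		 *  with the upper half zero, so a nonzero/zero/nonzero/zero
+		 *  pattern identifies that layout.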
+	         */
+		if (machine_type("X86_64") || machine_type("IA64"))
+			return FALSE;
+check_next_4:
+	        if (read(xd->xfd, tmp, sizeof(ulong) * 4) != (4 * sizeof(ulong))) {
+			error(INFO, "cannot read index pages\n");
+			return FALSE;
+	        }
+
+		if ((tmp[0] == 0xffffffff) || (tmp[1] == 0xffffffff) ||
+		    (tmp[2] == 0xffffffff) || (tmp[3] == 0xffffffff) ||
+		    (!tmp[0] && !tmp[1]) || (!tmp[2] && !tmp[3]))
+			goto check_next_4;
+
+		if (CRASHDEBUG(2))
+			fprintf(ofp, "mfns: %08lx %08lx %08lx %08lx\n", 
+					tmp[0], tmp[1], tmp[2], tmp[3]);
+
+		if (tmp[0] && !tmp[1] && tmp[2] && !tmp[3])
+			return TRUE;
+		else
+			return FALSE;
+
+	case XENDUMP_LOCAL:
+		if (BITS64() || (xd->flags & XC_CORE_64BIT_HOST))
+			goto show_64bit_mfns;
+
+		fprintf(ofp, "xch_index_offset mfn list:\n");
+
+		nr_pages = xd->xc_core.header.xch_nr_pages;
+
+	        for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) {
+			size = sizeof(ulong) *
+				MIN(MAX_BATCH_SIZE, nr_pages - b);
+	                if (read(xd->xfd, tmp, size) != size) {
+	                        error(INFO, "cannot read index page %d\n", b);
+	                        return FALSE;
+	                }
+	
+			if (b) fprintf(ofp, "\n");
+
+	                for (i = 0; i < MAX_BATCH_SIZE; i++) {
+				if ((b+i) >= nr_pages)
+					break;
+				if ((i%8) == 0)
+					fprintf(ofp, "%s[%d]:", 
+						i ? "\n" : "", b+i);
+				if (tmp[i] == 0xffffffff)
+					fprintf(ofp, " INVALID");
+				else
+					fprintf(ofp, " %lx", tmp[i]);
+			}
+		}
+
+		fprintf(ofp, "\nxch_nr_pages: %d\n", 
+			xd->xc_core.header.xch_nr_pages);
+		return TRUE;
+
+show_64bit_mfns:
+		fprintf(ofp, "xch_index_offset mfn list: %s\n",
+			BITS32() ? "(64-bit mfns)" : "");
+
+		nr_pages = xd->xc_core.header.xch_nr_pages;
+
+	        for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) {
+			size = sizeof(ulonglong) *
+				MIN(MAX_BATCH_SIZE, nr_pages - b);
+			if (read(xd->xfd, tmp64, size) != size) {
+	                        error(INFO, "cannot read index page %d\n", b);
+	                        return FALSE;
+	                }
+	
+			if (b) fprintf(ofp, "\n");
+
+	                for (i = 0; i < MAX_BATCH_SIZE; i++) {
+				if ((b+i) >= nr_pages)
+					break;
+				if ((i%8) == 0)
+					fprintf(ofp, "%s[%d]:", 
+						i ? "\n" : "", b+i);
+				if (tmp64[i] == 0xffffffffffffffffULL)
+					fprintf(ofp, " INVALID");
+				else
+					fprintf(ofp, " %llx", tmp64[i]);
+			}
+		}
+
+		fprintf(ofp, "\nxch_nr_pages: %d\n", nr_pages);
+		return TRUE;
+
+	default:
+		return FALSE;
+	}
+}
+
+/*
+ *  Given a normal kernel pfn, determine the page index in the dumpfile.
+ *
+ *  -  First determine which of the pages making up the 
+ *     phys_to_machine_mapping[] array would contain the pfn.
+ *  -  From the phys_to_machine_mapping page, determine the mfn.
+ *  -  Find the mfn in the dumpfile page index.
+ */
+#define PFNS_PER_PAGE  (xd->page_size/sizeof(unsigned long))
+
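+/*
+ *  Worked example (illustrative only, assuming a 4KB page size and
+ *  4-byte longs, i.e. PFNS_PER_PAGE == 1024):  for pfn 0x12345,
+ *  idx = 0x12345/1024 = 0x48, so p2m_frame_index_list[0x48] names the
+ *  phys_to_machine_mapping page to read; the mfn is entry 0x345
+ *  (0x12345%1024) within that page, and that mfn is then located in
+ *  the dumpfile page index via xc_core_mfn_to_page_index().
+ */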
+static ulong
+xc_core_pfn_to_page_index(ulong pfn)
+{
+	ulong idx, p2m_idx, mfn_idx;
+	ulong *up, mfn;
+	off_t offset;
+
+	/*
+	 *  This function does not apply when there's no p2m
+	 *  mapping and/or if this is an ELF format dumpfile.
+	 */
+	switch (xd->flags & (XC_CORE_NO_P2M|XC_CORE_ELF))
+	{
+	case (XC_CORE_NO_P2M|XC_CORE_ELF):
+		return xc_core_elf_pfn_valid(pfn);
+
+	case XC_CORE_NO_P2M:
+		return(xc_core_pfn_valid(pfn) ? pfn : PFN_NOT_FOUND);
+	
+	case XC_CORE_ELF:
+		return xc_core_elf_pfn_to_page_index(pfn);
+	}
+
+	idx = pfn/PFNS_PER_PAGE;
+
+	if (idx >= xd->xc_core.p2m_frames) {
+		error(INFO, "pfn: %lx is too large for dumpfile\n", 
+			pfn);
+		return PFN_NOT_FOUND;
+	}
+
+	p2m_idx = xd->xc_core.p2m_frame_index_list[idx];
+
+	if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset,
+            SEEK_SET) == -1) {
+                error(INFO, "cannot lseek to xch_pages_offset\n");
+                return PFN_NOT_FOUND;
+        }
+
+        offset = (off_t)(p2m_idx) * (off_t)xd->page_size;
+
+        if (lseek(xd->xfd, offset, SEEK_CUR) == -1) {
+                error(INFO, "cannot lseek to pfn-specified page\n");
+                return PFN_NOT_FOUND;
+        }
+
+        if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) {
+                error(INFO, "cannot read pfn-specified page\n");
+                return PFN_NOT_FOUND;
+        }
+
+	up = (ulong *)xd->page;
+	up += (pfn%PFNS_PER_PAGE);
+
+	mfn = *up;
+
+	if ((mfn_idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) {
+		error(INFO, "cannot find mfn in page index\n");
+		return PFN_NOT_FOUND;
+	}
+
+	return mfn_idx;
+}
+
+
+/*
+ *  Search the .xen_p2m array for the target pfn, starting at a 
+ *  higher batch if appropriate.  This presumes that the pfns
+ *  are laid out in ascending order.
+ */
+static ulong
+xc_core_elf_pfn_to_page_index(ulong pfn)
+{
+        int i, b, start_index;
+	off_t offset;
+	size_t size;
+	uint nr_pages;
+        ulong tmp;
+        struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE];
+
+        offset = xd->xc_core.header.xch_index_offset;
+	nr_pages = xd->xc_core.header.xch_nr_pages;
+
+	/*
+	 *  Initialize the start_index.
+	 */
+	xd->xc_core.last_batch.accesses++;
+
+	if ((pfn >= xd->xc_core.last_batch.start) &&
+	    (pfn <= xd->xc_core.last_batch.end)) {
+		xd->xc_core.last_batch.duplicates++;
+		start_index = xd->xc_core.last_batch.index;
+	} else {
+		for (i = 0; i <= INDEX_PFN_COUNT; i++) {
+			if ((i == INDEX_PFN_COUNT) ||
+			    (pfn < xd->xc_core.elf_index_pfn[i].pfn)) {
+				if (--i < 0)
+					i = 0;
+				start_index = xd->xc_core.elf_index_pfn[i].index;
+				break;
+			}
+		}
+	}
+
+	offset += (start_index * sizeof(struct xen_dumpcore_p2m));
+        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+                error(FATAL, "cannot lseek to page index\n");
+
+        for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) {
+		size = sizeof(struct xen_dumpcore_p2m) *
+			MIN(MAX_BATCH_SIZE, nr_pages - b);
+                if (read(xd->xfd, &p2m_batch[0], size) != size) {
+                        error(INFO, "cannot read index page %d\n", b);
+			return PFN_NOT_FOUND;
+		}
+
+		for (i = 0; i < MAX_BATCH_SIZE; i++) {
+			if ((b+i) >= nr_pages)
+				break;
+			
+			tmp = (ulong)p2m_batch[i].pfn;
+
+                	if (tmp == pfn) {
+				if (CRASHDEBUG(4))
+                        		fprintf(xd->ofp, 
+				            "index: batch: %d found pfn %ld (0x%lx) at index %d\n",
+                                		b/MAX_BATCH_SIZE, pfn, pfn, i+b);
+
+				if ((b+MAX_BATCH_SIZE) < nr_pages) {
+					xd->xc_core.last_batch.index = b;
+					xd->xc_core.last_batch.start = p2m_batch[0].pfn;
+					xd->xc_core.last_batch.end = p2m_batch[MAX_BATCH_SIZE-1].pfn;
+				}
+
+                        	return (i+b);
+                	}
+		}
+        }
+
+        return PFN_NOT_FOUND;
+}
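+
+/*
+ *  Note on the caching above (illustrative):  lookups tend to arrive in
+ *  ascending pfn order, so when a pfn falls inside the last successful
+ *  batch [last_batch.start, last_batch.end], the search restarts at
+ *  last_batch.index rather than at the sampled elf_index_pfn[] entry.
+ *  For example, if a previous hit cached a batch spanning pfns 0x40000
+ *  through 0x40fff, a subsequent lookup of pfn 0x40010 seeks straight
+ *  to that batch's file offset instead of rescanning .xen_p2m from an
+ *  earlier point.
+ */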
+
+/*
+ *  In xendumps containing INVALID_MFN markers in the page index,
+ *  return the validity of the pfn.
+ */
+static int 
+xc_core_pfn_valid(ulong pfn)
+{
+	ulong mfn;
+	off_t offset;
+
+	if (pfn >= (ulong)xd->xc_core.header.xch_nr_pages)
+		return FALSE;
+
+        offset = (off_t)xd->xc_core.header.xch_index_offset;
+
+	if (xd->flags & XC_CORE_64BIT_HOST)
+		offset += (off_t)(pfn * sizeof(ulonglong));
+	else
+		offset += (off_t)(pfn * sizeof(ulong));
+
+	/*
+	 *  The lseek and read should never fail, so report 
+	 *  any errors unconditionally.
+	 */
+	if (lseek(xd->xfd, offset, SEEK_SET) == -1) {
+		error(INFO, 
+		    "xendump: cannot lseek to page index for pfn %lx\n", 
+			pfn);
+		return FALSE;
+	}
+
+	if (read(xd->xfd, &mfn, sizeof(ulong)) != sizeof(ulong)) {
+		error(INFO, 
+		    "xendump: cannot read index page for pfn %lx\n", 
+			pfn);
+		return FALSE;
+	}
+
+	/*
+	 *  If it's an invalid mfn, let the caller decide whether
+	 *  to display an error message (unless debugging).
+	 */
+	if (mfn == INVALID_MFN) {
+		if (CRASHDEBUG(1))
+			error(INFO, 
+		    	    "xendump: pfn %lx contains INVALID_MFN\n", 
+				pfn);
+		return FALSE;
+	} 
+
+	return TRUE;
+}
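+
+/*
+ *  Offset arithmetic example (illustrative):  with xch_index_offset at,
+ *  say, 0x1000, pfn 0x100 is checked at file offset 0x1000 + (0x100 * 8)
+ *  = 0x1800 when the index was written by a 64-bit xen host, or at
+ *  0x1000 + (0x100 * 4) = 0x1400 for a native 32-bit index.
+ */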
+
+/*
+ *  Return the index into the .xen_pfn array containing the pfn.
+ *  If not found, return PFN_NOT_FOUND.
+ */
+static ulong
+xc_core_elf_pfn_valid(ulong pfn)
+{
+        int i, b, start_index;
+	off_t offset;
+	size_t size;
+	uint nr_pages;
+        ulong tmp;
+        uint64_t pfn_batch[MAX_BATCH_SIZE];
+
+        offset = xd->xc_core.header.xch_index_offset;
+	nr_pages = xd->xc_core.header.xch_nr_pages;
+
+	/*
+	 *  Initialize the start_index.
+	 */
+	xd->xc_core.last_batch.accesses++;
+
+	if ((pfn >= xd->xc_core.last_batch.start) &&
+	    (pfn <= xd->xc_core.last_batch.end)) {
+		xd->xc_core.last_batch.duplicates++;
+		start_index = xd->xc_core.last_batch.index;
+	} else {
+		for (i = 0; i <= INDEX_PFN_COUNT; i++) {
+			if ((i == INDEX_PFN_COUNT) ||
+			    (pfn < xd->xc_core.elf_index_pfn[i].pfn)) {
+				if (--i < 0)
+					i = 0;
+				start_index = xd->xc_core.elf_index_pfn[i].index;
+				break;
+			}
+		}
+	}
+
+	offset += (start_index * sizeof(uint64_t));
+        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+                error(FATAL, "cannot lseek to page index\n");
+
+        for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) {
+		size = sizeof(uint64_t) * MIN(MAX_BATCH_SIZE, nr_pages - b);
+                if (read(xd->xfd, &pfn_batch[0], size) != size) {
+                        error(INFO, "cannot read index page %d\n", b);
+			return PFN_NOT_FOUND;
+		}
+
+		for (i = 0; i < MAX_BATCH_SIZE; i++) {
+			if ((b+i) >= nr_pages)
+				break;
+			
+			tmp = (ulong)pfn_batch[i];
+
+                	if (tmp == pfn) {
+				if (CRASHDEBUG(4))
+                        		fprintf(xd->ofp, 
+				            "index: batch: %d found pfn %ld (0x%lx) at index %d\n",
+                                		b/MAX_BATCH_SIZE, pfn, pfn, i+b);
+
+				if ((b+MAX_BATCH_SIZE) < nr_pages) {
+					xd->xc_core.last_batch.index = b;
+					xd->xc_core.last_batch.start = (ulong)pfn_batch[0];
+					xd->xc_core.last_batch.end = (ulong)pfn_batch[MAX_BATCH_SIZE-1];
+				}
+
+                        	return (i+b);
+                	}
+		}
+        }
+
+        return PFN_NOT_FOUND;
+}
+
+/*
+ *  Store the panic task's stack hooks from where it was found
+ *  in get_active_set_panic_task().
+ */
+void
+xendump_panic_hook(char *stack)
+{
+	int i, err, argc;
+	char *arglist[MAXARGS];
+	char buf[BUFSIZE];
+	ulong value, *sp;
+
+	if (machine_type("IA64"))  /* needs switch_stack address */
+		return;
+
+	strcpy(buf, stack);
+
+        argc = parse_line(buf, arglist);
+
+	if ((value = htol(strip_ending_char(arglist[0], ':'), 
+	    RETURN_ON_ERROR, &err)) == BADADDR)
+		return;
+	for (sp = (ulong *)value, i = 1; i < argc; i++, sp++) {
+		if (strstr(arglist[i], "xen_panic_event")) {
+			if (!readmem((ulong)sp, KVADDR, &value,
+			    sizeof(ulong), "xen_panic_event address",
+                            RETURN_ON_ERROR))
+				return;
+
+			xd->panic_sp = (ulong)sp;
+			xd->panic_pc = value;
+		} else if (strstr(arglist[i], "panic") && !xd->panic_sp) {
+                        if (!readmem((ulong)sp, KVADDR, &value,
+                            sizeof(ulong), "xen_panic_event address",
+                            RETURN_ON_ERROR))
+                                return;
+
+			xd->panic_sp = (ulong)sp;
+			xd->panic_pc = value;
+		}
+	}
+}
+
+static void
+xendump_print(char *fmt, ...)
+{
+        char buf[BUFSIZE];
+        va_list ap;
+
+        if (!fmt || !strlen(fmt))
+                return;
+
+        va_start(ap, fmt);
+        (void)vsnprintf(buf, BUFSIZE, fmt, ap);
+        va_end(ap);
+
+        if (xd->ofp)
+                fprintf(xd->ofp, "%s", buf);
+        else if (!XENDUMP_VALID() && CRASHDEBUG(7))
+		fprintf(stderr, "%s", buf);
+                
+}
+
+/*
+ *  Support for xc_core ELF dumpfile format.
+ */
+static int
+xc_core_elf_verify(char *file, char *buf)
+{
+	int i;
+	Elf32_Ehdr *elf32;
+	Elf64_Ehdr *elf64;
+	Elf32_Off offset32;
+	Elf64_Off offset64;
+	char *eheader;
+	int swap;
+
+	eheader = buf;
+
+	if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT)
+		goto bailout;
+
+	swap = (((eheader[EI_DATA] == ELFDATA2LSB) && 
+	     (__BYTE_ORDER == __BIG_ENDIAN)) ||
+	    ((eheader[EI_DATA] == ELFDATA2MSB) && 
+	     (__BYTE_ORDER == __LITTLE_ENDIAN)));
+
+	elf32 = (Elf32_Ehdr *)buf;
+	elf64 = (Elf64_Ehdr *)buf;
+
+        if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
+	    (swap16(elf32->e_type, swap) == ET_CORE) &&
+	    (swap32(elf32->e_version, swap) == EV_CURRENT) &&
+	    (swap16(elf32->e_shnum, swap) > 0)) {
+		switch (swap16(elf32->e_machine, swap))
+		{
+		case EM_386:
+			if (machine_type_mismatch(file, "X86", NULL, 0))
+				goto bailout;
+			break;
+
+		default:
+			if (machine_type_mismatch(file, "(unknown)", NULL, 0))
+				goto bailout;
+			break;
+		}
+
+		if (endian_mismatch(file, elf32->e_ident[EI_DATA], 0))
+			goto bailout;
+
+		xd->xc_core.elf_class = ELFCLASS32;
+        	if ((xd->xc_core.elf32 = (Elf32_Ehdr *)malloc(sizeof(Elf32_Ehdr))) == NULL) {
+                	fprintf(stderr, "cannot malloc ELF header buffer\n");
+                	clean_exit(1);
+		}
+		BCOPY(buf, xd->xc_core.elf32, sizeof(Elf32_Ehdr));
+
+	} else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
+	    (swap16(elf64->e_type, swap) == ET_CORE) &&
+	    (swap32(elf64->e_version, swap) == EV_CURRENT) &&
+	    (swap16(elf64->e_shnum, swap) > 0)) { 
+		switch (swap16(elf64->e_machine, swap))
+		{
+		case EM_IA_64:
+			if (machine_type_mismatch(file, "IA64", NULL, 0))
+				goto bailout;
+			break;
+
+		case EM_X86_64:
+			if (machine_type_mismatch(file, "X86_64", "X86", 0))
+				goto bailout;
+			break;
+
+		case EM_386:
+			if (machine_type_mismatch(file, "X86", NULL, 0))
+				goto bailout;
+			break;
+
+		default:
+			if (machine_type_mismatch(file, "(unknown)", NULL, 0))
+				goto bailout;
+		}
+
+		if (endian_mismatch(file, elf64->e_ident[EI_DATA], 0))
+			goto bailout;
+
+		xd->xc_core.elf_class = ELFCLASS64;
+        	if ((xd->xc_core.elf64 = (Elf64_Ehdr *)malloc(sizeof(Elf64_Ehdr))) == NULL) {
+                	fprintf(stderr, "cannot malloc ELF header buffer\n");
+                	clean_exit(1);
+		}
+		BCOPY(buf, xd->xc_core.elf64, sizeof(Elf64_Ehdr));
+
+	} else {
+		if (CRASHDEBUG(1))
+			error(INFO, "%s: not a xen ELF core file\n", file);
+		goto bailout;
+	}
+
+	xc_core_elf_dump();
+
+	switch (xd->xc_core.elf_class)
+	{
+	case ELFCLASS32:
+                offset32 = xd->xc_core.elf32->e_shoff;
+		for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) {
+			xc_core_dump_Elf32_Shdr(offset32, ELFSTORE);
+			offset32 += xd->xc_core.elf32->e_shentsize;
+		}
+		xendump_print("\n");
+		break;
+
+	case ELFCLASS64:
+                offset64 = xd->xc_core.elf64->e_shoff;
+		for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) {
+			xc_core_dump_Elf64_Shdr(offset64, ELFSTORE);
+			offset64 += xd->xc_core.elf64->e_shentsize;
+		}
+		xendump_print("\n");
+		break;
+	}
+
+        xd->flags |= (XENDUMP_LOCAL | XC_CORE_ELF);
+
+	if (!xd->page_size)
+		error(FATAL,
+		    "unknown page size: use -p <pagesize> command line option\n");
+
+	if (!(xd->page = (char *)malloc(xd->page_size)))
+		error(FATAL, "cannot malloc page space.");
+
+        if (!(xd->poc = (struct pfn_offset_cache *)calloc
+            (PFN_TO_OFFSET_CACHE_ENTRIES,
+            sizeof(struct pfn_offset_cache))))
+                error(FATAL, "cannot malloc pfn_offset_cache\n");
+	xd->last_pfn = ~(0UL);
+
+	for (i = 0; i < INDEX_PFN_COUNT; i++)
+        	xd->xc_core.elf_index_pfn[i].pfn = ~0UL;
+
+	if (CRASHDEBUG(1)) 
+                xendump_memory_dump(fp);
+
+	return TRUE;
+
+bailout:
+	return FALSE;
+}
+
+/*
+ *  Dump the relevant ELF header. 
+ */
+static void
+xc_core_elf_dump(void)
+{
+	switch (xd->xc_core.elf_class)
+	{
+	case ELFCLASS32:
+		xc_core_dump_Elf32_Ehdr(xd->xc_core.elf32);
+		break;
+	case ELFCLASS64:
+		xc_core_dump_Elf64_Ehdr(xd->xc_core.elf64);
+		break;
+	}
+}
+
+
+/*
+ *  Dump the 32-bit ELF header, and grab a pointer to the strtab section.
+ */
+static void 
+xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *elf)
+{
+	char buf[BUFSIZE];
+	Elf32_Off offset32;
+	Elf32_Shdr shdr;
+
+	BZERO(buf, BUFSIZE);
+	BCOPY(elf->e_ident, buf, SELFMAG); 
+	xendump_print("\nElf32_Ehdr:\n");
+	xendump_print("                e_ident: \\%o%s\n", buf[0], 
+		&buf[1]);
+	xendump_print("      e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]);
+	switch (elf->e_ident[EI_CLASS])
+	{
+	case ELFCLASSNONE:
+		xendump_print("(ELFCLASSNONE)\n");
+		break;
+	case ELFCLASS32:
+		xendump_print("(ELFCLASS32)\n");
+		break;
+	case ELFCLASS64:
+		xendump_print("(ELFCLASS64)\n");
+		break;
+	case ELFCLASSNUM:
+		xendump_print("(ELFCLASSNUM)\n");
+		break;
+	default:
+		xendump_print("(?)\n");
+		break;
+	}
+	xendump_print("       e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]);
+	switch (elf->e_ident[EI_DATA])
+	{
+	case ELFDATANONE:
+		xendump_print("(ELFDATANONE)\n");
+		break;
+	case ELFDATA2LSB: 
+		xendump_print("(ELFDATA2LSB)\n");
+		break;
+	case ELFDATA2MSB:
+		xendump_print("(ELFDATA2MSB)\n");
+		break;
+	case ELFDATANUM:
+		xendump_print("(ELFDATANUM)\n");
+		break;
+        default:
+                xendump_print("(?)\n");
+	}
+	xendump_print("    e_ident[EI_VERSION]: %d ", 
+		elf->e_ident[EI_VERSION]);
+	if (elf->e_ident[EI_VERSION] == EV_CURRENT)
+		xendump_print("(EV_CURRENT)\n");
+	else
+		xendump_print("(?)\n");
+	xendump_print("      e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]);
+	switch (elf->e_ident[EI_OSABI])
+	{
+	case ELFOSABI_SYSV:   
+		xendump_print("(ELFOSABI_SYSV)\n");
+		break;
+	case ELFOSABI_HPUX:    
+		xendump_print("(ELFOSABI_HPUX)\n");
+		break;
+	case ELFOSABI_ARM:      
+		xendump_print("(ELFOSABI_ARM)\n");
+		break;
+	case ELFOSABI_STANDALONE:
+		xendump_print("(ELFOSABI_STANDALONE)\n");
+		break;
+        default:
+                xendump_print("(?)\n");
+	}
+	xendump_print(" e_ident[EI_ABIVERSION]: %d\n", 
+		elf->e_ident[EI_ABIVERSION]);
+
+	xendump_print("                 e_type: %d ", elf->e_type);
+	switch (elf->e_type)
+	{
+	case ET_NONE:
+		xendump_print("(ET_NONE)\n");
+		break;
+	case ET_REL:
+		xendump_print("(ET_REL)\n");
+		break;
+	case ET_EXEC:
+		xendump_print("(ET_EXEC)\n");
+		break;
+	case ET_DYN:
+		xendump_print("(ET_DYN)\n");
+		break;
+	case ET_CORE:
+		xendump_print("(ET_CORE)\n");
+		break;
+	case ET_NUM:
+		xendump_print("(ET_NUM)\n");
+		break;
+	case ET_LOOS:
+		xendump_print("(ET_LOOS)\n");
+		break;
+	case ET_HIOS:
+		xendump_print("(ET_HIOS)\n");
+		break;
+	case ET_LOPROC:
+		xendump_print("(ET_LOPROC)\n");
+		break;
+	case ET_HIPROC:
+		xendump_print("(ET_HIPROC)\n");
+		break;
+	default:
+		xendump_print("(?)\n");
+	}
+
+        xendump_print("              e_machine: %d ", elf->e_machine);
+	switch (elf->e_machine) 
+	{
+	case EM_386:
+		xendump_print("(EM_386)\n");
+		break;
+	default:
+		xendump_print("(unsupported)\n");
+		break;
+	}
+
+        xendump_print("              e_version: %ld ", (ulong)elf->e_version);
+	xendump_print("%s\n", elf->e_version == EV_CURRENT ? 
+		"(EV_CURRENT)" : "");
+
+        xendump_print("                e_entry: %lx\n", (ulong)elf->e_entry);
+        xendump_print("                e_phoff: %lx\n", (ulong)elf->e_phoff);
+        xendump_print("                e_shoff: %lx\n", (ulong)elf->e_shoff);
+        xendump_print("                e_flags: %lx\n", (ulong)elf->e_flags);
+        xendump_print("               e_ehsize: %x\n", elf->e_ehsize);
+        xendump_print("            e_phentsize: %x\n", elf->e_phentsize);
+        xendump_print("                e_phnum: %x\n", elf->e_phnum);
+        xendump_print("            e_shentsize: %x\n", elf->e_shentsize);
+        xendump_print("                e_shnum: %x\n", elf->e_shnum);
+        xendump_print("             e_shstrndx: %x\n", elf->e_shstrndx);
+
+	/* Determine the strtab location. */
+	
+	offset32 = elf->e_shoff +
+		(elf->e_shstrndx * elf->e_shentsize);
+
+        if (lseek(xd->xfd, offset32, SEEK_SET) != offset32)
+                error(FATAL, 
+		    "xc_core_dump_Elf32_Ehdr: cannot seek to strtab Elf32_Shdr\n");
+        if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr))
+                error(FATAL, 
+		    "xc_core_dump_Elf32_Ehdr: cannot read strtab Elf32_Shdr\n");
+
+	xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset;
+}
+
+/*
+ *  Dump the 64-bit ELF header, and grab a pointer to the strtab section.
+ */
+static void 
+xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *elf)
+{
+	char buf[BUFSIZE];
+        Elf64_Off offset64;
+        Elf64_Shdr shdr;
+
+	BZERO(buf, BUFSIZE);
+	BCOPY(elf->e_ident, buf, SELFMAG); 
+	xendump_print("\nElf64_Ehdr:\n");
+	xendump_print("                e_ident: \\%o%s\n", buf[0], 
+		&buf[1]);
+	xendump_print("      e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]);
+	switch (elf->e_ident[EI_CLASS])
+	{
+	case ELFCLASSNONE:
+		xendump_print("(ELFCLASSNONE)\n");
+		break;
+	case ELFCLASS32:
+		xendump_print("(ELFCLASS32)\n");
+		break;
+	case ELFCLASS64:
+		xendump_print("(ELFCLASS64)\n");
+		break;
+	case ELFCLASSNUM:
+		xendump_print("(ELFCLASSNUM)\n");
+		break;
+	default:
+		xendump_print("(?)\n");
+		break;
+	}
+	xendump_print("       e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]);
+	switch (elf->e_ident[EI_DATA])
+	{
+	case ELFDATANONE:
+		xendump_print("(ELFDATANONE)\n");
+		break;
+	case ELFDATA2LSB: 
+		xendump_print("(ELFDATA2LSB)\n");
+		break;
+	case ELFDATA2MSB:
+		xendump_print("(ELFDATA2MSB)\n");
+		break;
+	case ELFDATANUM:
+		xendump_print("(ELFDATANUM)\n");
+		break;
+        default:
+                xendump_print("(?)\n");
+	}
+	xendump_print("    e_ident[EI_VERSION]: %d ", 
+		elf->e_ident[EI_VERSION]);
+	if (elf->e_ident[EI_VERSION] == EV_CURRENT)
+		xendump_print("(EV_CURRENT)\n");
+	else
+		xendump_print("(?)\n");
+	xendump_print("      e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]);
+	switch (elf->e_ident[EI_OSABI])
+	{
+	case ELFOSABI_SYSV:   
+		xendump_print("(ELFOSABI_SYSV)\n");
+		break;
+	case ELFOSABI_HPUX:    
+		xendump_print("(ELFOSABI_HPUX)\n");
+		break;
+	case ELFOSABI_ARM:      
+		xendump_print("(ELFOSABI_ARM)\n");
+		break;
+	case ELFOSABI_STANDALONE:
+		xendump_print("(ELFOSABI_STANDALONE)\n");
+		break;
+        default:
+                xendump_print("(?)\n");
+	}
+	xendump_print(" e_ident[EI_ABIVERSION]: %d\n", 
+		elf->e_ident[EI_ABIVERSION]);
+
+	xendump_print("                 e_type: %d ", elf->e_type);
+	switch (elf->e_type)
+	{
+	case ET_NONE:
+		xendump_print("(ET_NONE)\n");
+		break;
+	case ET_REL:
+		xendump_print("(ET_REL)\n");
+		break;
+	case ET_EXEC:
+		xendump_print("(ET_EXEC)\n");
+		break;
+	case ET_DYN:
+		xendump_print("(ET_DYN)\n");
+		break;
+	case ET_CORE:
+		xendump_print("(ET_CORE)\n");
+		break;
+	case ET_NUM:
+		xendump_print("(ET_NUM)\n");
+		break;
+	case ET_LOOS:
+		xendump_print("(ET_LOOS)\n");
+		break;
+	case ET_HIOS:
+		xendump_print("(ET_HIOS)\n");
+		break;
+	case ET_LOPROC:
+		xendump_print("(ET_LOPROC)\n");
+		break;
+	case ET_HIPROC:
+		xendump_print("(ET_HIPROC)\n");
+		break;
+	default:
+		xendump_print("(?)\n");
+	}
+
+        xendump_print("              e_machine: %d ", elf->e_machine);
+        switch (elf->e_machine)
+        {
+	case EM_386:
+		xendump_print("(EM_386)\n");
+		break;
+        case EM_IA_64:
+                xendump_print("(EM_IA_64)\n");
+                break;
+        case EM_PPC64:
+                xendump_print("(EM_PPC64)\n");
+                break;
+        case EM_X86_64:
+                xendump_print("(EM_X86_64)\n");
+                break;
+        default:
+                xendump_print("(unsupported)\n");
+                break;
+        }
+
+        xendump_print("              e_version: %ld ", (ulong)elf->e_version);
+	xendump_print("%s\n", elf->e_version == EV_CURRENT ? 
+		"(EV_CURRENT)" : "");
+
+        xendump_print("                e_entry: %lx\n", (ulong)elf->e_entry);
+        xendump_print("                e_phoff: %lx\n", (ulong)elf->e_phoff);
+        xendump_print("                e_shoff: %lx\n", (ulong)elf->e_shoff);
+        xendump_print("                e_flags: %lx\n", (ulong)elf->e_flags);
+        xendump_print("               e_ehsize: %x\n", elf->e_ehsize);
+        xendump_print("            e_phentsize: %x\n", elf->e_phentsize);
+        xendump_print("                e_phnum: %x\n", elf->e_phnum);
+        xendump_print("            e_shentsize: %x\n", elf->e_shentsize);
+        xendump_print("                e_shnum: %x\n", elf->e_shnum);
+        xendump_print("             e_shstrndx: %x\n", elf->e_shstrndx);
+
+	/* Determine the strtab location. */
+
+	offset64 = elf->e_shoff +
+		(elf->e_shstrndx * elf->e_shentsize);
+
+        if (lseek(xd->xfd, offset64, SEEK_SET) != offset64)
+                error(FATAL, 
+		    "xc_core_dump_Elf64_Ehdr: cannot seek to strtab Elf64_Shdr\n");
+        if (read(xd->xfd, &shdr, sizeof(Elf64_Shdr)) != sizeof(Elf64_Shdr))
+                error(FATAL, 
+		    "xc_core_dump_Elf64_Ehdr: cannot read strtab Elf64_Shdr\n");
+
+	xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset;
+}
+
+/*
+ *  Dump each 32-bit section header and the data that they reference.
+ */
+static void 
+xc_core_dump_Elf32_Shdr(Elf32_Off offset, int store)
+{
+	Elf32_Shdr shdr;
+	char name[BUFSIZE];
+	int i;
+	char c;
+
+	if (lseek(xd->xfd, offset, SEEK_SET) != offset)
+		error(FATAL, 
+		    "xc_core_dump_Elf32_Shdr: cannot seek to Elf32_Shdr\n");
+	if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr)) 
+		error(FATAL, 
+		    "xc_core_dump_Elf32_Shdr: cannot read Elf32_Shdr\n");
+
+	xendump_print("\nElf32_Shdr:\n");
+	xendump_print("                sh_name: %lx ", shdr.sh_name);
+	xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name));
+	xendump_print("                sh_type: %lx ", shdr.sh_type);
+	switch (shdr.sh_type)
+	{
+	case SHT_NULL:
+		xendump_print("(SHT_NULL)\n");
+		break;
+	case SHT_PROGBITS:
+		xendump_print("(SHT_PROGBITS)\n");
+		break;
+	case SHT_STRTAB:
+		xendump_print("(SHT_STRTAB)\n");
+		break;
+	case SHT_NOTE:
+		xendump_print("(SHT_NOTE)\n");
+		break;
+	default:
+		xendump_print("\n");
+		break;
+	}
+	xendump_print("               sh_flags: %lx\n", shdr.sh_flags);
+	xendump_print("                sh_addr: %lx\n", shdr.sh_addr);
+	xendump_print("              sh_offset: %lx\n", shdr.sh_offset);
+	xendump_print("                sh_size: %lx\n", shdr.sh_size);
+	xendump_print("                sh_link: %lx\n", shdr.sh_link);
+	xendump_print("                sh_info: %lx\n", shdr.sh_info);
+	xendump_print("           sh_addralign: %lx\n", shdr.sh_addralign);
+	xendump_print("             sh_entsize: %lx\n", shdr.sh_entsize);
+
+	if (STREQ(name, ".shstrtab")) {
+		if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset, SEEK_SET) != 
+		    xd->xc_core.elf_strtab_offset)
+			error(FATAL,
+			    "xc_core_dump_Elf32_Shdr: cannot seek to strtab data\n");
+
+		xendump_print("                         ");
+		for (i = 0; i < shdr.sh_size; i++) {
+			if (read(xd->xfd, &c, sizeof(char)) != sizeof(char)) 
+				error(FATAL, 
+				    "xc_core_dump_Elf32_Shdr: cannot read strtab data\n");
+			if (i && !c)
+				xendump_print("\n                         ");
+			else
+				xendump_print("%c", c);
+		}
+        }
+
+	if (STREQ(name, ".note.Xen"))
+		xc_core_dump_elfnote((off_t)shdr.sh_offset, 
+			(size_t)shdr.sh_size, store);
+
+	if (!store)
+		return;
+
+	if (STREQ(name, ".xen_prstatus"))
+		xd->xc_core.header.xch_ctxt_offset = 
+			(unsigned int)shdr.sh_offset;
+
+	if (STREQ(name, ".xen_shared_info"))
+		xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset;
+
+	if (STREQ(name, ".xen_pfn")) {
+		xd->xc_core.header.xch_index_offset = shdr.sh_offset;
+		xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE);
+	}
+
+	if (STREQ(name, ".xen_p2m")) {
+		xd->xc_core.header.xch_index_offset = shdr.sh_offset;
+		xd->flags |= XC_CORE_P2M_CREATE;
+	}
+
+	if (STREQ(name, ".xen_pages"))
+		xd->xc_core.header.xch_pages_offset = 
+			(unsigned int)shdr.sh_offset;
+
+	if (STREQ(name, ".xen_ia64_mapped_regs"))
+		xd->xc_core.ia64_mapped_regs_offset = 
+			(off_t)shdr.sh_offset;
+}
+
+/*
+ *  Dump each 64-bit section header and the data that they reference.
+ */
+static void 
+xc_core_dump_Elf64_Shdr(Elf64_Off offset, int store)
+{
+	Elf64_Shdr shdr;
+	char name[BUFSIZE];
+	int i;
+	char c;
+
+	if (lseek(xd->xfd, offset, SEEK_SET) != offset)
+		error(FATAL, 
+		    "xc_core_dump_Elf64_Shdr: cannot seek to Elf64_Shdr\n");
+	if (read(xd->xfd, &shdr, sizeof(Elf64_Shdr)) != sizeof(Elf64_Shdr))
+		error(FATAL, 
+		    "xc_core_dump_Elf64_Shdr: cannot read Elf64_Shdr\n");
+
+	xendump_print("\nElf64_Shdr:\n");
+	xendump_print("                sh_name: %x ", shdr.sh_name);
+	xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name));
+	xendump_print("                sh_type: %x ", shdr.sh_type);
+	switch (shdr.sh_type)
+	{
+	case SHT_NULL:
+		xendump_print("(SHT_NULL)\n");
+		break;
+	case SHT_PROGBITS:
+		xendump_print("(SHT_PROGBITS)\n");
+		break;
+	case SHT_STRTAB:
+		xendump_print("(SHT_STRTAB)\n");
+		break;
+	case SHT_NOTE:
+		xendump_print("(SHT_NOTE)\n");
+		break;
+	default:
+		xendump_print("\n");
+		break;
+	}
+	xendump_print("               sh_flags: %lx\n", shdr.sh_flags);
+	xendump_print("                sh_addr: %lx\n", shdr.sh_addr);
+	xendump_print("              sh_offset: %lx\n", shdr.sh_offset);
+	xendump_print("                sh_size: %lx\n", shdr.sh_size);
+	xendump_print("                sh_link: %x\n", shdr.sh_link);
+	xendump_print("                sh_info: %x\n", shdr.sh_info);
+	xendump_print("           sh_addralign: %lx\n", shdr.sh_addralign);
+	xendump_print("             sh_entsize: %lx\n", shdr.sh_entsize);
+
+	if (STREQ(name, ".shstrtab")) {
+		if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset, SEEK_SET) != 
+		    xd->xc_core.elf_strtab_offset)
+			error(FATAL,
+			    "xc_core_dump_Elf64_Shdr: cannot seek to strtab data\n");
+
+		xendump_print("                         ");
+		for (i = 0; i < shdr.sh_size; i++) {
+			if (read(xd->xfd, &c, sizeof(char)) != sizeof(char)) 
+				error(FATAL, 
+				    "xc_core_dump_Elf64_Shdr: cannot read strtab data\n");
+			if (i && !c)
+				xendump_print("\n                         ");
+			else
+				xendump_print("%c", c);
+		}
+	}
+
+	if (STREQ(name, ".note.Xen"))
+		xc_core_dump_elfnote((off_t)shdr.sh_offset, 
+			(size_t)shdr.sh_size, store);
+
+	if (!store)
+		return;
+
+	if (STREQ(name, ".xen_prstatus"))
+		xd->xc_core.header.xch_ctxt_offset = 
+			(unsigned int)shdr.sh_offset;
+
+	if (STREQ(name, ".xen_shared_info"))
+		xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset;
+
+	if (STREQ(name, ".xen_pfn")) {
+		xd->xc_core.header.xch_index_offset = shdr.sh_offset;
+		xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE);
+	}
+
+	if (STREQ(name, ".xen_p2m")) {
+		xd->xc_core.header.xch_index_offset = shdr.sh_offset;
+		xd->flags |= XC_CORE_P2M_CREATE;
+	}
+
+	if (STREQ(name, ".xen_pages"))
+		xd->xc_core.header.xch_pages_offset = 
+			(unsigned int)shdr.sh_offset;
+
+	if (STREQ(name, ".xen_ia64_mapped_regs"))
+		xd->xc_core.ia64_mapped_regs_offset = 
+			(off_t)shdr.sh_offset;
+}
+
+/*
+ *  Return the string found at the specified index into
+ *  the dumpfile's strtab.
+ */
+static char *
+xc_core_strtab(uint32_t index, char *buf)
+{
+	off_t offset;
+	int i;
+
+	offset = xd->xc_core.elf_strtab_offset + index;
+
+	if (lseek(xd->xfd, offset, SEEK_SET) != offset)
+		error(FATAL, 
+		    "xc_core_strtab: cannot seek to strtab data\n");
+
+	BZERO(buf, BUFSIZE);
+	i = 0;
+
+	while (read(xd->xfd, &buf[i], sizeof(char)) == sizeof(char)) {
+		if (buf[i] == NULLCHAR)
+			break;
+		i++;
+	}
+
+	return buf;
+}
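+
+/*
+ *  For example (hypothetical offsets):  if elf_strtab_offset is 0x2000
+ *  and a section header's sh_name is 0x1b, the name is read starting at
+ *  file offset 0x201b, one character at a time, until the terminating
+ *  NULL is found -- yielding a string such as ".xen_p2m".
+ */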
+
+
+/*
+ *  Dump the array of elfnote structures, storing relevant info
+ *  when requested during initialization.  This function is 
+ *  common to both 32-bit and 64-bit ELF files.
+ */
+static void 
+xc_core_dump_elfnote(off_t sh_offset, size_t sh_size, int store)
+{
+	int i, lf, index;
+	char *notes_buffer;
+	struct elfnote *elfnote;
+	ulonglong *data;
+	struct xen_dumpcore_elfnote_header_desc *elfnote_header;
+	struct xen_dumpcore_elfnote_format_version_desc *format_version;
+
+	elfnote_header = NULL;
+	format_version = NULL;
+
+        if (!(notes_buffer = (char *)malloc(sh_size)))
+                error(FATAL, "cannot malloc notes space.");
+
+	if (lseek(xd->xfd, sh_offset, SEEK_SET) != sh_offset)
+		error(FATAL, 
+		    "xc_core_dump_elfnote: cannot seek to sh_offset\n");
+
+        if (read(xd->xfd, notes_buffer, sh_size) != sh_size)
+                error(FATAL,
+                    "xc_core_dump_elfnote: cannot read elfnote data\n");
+
+	for (index = 0; index < sh_size; ) {
+		elfnote = (struct elfnote *)&notes_buffer[index];
+		xendump_print("                 namesz: %d\n", elfnote->namesz);
+		xendump_print("                 descsz: %d\n", elfnote->descsz);
+		xendump_print("                   type: %x ", elfnote->type);
+		switch (elfnote->type) 
+		{
+		case XEN_ELFNOTE_DUMPCORE_NONE:           
+			xendump_print("(XEN_ELFNOTE_DUMPCORE_NONE)\n");
+			break;
+		case XEN_ELFNOTE_DUMPCORE_HEADER:
+			xendump_print("(XEN_ELFNOTE_DUMPCORE_HEADER)\n");
+			elfnote_header = (struct xen_dumpcore_elfnote_header_desc *)
+				(elfnote+1);
+			break;
+		case XEN_ELFNOTE_DUMPCORE_XEN_VERSION:   
+			xendump_print("(XEN_ELFNOTE_DUMPCORE_XEN_VERSION)\n");
+			break;
+		case XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION:
+			xendump_print("(XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION)\n");
+			format_version = (struct xen_dumpcore_elfnote_format_version_desc *)
+				(elfnote+1);
+			break;
+		default:
+			xendump_print("(unknown)\n");
+			break;
+		}
+		xendump_print("                   name: %s\n", elfnote->name);
+
+		data = (ulonglong *)(elfnote+1);
+		for (i = lf = 0; i < elfnote->descsz/sizeof(ulonglong); i++) {
+			if (((i%2)==0)) {
+				xendump_print("%s                         ",
+					i ? "\n" : "");
+				lf++;
+			} else
+				lf = 0;
+			xendump_print("%016llx ", *data++);
+                }
+		if (!elfnote->descsz)
+			xendump_print("                         (empty)");
+		xendump_print("\n");
+
+		index += sizeof(struct elfnote) + elfnote->descsz;
+	}
+
+	if (!store)
+		return;
+
+	if (elfnote_header) {
+		xd->xc_core.header.xch_magic = elfnote_header->xch_magic;
+		xd->xc_core.header.xch_nr_vcpus = elfnote_header->xch_nr_vcpus;
+		xd->xc_core.header.xch_nr_pages = elfnote_header->xch_nr_pages;
+		xd->page_size = elfnote_header->xch_page_size;
+	}
+
+	if (format_version) {
+		switch (format_version->version)
+		{
+		case FORMAT_VERSION_0000000000000001:
+			break;
+		default:
+			error(WARNING, 
+			    "unsupported xen dump-core format version: %016llx\n",
+				format_version->version);
+		}
+		xd->xc_core.format_version = format_version->version;
+	}
+
+}
+
+/*
+ *  Initialize the batching list for the .xen_p2m or .xen_pfn
+ *  arrays.
+ */
+static void 
+xc_core_elf_pfn_init(void)
+{
+	int i, c, chunk;
+	off_t offset;
+	struct xen_dumpcore_p2m p2m;
+	uint64_t pfn;
+
+	switch (xd->flags & (XC_CORE_ELF|XC_CORE_NO_P2M)) 
+	{
+	case (XC_CORE_ELF|XC_CORE_NO_P2M):
+		chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT;
+
+		for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) {
+			offset = (off_t)xd->xc_core.header.xch_index_offset +
+				(off_t)(c * sizeof(uint64_t));
+
+	        	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+	                	error(FATAL, 
+				    "cannot lseek to page index %d\n", c);
+			if (read(xd->xfd, &pfn, sizeof(uint64_t)) != 
+			    sizeof(uint64_t))
+	                	error(FATAL, 
+				    "cannot read page index %d\n", c);
+
+			xd->xc_core.elf_index_pfn[i].index = c;
+			xd->xc_core.elf_index_pfn[i].pfn = (ulong)pfn;
+		}
+		break;
+
+	case XC_CORE_ELF:
+		chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT;
+	
+		for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) {
+			offset = (off_t)xd->xc_core.header.xch_index_offset +
+				(off_t)(c * sizeof(struct xen_dumpcore_p2m));
+
+	        	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+	                	error(FATAL, 
+				    "cannot lseek to page index %d\n", c);
+			if (read(xd->xfd, &p2m, sizeof(struct xen_dumpcore_p2m)) !=
+				sizeof(struct xen_dumpcore_p2m))
+	                	error(FATAL, 
+				    "cannot read page index %d\n", c);
+	
+			xd->xc_core.elf_index_pfn[i].index = c;
+			xd->xc_core.elf_index_pfn[i].pfn = (ulong)p2m.pfn;
+		}
+		break;
+	}
+}
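+
+/*
+ *  For illustration:  if the dumpfile contained 0x100000 pages and
+ *  INDEX_PFN_COUNT were 16, chunk would be 0x10000, so elf_index_pfn[]
+ *  would record the pfns found at .xen_p2m (or .xen_pfn) entries
+ *  0, 0x10000, 0x20000, ... 0xf0000 -- the sampled starting points
+ *  consulted later by the pfn-to-page-index searches.
+ */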
+
+struct xendump_data *
+get_xendump_data(void)
+{
+	return (XENDUMP_VALID() ? xd : NULL);
+}
--- crash/ppc.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/ppc.c	2007-03-21 15:02:37.000000000 -0400
@@ -51,6 +51,9 @@
 void
 ppc_init(int when)
 {
+	uint cpu_features;
+	ulong cur_cpu_spec;
+
 	switch (when)
 	{
 	case PRE_SYMTAB:
@@ -135,9 +138,23 @@
 				"irq_desc", NULL, 0);
 		else
 			machdep->nr_irqs = 0;
-		machdep->hz = HZ;
-		if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
-			machdep->hz = 1000;
+		if (!machdep->hz) {
+			machdep->hz = HZ;
+			if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
+				machdep->hz = 1000;
+		}
+		if (symbol_exists("cur_cpu_spec")) {
+			get_symbol_data("cur_cpu_spec", sizeof(void *), &cur_cpu_spec);
+			readmem(cur_cpu_spec + MEMBER_OFFSET("cpu_spec", "cpu_user_features"), 
+				KVADDR, &cpu_features, sizeof(uint), "cpu user features",
+				FAULT_ON_ERROR);
+			if (cpu_features & CPU_BOOKE)
+				machdep->flags |= CPU_BOOKE;
+		}
+		else
+			machdep->flags |= CPU_BOOKE;
+		machdep->section_size_bits = _SECTION_SIZE_BITS;
+		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
 		break;
 
 	case POST_INIT:
@@ -154,8 +171,6 @@
         fprintf(fp, "              flags: %lx (", machdep->flags);
 	if (machdep->flags & KSYMS_START)
 		fprintf(fp, "%sKSYMS_START", others++ ? "|" : "");
-	if (machdep->flags & SYSRQ)
-		fprintf(fp, "%sSYSRQ", others++ ? "|" : "");
         fprintf(fp, ")\n");
 
 	fprintf(fp, "             kvbase: %lx\n", machdep->kvbase);
@@ -205,6 +220,9 @@
         fprintf(fp, "                pmd: %lx\n", (ulong)machdep->pmd);
         fprintf(fp, "               ptbl: %lx\n", (ulong)machdep->ptbl);
 	fprintf(fp, "       ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd);
+        fprintf(fp, "  section_size_bits: %ld\n", machdep->section_size_bits);
+        fprintf(fp, "   max_physmem_bits: %ld\n", machdep->max_physmem_bits);
+        fprintf(fp, "  sections_per_root: %ld\n", machdep->sections_per_root);
 	fprintf(fp, "           machspec: %lx\n", (ulong)machdep->machspec);
 }
 
@@ -280,7 +298,11 @@
 
 	page_middle = (ulong *)pgd_pte;
 
-	page_table = page_middle + (BTOP(vaddr) & (PTRS_PER_PTE - 1));
+	if (machdep->flags & CPU_BOOKE)
+		page_table = page_middle + (BTOP(vaddr) & (PTRS_PER_PTE - 1));
+	else
+		page_table = (ulong *)(((pgd_pte & (ulong)machdep->pagemask) + machdep->kvbase) +
+			((ulong)BTOP(vaddr) & (PTRS_PER_PTE-1)));
 
 	if (verbose)
 		fprintf(fp, "  PMD: %lx => %lx\n",(ulong)page_middle, 
@@ -364,7 +386,11 @@
 
 	page_middle = (ulong *)pgd_pte;
 
-	page_table = page_middle + (BTOP(kvaddr) & (PTRS_PER_PTE-1));
+	if (machdep->flags & CPU_BOOKE)
+		page_table = page_middle + (BTOP(kvaddr) & (PTRS_PER_PTE - 1));
+	else
+		page_table = (ulong *)(((pgd_pte & (ulong)machdep->pagemask) + machdep->kvbase) +
+			((ulong)BTOP(kvaddr) & (PTRS_PER_PTE-1)));
 
 	if (verbose)
 		fprintf(fp, "  PMD: %lx => %lx\n", (ulong)page_middle, 
--- crash/x86.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/x86.c	2009-02-04 15:30:50.000000000 -0500
@@ -1,8 +1,8 @@
 /* x86.c - core analysis suite
  *
  * Portions Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -51,6 +51,7 @@
  * rights to redistribute these changes.
  */
 #include "defs.h"
+#include "xen_hyper_defs.h"
 
 #ifndef MCLX
 
@@ -176,6 +177,7 @@
 static void db_symbol_values(db_sym_t, char **, db_expr_t *);
 static int db_sym_numargs(db_sym_t, int *, char **);
 static void x86_dump_line_number(ulong);
+static void x86_clear_machdep_cache(void);
 
 static ulong mach_debug = 0;
 
@@ -215,7 +217,7 @@
 
 	argp = (int *)db_get_value((int)&fp->f_retaddr, 4, FALSE, bt);
 	/*
-	 * XXX etext is wrong for LKMs.  We should attempt to interpret
+	 * etext is wrong for LKMs.  We should attempt to interpret
 	 * the instruction at the return address in all cases.  This
 	 * may require better fault handling.
 	 */
@@ -685,6 +687,7 @@
 	    bt->debug || 
 	    (bt->flags & BT_FRAMESIZE_DEBUG) ||
 	    !(bt->flags & BT_OLD_BACK_TRACE)) {
+		bt->flags &= ~BT_OLD_BACK_TRACE;
                 lkcd_x86_back_trace(bt, 0, fp);
                 return;
         }
@@ -962,8 +965,12 @@
  */
 static int x86_uvtop(struct task_context *, ulong, physaddr_t *, int);
 static int x86_kvtop(struct task_context *, ulong, physaddr_t *, int);
-static int x86_uvtop_pae(struct task_context *, ulong, physaddr_t *, int);
-static int x86_kvtop_pae(struct task_context *, ulong, physaddr_t *, int);
+static int x86_uvtop_PAE(struct task_context *, ulong, physaddr_t *, int);
+static int x86_kvtop_PAE(struct task_context *, ulong, physaddr_t *, int);
+static int x86_uvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int);
+static int x86_kvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int);
+static int x86_uvtop_xen_wpt_PAE(struct task_context *, ulong, physaddr_t *, int);
+static int x86_kvtop_xen_wpt_PAE(struct task_context *, ulong, physaddr_t *, int);
 static ulong x86_get_task_pgd(ulong);
 static ulong x86_processor_speed(void);
 static ulong x86_get_pc(struct bt_info *);
@@ -973,6 +980,7 @@
 static uint64_t x86_memory_size(void);
 static ulong x86_vmalloc_start(void);
 static ulong *read_idt_table(int);
+static void eframe_init(void);
 #define READ_IDT_INIT     1
 #define READ_IDT_RUNTIME  2
 static char *extract_idt_function(ulong *, char *, ulong *);
@@ -983,26 +991,42 @@
 static int x86_dis_filter(ulong, char *);
 static struct line_number_hook x86_line_number_hooks[];
 static int x86_is_uvaddr(ulong, struct task_context *);
+static void x86_init_kernel_pgd(void);
+static ulong xen_m2p_nonPAE(ulong);
+static int x86_xendump_p2m_create(struct xendump_data *);
+static int x86_xen_kdump_p2m_create(struct xen_kdump_data *);
+static char *x86_xen_kdump_load_page(ulong, char *);
+static char *x86_xen_kdump_load_page_PAE(ulong, char *);
+static ulong x86_xen_kdump_page_mfn(ulong);
+static ulong x86_xen_kdump_page_mfn_PAE(ulong);
+static ulong x86_xendump_panic_task(struct xendump_data *);
+static void x86_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *);
+static char *x86_xendump_load_page(ulong, char *);
+static char *x86_xendump_load_page_PAE(ulong, char *);
+static int x86_xendump_page_index(ulong);
+static int x86_xendump_page_index_PAE(ulong);
+static void x86_init_hyper(int);
+static ulong x86_get_stackbase_hyper(ulong);
+static ulong x86_get_stacktop_hyper(ulong);
+
+int INT_EFRAME_SS = 14;
+int INT_EFRAME_ESP = 13;
+int INT_EFRAME_EFLAGS = 12;   /* CS lcall7 */
+int INT_EFRAME_CS = 11;       /* EIP lcall7 */
+int INT_EFRAME_EIP = 10;      /* EFLAGS lcall7 */
+int INT_EFRAME_ERR = 9;
+int INT_EFRAME_ES = 8;
+int INT_EFRAME_DS = 7;
+int INT_EFRAME_EAX = 6;
+int INT_EFRAME_EBP = 5;
+int INT_EFRAME_EDI = 4;
+int INT_EFRAME_ESI = 3;
+int INT_EFRAME_EDX = 2;
+int INT_EFRAME_ECX = 1;
+int INT_EFRAME_EBX = 0;
+int INT_EFRAME_GS = -1;
 
-
-#define INT_EFRAME_SS      (14)
-#define INT_EFRAME_ESP     (13)
-#define INT_EFRAME_EFLAGS  (12)   /* CS lcall7 */
-#define INT_EFRAME_CS      (11)   /* EIP lcall7 */
-#define INT_EFRAME_EIP     (10)   /* EFLAGS lcall7 */
-#define INT_EFRAME_ERR     (9)    
-
-#define INT_EFRAME_ES      (8)
-#define INT_EFRAME_DS      (7)
-#define INT_EFRAME_EAX     (6)
-#define INT_EFRAME_EBP     (5)
-#define INT_EFRAME_EDI     (4)
-#define INT_EFRAME_ESI     (3)
-#define INT_EFRAME_EDX     (2)
-#define INT_EFRAME_ECX     (1)
-#define INT_EFRAME_EBX     (0)
-
-#define USER_EFRAME_SIZE   (INT_EFRAME_SS+1)
+#define MAX_USER_EFRAME_SIZE   (16)
 #define KERNEL_EFRAME_SIZE (INT_EFRAME_EFLAGS+1)
 
 #define EFRAME_USER   (1)
@@ -1015,7 +1039,7 @@
 {
 	int i;
 	char buf[BUFSIZE], *sp;
-	ulong int_eframe[USER_EFRAME_SIZE];
+	ulong int_eframe[MAX_USER_EFRAME_SIZE];
 	int eframe_type, args;
 	ulong value, *argp;
 
@@ -1025,11 +1049,11 @@
 		return(frame_number);
 
 	GET_STACK_DATA(ep->eframe_addr, (char *)int_eframe,
-		USER_EFRAME_SIZE * sizeof(ulong));	
+		SIZE(pt_regs));	
 
 	if (int_eframe[INT_EFRAME_CS] & DPL_BITS) {
 		if (!INSTACK(ep->eframe_addr + 
-		    (USER_EFRAME_SIZE*sizeof(ulong)) - 1, bt))
+		    SIZE(pt_regs) - 1, bt))
 			return(frame_number);
 	/* error(FATAL, "read of exception frame would go beyond stack\n"); */
 		eframe_type = EFRAME_USER;
@@ -1158,17 +1182,24 @@
                         int_eframe[INT_EFRAME_EDX]);
 
         fprintf(fp, 
-		"    DS:  %04x      ESI: %08lx  ES:  %04x      EDI: %08lx \n",
+		"    DS:  %04x      ESI: %08lx  ES:  %04x      EDI: %08lx",
                 (short)int_eframe[INT_EFRAME_DS],
                 int_eframe[INT_EFRAME_ESI],
                 (short)int_eframe[INT_EFRAME_ES],
                 int_eframe[INT_EFRAME_EDI]);
+	if (kernel && (INT_EFRAME_GS != -1))
+		fprintf(fp, "  GS:  %04x", (short)int_eframe[INT_EFRAME_GS]);
+	fprintf(fp, "\n");
 
-	if (!kernel)
-		fprintf(fp, "    SS:  %04x      ESP: %08lx  EBP: %08lx \n",
+	if (!kernel) {
+		fprintf(fp, "    SS:  %04x      ESP: %08lx  EBP: %08lx",
 			(short)int_eframe[INT_EFRAME_SS],
 			int_eframe[INT_EFRAME_ESP],
                         int_eframe[INT_EFRAME_EBP]);
+		if (INT_EFRAME_GS != -1)
+			fprintf(fp, "  GS:  %04x", (short)int_eframe[INT_EFRAME_GS]);
+		fprintf(fp, "\n");
+	}
 
 	fprintf(fp, 
 	    "    CS:  %04x      EIP: %08lx  ERR: %08lx  EFLAGS: %08lx \n",
@@ -1355,7 +1386,7 @@
  */
 
 struct x86_pt_regs {
-	ulong reg_value[USER_EFRAME_SIZE];
+	ulong reg_value[MAX_USER_EFRAME_SIZE];
 };
 
 /*
@@ -1420,6 +1451,17 @@
                         break;
                 }
 
+                if (XEN() && ((short)pt->reg_value[INT_EFRAME_CS] == 0x61) &&
+                    ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) &&
+                    ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) &&
+                    IS_KVADDR(pt->reg_value[INT_EFRAME_EIP])) {
+                        if (!(machdep->flags & OMIT_FRAME_PTR) &&
+                            !INSTACK(pt->reg_value[INT_EFRAME_EBP], bt))
+                                continue;
+                        rv = bt->stackbase + sizeof(ulong) * (first - stack);
+                        break;
+                }
+
 		/* check for user exception frame */
 
 		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x23) &&
@@ -1441,6 +1483,20 @@
                         rv = bt->stackbase + sizeof(ulong) * (first - stack);
                         break;
                 }
+
+		/*
+		 *  2.6 kernels using sysenter_entry instead of system_call
+		 *  have a funky trampoline EIP address.
+		 */
+                if (((short)pt->reg_value[INT_EFRAME_CS] == 0x73) &&
+                    ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) &&
+                    ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) &&
+                    ((short)pt->reg_value[INT_EFRAME_SS] == 0x7b) &&
+                    (pt->reg_value[INT_EFRAME_EFLAGS] == 0x246) &&
+                    IS_UVADDR(pt->reg_value[INT_EFRAME_ESP], bt->tc)) {
+                        rv = bt->stackbase + sizeof(ulong) * (first - stack);
+                        break;
+                }
         }
         return(rv);
 }
@@ -1536,6 +1592,8 @@
                         mode = "USER-MODE";
                 } else if ((cs == 0x10) || (cs == 0x60)) {
                         mode = "KERNEL-MODE";
+		} else if (XEN() && (cs == 0x61)) {
+                        mode = "KERNEL-MODE";
                 } else {
                         mode = "UNKNOWN-MODE";
                 }
@@ -1559,6 +1617,9 @@
 x86_in_irqstack(ulong addr)
 {
 	int c;
+
+	if (!(tt->flags & IRQSTACKS))
+		return 0;
 	
 	for (c = 0; c < NR_CPUS; c++) {
                 if (tt->hardirq_ctx[c]) {
@@ -1626,6 +1687,11 @@
 {
 	struct syment *sp, *spn;
 
+	if (XEN_HYPER_MODE()) {
+		x86_init_hyper(when);
+		return;
+	}
+
 	switch (when)
 	{
 	case PRE_SYMTAB:
@@ -1639,7 +1705,7 @@
 		machdep->stacksize = machdep->pagesize * 2;
         	if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
                 	error(FATAL, "cannot malloc pgd space.");
-               if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
+                if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
                         error(FATAL, "cannot malloc pmd space.");
         	if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
                 	error(FATAL, "cannot malloc ptbl space.");
@@ -1659,8 +1725,8 @@
 			PGDIR_SHIFT = PGDIR_SHIFT_3LEVEL;
 			PTRS_PER_PTE = PTRS_PER_PTE_3LEVEL;
 			PTRS_PER_PGD = PTRS_PER_PGD_3LEVEL;
-                        machdep->uvtop = x86_uvtop_pae;
-                        machdep->kvtop = x86_kvtop_pae;
+                        machdep->uvtop = x86_uvtop_PAE;
+                        machdep->kvtop = x86_kvtop_PAE;
 		} else {
 			PGDIR_SHIFT = PGDIR_SHIFT_2LEVEL;
                         PTRS_PER_PTE = PTRS_PER_PTE_2LEVEL;
@@ -1696,19 +1762,53 @@
 		machdep->cmd_mach = x86_cmd_mach;
 		machdep->get_smp_cpus = x86_get_smp_cpus;
 		machdep->line_number_hooks = x86_line_number_hooks;
-		if (x86_omit_frame_pointer())
-			machdep->flags |= OMIT_FRAME_PTR;
 		machdep->flags |= FRAMESIZE_DEBUG;
 		machdep->value_to_symbol = generic_machdep_value_to_symbol;
-		machdep->init_kernel_pgd = NULL;
+		machdep->init_kernel_pgd = x86_init_kernel_pgd;
+		machdep->xendump_p2m_create = x86_xendump_p2m_create;
+		machdep->xen_kdump_p2m_create = x86_xen_kdump_p2m_create;
+		machdep->xendump_panic_task = x86_xendump_panic_task;
+		machdep->get_xendump_regs = x86_get_xendump_regs;
+		machdep->clear_machdep_cache = x86_clear_machdep_cache;
 		break;
 
 	case POST_GDB:
+		if (x86_omit_frame_pointer())
+			machdep->flags |= OMIT_FRAME_PTR;
 		STRUCT_SIZE_INIT(user_regs_struct, "user_regs_struct");
-		MEMBER_OFFSET_INIT(user_regs_struct_ebp,
-			"user_regs_struct", "ebp");
-		MEMBER_OFFSET_INIT(user_regs_struct_esp,
-			"user_regs_struct", "esp");
+		if (MEMBER_EXISTS("user_regs_struct", "ebp"))
+			MEMBER_OFFSET_INIT(user_regs_struct_ebp,
+				"user_regs_struct", "ebp");
+		else
+			MEMBER_OFFSET_INIT(user_regs_struct_ebp,
+				"user_regs_struct", "bp");
+		if (MEMBER_EXISTS("user_regs_struct", "esp"))
+			MEMBER_OFFSET_INIT(user_regs_struct_esp,
+				"user_regs_struct", "esp");
+		else
+			MEMBER_OFFSET_INIT(user_regs_struct_esp,
+				"user_regs_struct", "sp");
+		if (!VALID_STRUCT(user_regs_struct)) {
+			/*  Use this hardwired version -- sometimes the 
+			 *  debuginfo doesn't pick this up even though
+			 *  it exists in the kernel; it shouldn't change.
+			 */
+			struct x86_user_regs_struct {
+			        long ebx, ecx, edx, esi, edi, ebp, eax;
+			        unsigned short ds, __ds, es, __es;
+			        unsigned short fs, __fs, gs, __gs;
+			        long orig_eax, eip;
+			        unsigned short cs, __cs;
+			        long eflags, esp;
+			        unsigned short ss, __ss;
+			};
+			ASSIGN_SIZE(user_regs_struct) = 
+				sizeof(struct x86_user_regs_struct);
+			ASSIGN_OFFSET(user_regs_struct_ebp) =
+				offsetof(struct x86_user_regs_struct, ebp);
+			ASSIGN_OFFSET(user_regs_struct_esp) =
+				offsetof(struct x86_user_regs_struct, esp);
+		}
 		MEMBER_OFFSET_INIT(thread_struct_cr3, "thread_struct", "cr3");
 		STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86");
 		STRUCT_SIZE_INIT(e820map, "e820map");
@@ -1723,9 +1823,42 @@
 				"irq_desc", NULL, 0);
 		else
 			machdep->nr_irqs = 224;  /* NR_IRQS */
-		machdep->hz = HZ;
-		if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
-			machdep->hz = 1000;
+		if (!machdep->hz) {
+			machdep->hz = HZ;
+			if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
+				machdep->hz = 1000;
+		}
+
+		if (machdep->flags & PAE) {
+			if (THIS_KERNEL_VERSION < LINUX(2,6,26))
+				machdep->section_size_bits =
+					_SECTION_SIZE_BITS_PAE_ORIG;
+			else
+				machdep->section_size_bits =
+					_SECTION_SIZE_BITS_PAE_2_6_26;
+			machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_PAE;
+		} else {
+			machdep->section_size_bits = _SECTION_SIZE_BITS;
+			machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
+		}
+
+		if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) {
+			if (machdep->flags & PAE) 
+                        	machdep->uvtop = x86_uvtop_xen_wpt_PAE;
+			else
+                        	machdep->uvtop = x86_uvtop_xen_wpt;
+		} 
+
+		if (XEN()) {
+			MEMBER_OFFSET_INIT(vcpu_guest_context_user_regs,
+				"vcpu_guest_context", "user_regs");
+			MEMBER_OFFSET_INIT(cpu_user_regs_esp,
+				"cpu_user_regs", "esp");
+			MEMBER_OFFSET_INIT(cpu_user_regs_eip,
+				"cpu_user_regs", "eip");
+		}
+
+		eframe_init();
 		break;
 
 	case POST_INIT:
@@ -1735,6 +1868,67 @@
 }
 
 /*
+ *  Account for addition of pt_regs.xgs field in 2.6.20+ kernels.
+ */
+static void
+eframe_init(void)
+{
+	if (INVALID_SIZE(pt_regs)) {
+		if (THIS_KERNEL_VERSION < LINUX(2,6,20))
+			ASSIGN_SIZE(pt_regs) = (MAX_USER_EFRAME_SIZE-1)*sizeof(ulong);
+		else {
+			ASSIGN_SIZE(pt_regs) = MAX_USER_EFRAME_SIZE*sizeof(ulong);
+			INT_EFRAME_SS = 15;
+			INT_EFRAME_ESP = 14;
+			INT_EFRAME_EFLAGS = 13;
+			INT_EFRAME_CS = 12;
+			INT_EFRAME_EIP = 11;
+			INT_EFRAME_ERR = 10;
+			INT_EFRAME_GS = 9;
+		}
+		return;
+	}
+
+	if (MEMBER_EXISTS("pt_regs", "esp")) {
+		INT_EFRAME_SS = MEMBER_OFFSET("pt_regs", "xss") / 4; 
+		INT_EFRAME_ESP = MEMBER_OFFSET("pt_regs", "esp") / 4;
+		INT_EFRAME_EFLAGS = MEMBER_OFFSET("pt_regs", "eflags") / 4;
+		INT_EFRAME_CS = MEMBER_OFFSET("pt_regs", "xcs") / 4;
+		INT_EFRAME_EIP = MEMBER_OFFSET("pt_regs", "eip") / 4;
+		INT_EFRAME_ERR = MEMBER_OFFSET("pt_regs", "orig_eax") / 4;
+		if ((INT_EFRAME_GS = MEMBER_OFFSET("pt_regs", "xgs")) != -1)
+			INT_EFRAME_GS /= 4;
+		INT_EFRAME_ES = MEMBER_OFFSET("pt_regs", "xes") / 4;
+		INT_EFRAME_DS = MEMBER_OFFSET("pt_regs", "xds") / 4;
+		INT_EFRAME_EAX = MEMBER_OFFSET("pt_regs", "eax") / 4;
+		INT_EFRAME_EBP = MEMBER_OFFSET("pt_regs", "ebp") / 4;
+		INT_EFRAME_EDI = MEMBER_OFFSET("pt_regs", "edi") / 4;
+		INT_EFRAME_ESI = MEMBER_OFFSET("pt_regs", "esi") / 4;
+		INT_EFRAME_EDX = MEMBER_OFFSET("pt_regs", "edx") / 4;
+		INT_EFRAME_ECX = MEMBER_OFFSET("pt_regs", "ecx") / 4;
+		INT_EFRAME_EBX = MEMBER_OFFSET("pt_regs", "ebx") / 4;
+	} else {
+		INT_EFRAME_SS = MEMBER_OFFSET("pt_regs", "ss") / 4; 
+		INT_EFRAME_ESP = MEMBER_OFFSET("pt_regs", "sp") / 4;
+		INT_EFRAME_EFLAGS = MEMBER_OFFSET("pt_regs", "flags") / 4;
+		INT_EFRAME_CS = MEMBER_OFFSET("pt_regs", "cs") / 4;
+		INT_EFRAME_EIP = MEMBER_OFFSET("pt_regs", "ip") / 4;
+		INT_EFRAME_ERR = MEMBER_OFFSET("pt_regs", "orig_ax") / 4;
+		if ((INT_EFRAME_GS = MEMBER_OFFSET("pt_regs", "gs")) != -1)
+			INT_EFRAME_GS /= 4;
+		INT_EFRAME_ES = MEMBER_OFFSET("pt_regs", "es") / 4;
+		INT_EFRAME_DS = MEMBER_OFFSET("pt_regs", "ds") / 4;
+		INT_EFRAME_EAX = MEMBER_OFFSET("pt_regs", "ax") / 4;
+		INT_EFRAME_EBP = MEMBER_OFFSET("pt_regs", "bp") / 4;
+		INT_EFRAME_EDI = MEMBER_OFFSET("pt_regs", "di") / 4;
+		INT_EFRAME_ESI = MEMBER_OFFSET("pt_regs", "si") / 4;
+		INT_EFRAME_EDX = MEMBER_OFFSET("pt_regs", "dx") / 4;
+		INT_EFRAME_ECX = MEMBER_OFFSET("pt_regs", "cx") / 4;
+		INT_EFRAME_EBX = MEMBER_OFFSET("pt_regs", "bx") / 4;
+	}
+}
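+
+/*
+ *  Example (based on the hardwired 2.6.20+ defaults above):  with the
+ *  xgs field at byte offset 36 in pt_regs, INT_EFRAME_GS becomes
+ *  36/4 = 9, and the exception frame grows to MAX_USER_EFRAME_SIZE (16)
+ *  ulong slots, with SS landing in slot 15.
+ */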
+
+/*
  *  Needs to be done this way because of potential 4G/4G split.
  */
 static int 
@@ -1825,7 +2019,7 @@
                         fprintf(fp, " PAGE: %s  (4MB)\n\n", 
 				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
 				MKSTR(NONPAE_PAGEBASE(pgd_pte))));
-			x86_translate_pte(0, 0, pgd_pte);
+			x86_translate_pte(pgd_pte, 0, 0);
 		}
 
 		*paddr = NONPAE_PAGEBASE(pgd_pte) + (vaddr & ~_4MB_PAGE_MASK);
@@ -1892,7 +2086,170 @@
 }
 
 static int
-x86_uvtop_pae(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose)
+x86_uvtop_xen_wpt(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose)
+{
+	ulong mm, active_mm;
+	ulong *pgd;
+	ulong *page_dir;
+	ulong *page_middle;
+	ulong *machine_page_table, *pseudo_page_table;
+	ulong pgd_pte, pseudo_pgd_pte;
+	ulong pmd_pte;
+	ulong machine_pte, pseudo_pte;
+	char buf[BUFSIZE];
+
+	if (!tc)
+		error(FATAL, "current context invalid\n");
+
+	*paddr = 0;
+
+        if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { 
+	    	if (VALID_MEMBER(thread_struct_cr3)) 
+                	pgd = (ulong *)machdep->get_task_pgd(tc->task);
+		else {
+			if (INVALID_MEMBER(task_struct_active_mm))
+				error(FATAL, "no cr3 or active_mm?\n");
+
+                	readmem(tc->task + OFFSET(task_struct_active_mm), 
+				KVADDR, &active_mm, sizeof(void *),
+                        	"task active_mm contents", FAULT_ON_ERROR);
+
+			if (!active_mm)
+				error(FATAL, 
+				     "no active_mm for this kernel thread\n");
+
+			readmem(active_mm + OFFSET(mm_struct_pgd), 
+				KVADDR, &pgd, sizeof(long), 
+				"mm_struct pgd", FAULT_ON_ERROR);
+		}
+        } else {
+		if ((mm = task_mm(tc->task, TRUE)))
+			pgd = ULONG_PTR(tt->mm_struct + 
+				OFFSET(mm_struct_pgd));
+		else
+			readmem(tc->mm_struct + OFFSET(mm_struct_pgd), 
+				KVADDR, &pgd, sizeof(long), "mm_struct pgd", 
+				FAULT_ON_ERROR);
+	}
+
+	if (verbose) 
+		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+
+	page_dir = pgd + (vaddr >> PGDIR_SHIFT);
+
+	FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE());
+	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));
+
+	if (verbose)
+		fprintf(fp, "  PGD: %s => %lx\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
+			MKSTR((ulong)page_dir)),
+			pgd_pte);
+
+	if (!pgd_pte)
+		goto no_upage;
+
+        if (pgd_pte & _PAGE_4M) {
+                if (verbose) 
+                        fprintf(fp, " PAGE: %s  (4MB) [machine]\n", 
+				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
+				MKSTR(NONPAE_PAGEBASE(pgd_pte))));
+
+		pseudo_pgd_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(pgd_pte));
+
+                if (pseudo_pgd_pte == XEN_MFN_NOT_FOUND) {
+                        if (verbose)
+                                fprintf(fp, " PAGE: page not available\n");
+                        *paddr = PADDR_NOT_AVAILABLE;
+                        return FALSE;
+                }
+
+		pseudo_pgd_pte |= PAGEOFFSET(pgd_pte);
+
+		if (verbose) {
+			fprintf(fp, " PAGE: %s  (4MB)\n\n", 
+				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                        	MKSTR(NONPAE_PAGEBASE(pseudo_pgd_pte))));
+
+			x86_translate_pte(pseudo_pgd_pte, 0, 0);
+		}
+
+		*paddr = NONPAE_PAGEBASE(pseudo_pgd_pte) + 
+			(vaddr & ~_4MB_PAGE_MASK);
+
+		return TRUE;
+        }
+
+	page_middle = page_dir;
+
+	FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE());
+	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));
+
+	if (verbose)
+		fprintf(fp, "  PMD: %s => %lx\n", 
+		        mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                        MKSTR((ulong)page_middle)),
+			pmd_pte);
+
+	if (!pmd_pte)
+		goto no_upage;
+
+        machine_page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) +
+                ((vaddr>>10) & ((PTRS_PER_PTE-1)<<2)));
+
+        pseudo_page_table = (ulong *)
+                xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_page_table));
+
+        FILL_PTBL(NONPAE_PAGEBASE(pseudo_page_table), PHYSADDR, PAGESIZE());
+        machine_pte = ULONG(machdep->ptbl + PAGEOFFSET(machine_page_table));
+
+        if (verbose) {
+                fprintf(fp, "  PTE: %s [machine]\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                        MKSTR((ulong)machine_page_table)));
+
+                fprintf(fp, "  PTE: %s => %lx\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                        MKSTR((ulong)pseudo_page_table +
+                        PAGEOFFSET(machine_page_table))), machine_pte);
+	}
+
+	if (!(machine_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) {
+		*paddr = machine_pte;
+
+		if (machine_pte && verbose) {
+			fprintf(fp, "\n");
+			x86_translate_pte(machine_pte, 0, 0);
+		}
+		
+		goto no_upage;
+	}
+
+        pseudo_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_pte));
+        pseudo_pte |= PAGEOFFSET(machine_pte);
+
+	*paddr = NONPAE_PAGEBASE(pseudo_pte) + PAGEOFFSET(vaddr);
+
+        if (verbose) {
+                fprintf(fp, " PAGE: %s [machine]\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
+			MKSTR(NONPAE_PAGEBASE(machine_pte))));
+
+                fprintf(fp, " PAGE: %s\n\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                        MKSTR(NONPAE_PAGEBASE(pseudo_pte))));
+
+                x86_translate_pte(pseudo_pte, 0, 0);
+	}
+
+	return TRUE;
+
+no_upage:
+	return FALSE;
+}
+
+static int
+x86_uvtop_PAE(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose)
 {
 	ulong mm, active_mm;
 	ulonglong *pgd;
@@ -1962,7 +2319,7 @@
 
 	page_middle = PAE_PAGEBASE(page_dir_entry);
 
-	FILL_PMD(page_middle, PHYSADDR, PAGESIZE());
+	FILL_PMD_PAE(page_middle, PHYSADDR, PAGESIZE());
 
 	offset = ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong);
 
@@ -1998,7 +2355,7 @@
 
         page_table = PAE_PAGEBASE(page_middle_entry);
 
-        FILL_PTBL(page_table, PHYSADDR, PAGESIZE());
+        FILL_PTBL_PAE(page_table, PHYSADDR, PAGESIZE());
 
 	offset = ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * 
 		sizeof(ulonglong);
@@ -2028,9 +2385,10 @@
         *paddr = physpage;
 
         if (verbose) {
-                fprintf(fp, " PAGE: %s\n\n", 
-			mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, 
-			MKSTR(&physpage)));
+                ull = PAE_PAGEBASE(page_table_entry);
+                fprintf(fp, " PAGE: %s\n\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                        MKSTR(&ull)));
                 x86_translate_pte(0, 0, page_table_entry);
         }
 
@@ -2040,50 +2398,247 @@
 	return FALSE;
 }
 
-/*
- *  Translates a kernel virtual address to its physical address.  cmd_vtop()
- *  sets the verbose flag so that the pte translation gets displayed; all
- *  other callers quietly accept the translation.
- */
-
 static int
-x86_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
+x86_uvtop_xen_wpt_PAE(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose)
 {
-	ulong *pgd;
-	ulong *page_dir;
-	ulong *page_middle;
-	ulong *page_table;
-        ulong pgd_pte;
-        ulong pmd_pte;
-        ulong pte;
+	ulong mm, active_mm;
+	ulonglong *pgd;
+	ulonglong page_dir_entry;
+	ulonglong page_middle, pseudo_page_middle;
+	ulonglong page_middle_entry;
+	ulonglong page_table, pseudo_page_table;
+	ulonglong page_table_entry;
+	ulonglong physpage, pseudo_physpage;
+	ulonglong ull;
+	ulong offset;
 	char buf[BUFSIZE];
 
-	if (!IS_KVADDR(kvaddr))
-		return FALSE;
+	if (!tc)
+		error(FATAL, "current context invalid\n");
 
-	if (!vt->vmalloc_start) {
-		*paddr = VTOP(kvaddr);
-		return TRUE;
-	}
+	*paddr = 0;
 
-	if (!IS_VMALLOC_ADDR(kvaddr)) { 
-		*paddr = VTOP(kvaddr);
-		if (!verbose)
-			return TRUE;
-	}
+        if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { 
+	    	if (VALID_MEMBER(thread_struct_cr3)) 
+                	pgd = (ulonglong *)machdep->get_task_pgd(tc->task);
+		else {
+			if (INVALID_MEMBER(task_struct_active_mm))
+				error(FATAL, "no cr3 or active_mm?\n");
 
-	pgd = (ulong *)vt->kernel_pgd[0];
+                	readmem(tc->task + OFFSET(task_struct_active_mm), 
+				KVADDR, &active_mm, sizeof(void *),
+                        	"task active_mm contents", FAULT_ON_ERROR);
+
+			if (!active_mm)
+				error(FATAL, 
+				     "no active_mm for this kernel thread\n");
+
+			readmem(active_mm + OFFSET(mm_struct_pgd), 
+				KVADDR, &pgd, sizeof(long), 
+				"mm_struct pgd", FAULT_ON_ERROR);
+		}
+        } else {
+		if ((mm = task_mm(tc->task, TRUE)))
+			pgd = (ulonglong *)(ULONG_PTR(tt->mm_struct + 
+				OFFSET(mm_struct_pgd)));
+		else
+			readmem(tc->mm_struct + OFFSET(mm_struct_pgd), 
+				KVADDR, &pgd, sizeof(long), "mm_struct pgd", 
+				FAULT_ON_ERROR);
+	}
 
 	if (verbose) 
 		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
 
-	page_dir = pgd + (kvaddr >> PGDIR_SHIFT);
+	FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong));
 
-        FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE());
-        pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));
+	offset = ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * 
+		sizeof(ulonglong);
+
+	page_dir_entry = *((ulonglong *)&machdep->pgd[offset]);
 
 	if (verbose)
-		fprintf(fp, "  PGD: %s => %lx\n", 
+		fprintf(fp, "  PGD: %s => %llx [machine]\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
+			MKSTR((ulong)pgd + offset)), 
+			page_dir_entry);
+
+	if (!(page_dir_entry & _PAGE_PRESENT)) {
+		goto no_upage;
+	}
+
+	page_middle = PAE_PAGEBASE(page_dir_entry);
+	pseudo_page_middle = xen_m2p(page_middle); 
+
+        if (verbose)
+                fprintf(fp, "  PGD: %s => %llx\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                        MKSTR((ulong)pgd + offset)),
+                        pseudo_page_middle | PAGEOFFSET(page_dir_entry) |
+                        (page_dir_entry & _PAGE_NX));
+
+	FILL_PMD_PAE(pseudo_page_middle, PHYSADDR, PAGESIZE());
+
+	offset = ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong);
+
+        page_middle_entry = *((ulonglong *)&machdep->pmd[offset]);
+
+        if (verbose) {
+		ull = page_middle + offset;
+                fprintf(fp, "  PMD: %s => %llx [machine]\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, 
+			MKSTR(&ull)), 
+			page_middle_entry);
+	}
+
+        if (!(page_middle_entry & _PAGE_PRESENT)) {
+                goto no_upage;
+        }
+
+        if (page_middle_entry & _PAGE_PSE) {
+		error(FATAL, "_PAGE_PSE in an mfn not supported\n");  /* XXX */
+                if (verbose) {
+			ull = PAE_PAGEBASE(page_middle_entry);
+                        fprintf(fp, " PAGE: %s  (2MB)\n\n",
+				mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                        	MKSTR(&ull)));
+                        x86_translate_pte(0, 0, page_middle_entry);
+                }
+
+                physpage = PAE_PAGEBASE(page_middle_entry) +
+                        (vaddr & ~_2MB_PAGE_MASK);
+                *paddr = physpage;
+
+                return TRUE;
+        }
+
+        page_table = PAE_PAGEBASE(page_middle_entry);
+	pseudo_page_table = xen_m2p(page_table); 
+
+        if (verbose) {
+                ull = page_middle + offset;
+                fprintf(fp, "  PMD: %s => %llx\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                        MKSTR(&ull)),
+                        pseudo_page_table | PAGEOFFSET(page_middle_entry) |
+                        (page_middle_entry & _PAGE_NX));
+        }
+
+        FILL_PTBL_PAE(pseudo_page_table, PHYSADDR, PAGESIZE());
+
+	offset = ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * 
+		sizeof(ulonglong);
+
+        page_table_entry = *((ulonglong *)&machdep->ptbl[offset]);
+
+        if (verbose) {
+		ull = page_table + offset;
+                fprintf(fp, "  PTE: %s => %llx [machine]\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, 
+			MKSTR(&ull)), page_table_entry);
+	}
+
+        if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) {
+                *paddr = page_table_entry;
+
+                if (page_table_entry && verbose) {
+                        fprintf(fp, "\n");
+                        x86_translate_pte(0, 0, page_table_entry);
+                }
+
+                goto no_upage;
+        }
+
+	physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(vaddr);
+	pseudo_physpage = xen_m2p(physpage); 
+
+        if (verbose) {
+                ull = page_table + offset;
+                fprintf(fp, "  PTE: %s => %llx\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                        MKSTR(&ull)),
+                        pseudo_physpage | PAGEOFFSET(page_table_entry) |
+                        (page_table_entry & _PAGE_NX));
+        }
+
+        *paddr = pseudo_physpage + PAGEOFFSET(vaddr);
+
+        if (verbose) {
+                fprintf(fp, " PAGE: %s [machine]\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, 
+			MKSTR(&physpage)));
+
+                pseudo_physpage += (PAGEOFFSET(vaddr) |
+                        (page_table_entry & (_PAGE_NX|machdep->pageoffset)));
+
+                fprintf(fp, " PAGE: %s\n\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                        MKSTR(&pseudo_physpage)));
+
+                x86_translate_pte(0, 0, pseudo_physpage);
+        }
+
+        return TRUE;
+
+no_upage:
+	return FALSE;
+}
+
+/*
+ *  Translates a kernel virtual address to its physical address.  cmd_vtop()
+ *  sets the verbose flag so that the pte translation gets displayed; all
+ *  other callers quietly accept the translation.
+ */
+
+static int
+x86_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
+{
+	ulong *pgd;
+	ulong *page_dir;
+	ulong *page_middle;
+	ulong *page_table;
+        ulong pgd_pte;
+        ulong pmd_pte;
+        ulong pte;
+	char buf[BUFSIZE];
+
+	if (!IS_KVADDR(kvaddr))
+		return FALSE;
+
+	if (XEN_HYPER_MODE()) {
+		if (DIRECTMAP_VIRT_ADDR(kvaddr)) {
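+			/* the hypervisor direct map is a simple linear offset mapping */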
+			*paddr = kvaddr - DIRECTMAP_VIRT_START;
+			return TRUE;
+		}
+		pgd = (ulong *)symbol_value("idle_pg_table_l2");
+	} else {
+		if (!vt->vmalloc_start) {
+			*paddr = VTOP(kvaddr);
+			return TRUE;
+		}
+
+		if (!IS_VMALLOC_ADDR(kvaddr)) { 
+			*paddr = VTOP(kvaddr);
+			if (!verbose)
+				return TRUE;
+		}
+
+		if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES))
+			return (x86_kvtop_xen_wpt(tc, kvaddr, paddr, verbose));
+
+		pgd = (ulong *)vt->kernel_pgd[0];
+	}
+
+	if (verbose) 
+		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+
+	page_dir = pgd + (kvaddr >> PGDIR_SHIFT);
+
+        FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE());
+        pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));
+
+	if (verbose)
+		fprintf(fp, "  PGD: %s => %lx\n", 
 			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
 			MKSTR((ulong)page_dir)), pgd_pte);
 
@@ -2095,7 +2650,7 @@
 			fprintf(fp, " PAGE: %s  (4MB)\n\n", 
 				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
                         	MKSTR(NONPAE_PAGEBASE(pgd_pte))));
-			x86_translate_pte(0, 0, pgd_pte);
+			x86_translate_pte(pgd_pte, 0, 0);
 		}
 
 		*paddr = NONPAE_PAGEBASE(pgd_pte) + (kvaddr & ~_4MB_PAGE_MASK);
@@ -2158,9 +2713,134 @@
 	return FALSE;
 }
 
+static int
+x86_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
+{
+	ulong *pgd;
+	ulong *page_dir;
+	ulong *page_middle;
+	ulong *machine_page_table, *pseudo_page_table;
+        ulong pgd_pte, pseudo_pgd_pte;
+        ulong pmd_pte;
+        ulong machine_pte, pseudo_pte;
+	char buf[BUFSIZE];
+
+	pgd = (ulong *)vt->kernel_pgd[0];
+
+	if (verbose) 
+		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+
+	page_dir = pgd + (kvaddr >> PGDIR_SHIFT);
+
+        FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE());
+        pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));
+
+	if (verbose)
+		fprintf(fp, "  PGD: %s => %lx\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
+			MKSTR((ulong)page_dir)), pgd_pte);
+
+	if (!pgd_pte)
+		goto no_kpage;
+
+	if (pgd_pte & _PAGE_4M) {
+		if (verbose)
+			fprintf(fp, " PAGE: %s  (4MB) [machine]\n", 
+				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                        	MKSTR(NONPAE_PAGEBASE(pgd_pte))));
+
+		pseudo_pgd_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(pgd_pte));
+
+		if (pseudo_pgd_pte == XEN_MFN_NOT_FOUND) {
+			if (verbose)
+				fprintf(fp, " PAGE: page not available\n");
+			*paddr = PADDR_NOT_AVAILABLE;
+			return FALSE;
+		}
+
+		pseudo_pgd_pte |= PAGEOFFSET(pgd_pte);
+
+		if (verbose) {
+			fprintf(fp, " PAGE: %s  (4MB)\n\n", 
+				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                        	MKSTR(NONPAE_PAGEBASE(pseudo_pgd_pte))));
+
+			x86_translate_pte(pseudo_pgd_pte, 0, 0);
+		}
+
+		*paddr = NONPAE_PAGEBASE(pseudo_pgd_pte) + 
+			(kvaddr & ~_4MB_PAGE_MASK);
+
+		return TRUE;
+	} 
+
+	page_middle = page_dir;
+
+        FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE());
+        pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));
+
+	if (verbose)
+		fprintf(fp, "  PMD: %s => %lx\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
+			MKSTR((ulong)page_middle)), pmd_pte);
+
+	if (!pmd_pte)
+		goto no_kpage;
+
+        machine_page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) +
+                ((kvaddr>>10) & ((PTRS_PER_PTE-1)<<2)));
+
+	pseudo_page_table = (ulong *)
+		xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_page_table));
+
+        FILL_PTBL(NONPAE_PAGEBASE(pseudo_page_table), PHYSADDR, PAGESIZE());
+        machine_pte = ULONG(machdep->ptbl + PAGEOFFSET(machine_page_table));
+
+        if (verbose) {
+                fprintf(fp, "  PTE: %s [machine]\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
+			MKSTR((ulong)machine_page_table)));
+
+                fprintf(fp, "  PTE: %s => %lx\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                        MKSTR((ulong)pseudo_page_table + 
+			PAGEOFFSET(machine_page_table))), machine_pte);
+	}
+
+	if (!(machine_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) {
+		if (machine_pte && verbose) {
+			fprintf(fp, "\n");
+			x86_translate_pte(machine_pte, 0, 0);
+		}
+		goto no_kpage;
+	}
+
+	pseudo_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_pte));
+	pseudo_pte |= PAGEOFFSET(machine_pte);
+
+	if (verbose) {
+		fprintf(fp, " PAGE: %s [machine]\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
+			MKSTR(NONPAE_PAGEBASE(machine_pte))));
+
+		fprintf(fp, " PAGE: %s\n\n", 
+			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
+			MKSTR(NONPAE_PAGEBASE(pseudo_pte))));
+
+		x86_translate_pte(pseudo_pte, 0, 0);
+	}
+
+	*paddr = NONPAE_PAGEBASE(pseudo_pte) + PAGEOFFSET(kvaddr);
+
+	return TRUE;
+
+no_kpage:
+	return FALSE;
+}
+
 
 static int
-x86_kvtop_pae(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
+x86_kvtop_PAE(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
 {
 	ulonglong *pgd;
         ulonglong page_dir_entry;
@@ -2177,18 +2857,32 @@
 	if (!IS_KVADDR(kvaddr))
 		return FALSE;
 
-	if (!vt->vmalloc_start) {
-		*paddr = VTOP(kvaddr);
-		return TRUE;
-	}
-
-	if (!IS_VMALLOC_ADDR(kvaddr)) { 
-		*paddr = VTOP(kvaddr);
-		if (!verbose)
+	if (XEN_HYPER_MODE()) {
+		if (DIRECTMAP_VIRT_ADDR(kvaddr)) {
+			*paddr = kvaddr - DIRECTMAP_VIRT_START;
 			return TRUE;
-	}
+		}
+		if (symbol_exists("idle_pg_table_l3"))
+			pgd = (ulonglong *)symbol_value("idle_pg_table_l3");
+		else
+			pgd = (ulonglong *)symbol_value("idle_pg_table");
+	} else {
+		if (!vt->vmalloc_start) {
+			*paddr = VTOP(kvaddr);
+			return TRUE;
+		}
 
-	pgd = (ulonglong *)vt->kernel_pgd[0];
+		if (!IS_VMALLOC_ADDR(kvaddr)) { 
+			*paddr = VTOP(kvaddr);
+			if (!verbose)
+				return TRUE;
+		}
+
+	        if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES))
+	       	        return (x86_kvtop_xen_wpt_PAE(tc, kvaddr, paddr, verbose));
+
+		pgd = (ulonglong *)vt->kernel_pgd[0];
+	}
 
 	if (verbose) 
 		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
@@ -2212,7 +2906,7 @@
 
 	page_middle = PAE_PAGEBASE(page_dir_entry);
 
-	FILL_PMD(page_middle, PHYSADDR, PAGESIZE());
+	FILL_PMD_PAE(page_middle, PHYSADDR, PAGESIZE());
 
 	offset = ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong);
 
@@ -2249,7 +2943,7 @@
 
         page_table = PAE_PAGEBASE(page_middle_entry);
 
-        FILL_PTBL(page_table, PHYSADDR, PAGESIZE());
+        FILL_PTBL_PAE(page_table, PHYSADDR, PAGESIZE());
 
 	offset = ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * 
 		sizeof(ulonglong);
@@ -2277,9 +2971,10 @@
         *paddr = physpage;
 
         if (verbose) {
+		ull = PAE_PAGEBASE(page_table_entry);
                 fprintf(fp, " PAGE: %s\n\n",
                         mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
-                        MKSTR(&physpage)));
+                        MKSTR(&ull)));
                 x86_translate_pte(0, 0, page_table_entry);
         }
 
@@ -2289,13 +2984,172 @@
 	return FALSE;
 }
 
-/*
- *  Get the relevant page directory pointer from a task structure.
- */
-static ulong
-x86_get_task_pgd(ulong task)
+static int
+x86_kvtop_xen_wpt_PAE(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
 {
-	long offset;
+	ulonglong *pgd;
+        ulonglong page_dir_entry;
+        ulonglong page_middle, pseudo_page_middle;
+        ulonglong page_middle_entry;
+        ulonglong page_table, pseudo_page_table;
+        ulonglong page_table_entry;
+        ulonglong physpage, pseudo_physpage;
+        ulonglong ull;
+        ulong offset;
+	char buf[BUFSIZE];
+
+        pgd = (ulonglong *)vt->kernel_pgd[0];
+
+        if (verbose)
+                fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+
+        FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong));
+
+        offset = ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) *
+                sizeof(ulonglong);
+
+        page_dir_entry = *((ulonglong *)&machdep->pgd[offset]);
+
+        if (verbose)
+                fprintf(fp, "  PGD: %s => %llx [machine]\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                        MKSTR((ulong)pgd + offset)),
+                        page_dir_entry);
+
+        if (!(page_dir_entry & _PAGE_PRESENT)) {
+                goto no_kpage;
+        }
+
+        page_middle = PAE_PAGEBASE(page_dir_entry);
+	pseudo_page_middle = xen_m2p(page_middle); 
+
+        if (verbose)
+                fprintf(fp, "  PGD: %s => %llx\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                        MKSTR((ulong)pgd + offset)),
+			pseudo_page_middle | PAGEOFFSET(page_dir_entry) |
+			(page_dir_entry & _PAGE_NX));
+
+	FILL_PMD_PAE(pseudo_page_middle, PHYSADDR, PAGESIZE());
+
+	offset = ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong);
+
+        page_middle_entry = *((ulonglong *)&machdep->pmd[offset]);
+
+        if (verbose) {
+                ull = page_middle + offset;
+                fprintf(fp, "  PMD: %s => %llx [machine]\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                        MKSTR(&ull)),
+                        page_middle_entry);
+	}
+
+        if (!(page_middle_entry & _PAGE_PRESENT)) {
+                goto no_kpage;
+        }
+
+        if (page_middle_entry & _PAGE_PSE) {
+		error(FATAL, "_PAGE_PSE in an mfn not supported\n");  /* XXX */
+                if (verbose) {
+                        ull = PAE_PAGEBASE(page_middle_entry);
+                        fprintf(fp, " PAGE: %s  (2MB)\n\n",
+                                mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                                MKSTR(&ull)));
+                        x86_translate_pte(0, 0, page_middle_entry);
+                }
+
+		physpage = PAE_PAGEBASE(page_middle_entry) +
+			(kvaddr & ~_2MB_PAGE_MASK);
+                *paddr = physpage;
+
+
+                return TRUE;
+        }
+
+        page_table = PAE_PAGEBASE(page_middle_entry);
+	pseudo_page_table = xen_m2p(page_table); 
+
+        if (verbose) {
+                ull = page_middle + offset;
+                fprintf(fp, "  PMD: %s => %llx\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                        MKSTR(&ull)),
+                        pseudo_page_table | PAGEOFFSET(page_middle_entry) | 
+			(page_middle_entry & _PAGE_NX));
+        }
+
+        FILL_PTBL_PAE(pseudo_page_table, PHYSADDR, PAGESIZE());
+
+	offset = ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * 
+		sizeof(ulonglong);
+
+        page_table_entry = *((ulonglong *)&machdep->ptbl[offset]);
+
+        if (verbose) {
+                ull = page_table + offset;
+                fprintf(fp, "  PTE: %s => %llx [machine]\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                        MKSTR(&ull)), page_table_entry);
+	}
+
+        if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) {
+                if (page_table_entry && verbose) {
+                        fprintf(fp, "\n");
+                        x86_translate_pte(0, 0, page_table_entry);
+                }
+
+                goto no_kpage;
+        }
+
+	physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(kvaddr);
+	pseudo_physpage = xen_m2p(physpage); 
+
+        if (verbose) {
+                ull = page_table + offset;
+                fprintf(fp, "  PTE: %s => %llx\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                        MKSTR(&ull)), 
+			pseudo_physpage | PAGEOFFSET(page_table_entry) |
+			(page_table_entry & _PAGE_NX));
+        }
+
+        *paddr = pseudo_physpage + PAGEOFFSET(kvaddr);
+
+        if (verbose) {
+                fprintf(fp, " PAGE: %s [machine]\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                        MKSTR(&physpage)));
+
+		pseudo_physpage += (PAGEOFFSET(kvaddr) | 
+			(page_table_entry & _PAGE_NX));
+
+                fprintf(fp, " PAGE: %s\n\n",
+                        mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
+                        MKSTR(&pseudo_physpage)));
+
+                x86_translate_pte(0, 0, pseudo_physpage);
+        }
+
+        return TRUE;
+
+no_kpage:
+	return FALSE;
+}
+
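+/*
+ *  Reset the cached PAE pmd/ptbl page addresses so that the next
+ *  page table walk re-reads them from the dumpfile.
+ */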
+void
+x86_clear_machdep_cache(void)
+{
+        machdep->machspec->last_pmd_read_PAE = 0;
+        machdep->machspec->last_ptbl_read_PAE = 0;
+}
+
+/*
+ *  Get the relevant page directory pointer from a task structure.
+ */
+static ulong
+x86_get_task_pgd(ulong task)
+{
+	long offset;
 	ulong cr3;
 
 	offset = OFFSET_OPTION(task_struct_thread, task_struct_tss);
@@ -2341,6 +3195,8 @@
 x86_dump_machdep_table(ulong arg)
 {
         int others;
+	ulong xen_wpt;
+	char buf[BUFSIZE];
 
 	switch (arg) {
 	default:
@@ -2355,8 +3211,6 @@
                 fprintf(fp, "%sPAE", others++ ? "|" : "");
         if (machdep->flags & OMIT_FRAME_PTR)
                 fprintf(fp, "%sOMIT_FRAME_PTR", others++ ? "|" : "");
-        if (machdep->flags & SYSRQ)
-                fprintf(fp, "%sSYSRQ", others++ ? "|" : "");
         if (machdep->flags & FRAMESIZE_DEBUG)
                 fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? "|" : "");
         fprintf(fp, ")\n");
@@ -2376,12 +3230,17 @@
         fprintf(fp, "      eframe_search: x86_eframe_search()\n");
         fprintf(fp, "         back_trace: x86_back_trace_cmd()\n");
         fprintf(fp, "get_processor_speed: x86_processor_speed()\n");
+	xen_wpt = XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES);
 	if (machdep->flags & PAE) {
-        	fprintf(fp, "              uvtop: x86_uvtop_pae()\n");
-        	fprintf(fp, "              kvtop: x86_uvtop_pae()\n");
+        	fprintf(fp, "              uvtop: %s()\n", 
+			xen_wpt ?  "x86_uvtop_xen_wpt_PAE" : "x86_uvtop_PAE");
+        	fprintf(fp, "              kvtop: x86_kvtop_PAE()%s\n",
+			xen_wpt ? " -> x86_kvtop_xen_wpt_PAE()" : "");
 	} else {
-        	fprintf(fp, "              uvtop: x86_uvtop()\n");
-        	fprintf(fp, "              kvtop: x86_uvtop()\n");
+        	fprintf(fp, "              uvtop: %s()\n", 
+			xen_wpt ?  "x86_uvtop_xen_wpt" : "x86_uvtop");
+        	fprintf(fp, "              kvtop: x86_kvtop()%s\n",
+			xen_wpt ? " -> x86_kvtop_xen_wpt()" : "");
 	}
         fprintf(fp, "       get_task_pgd: x86_get_task_pgd()\n");
 	fprintf(fp, "           dump_irq: generic_dump_irq()\n");
@@ -2397,9 +3256,10 @@
 	fprintf(fp, "           cmd_mach: x86_cmd_mach()\n");
 	fprintf(fp, "       get_smp_cpus: x86_get_smp_cpus()\n");
 	fprintf(fp, "          is_kvaddr: generic_is_kvaddr()\n");
-	fprintf(fp, "          is_uvaddr: generic_is_uvaddr()\n");
+	fprintf(fp, "          is_uvaddr: %s\n", COMMON_VADDR_SPACE() ?
+                        "x86_is_uvaddr()" : "generic_is_uvaddr()");
 	fprintf(fp, "       verify_paddr: generic_verify_paddr()\n");
-        fprintf(fp, "    init_kernel_pgd: NULL\n");
+        fprintf(fp, "    init_kernel_pgd: x86_init_kernel_pgd()\n");
 	fprintf(fp, "    value_to_symbol: %s\n",
 		machdep->value_to_symbol == generic_machdep_value_to_symbol ?
 		"generic_machdep_value_to_symbol()" :
@@ -2412,6 +3272,48 @@
 	fprintf(fp, "                pmd: %lx\n", (ulong)machdep->pmd);
 	fprintf(fp, "               ptbl: %lx\n", (ulong)machdep->ptbl);
 	fprintf(fp, "       ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd);
+	fprintf(fp, "  section_size_bits: %ld\n", machdep->section_size_bits);
+        fprintf(fp, "   max_physmem_bits: %ld\n", machdep->max_physmem_bits);
+        fprintf(fp, "  sections_per_root: %ld\n", machdep->sections_per_root);
+	fprintf(fp, " xendump_p2m_create: x86_xendump_p2m_create()\n");
+	fprintf(fp, " xendump_panic_task: x86_xendump_panic_task()\n");
+	fprintf(fp, "   get_xendump_regs: x86_get_xendump_regs()\n");
+	fprintf(fp, "xen_kdump_p2m_create: x86_xen_kdump_p2m_create()\n");
+	fprintf(fp, "clear_machdep_cache: x86_clear_machdep_cache()\n");
+	fprintf(fp, "   INT_EFRAME_[reg]:\n");
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "SS: "), INT_EFRAME_SS);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "ESP: "), INT_EFRAME_ESP);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "EFLAGS: "), INT_EFRAME_EFLAGS);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "CS: "), INT_EFRAME_CS);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "IP: "), INT_EFRAME_EIP);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "ERR: "), INT_EFRAME_ERR);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "ES: "), INT_EFRAME_ES);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "DS: "), INT_EFRAME_DS);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "EAX: "), INT_EFRAME_EAX);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "EBP: "), INT_EFRAME_EBP);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "EDI: "), INT_EFRAME_EDI);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "ESI: "), INT_EFRAME_ESI);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "EDX: "), INT_EFRAME_EDX);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "ECX: "), INT_EFRAME_ECX);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "EBX: "), INT_EFRAME_EBX);
+	fprintf(fp, "%s %d\n", 
+		mkstring(buf, 21, RJUST, "GS: "), INT_EFRAME_GS);
+
         fprintf(fp, "           machspec: x86_machine_specific\n");
 	fprintf(fp, "                     idt_table: %lx\n",
 		(ulong)machdep->machspec->idt_table); 
@@ -2421,6 +3323,11 @@
 		machdep->machspec->entry_tramp_end);
 	fprintf(fp, "        entry_tramp_start_phys: %llx\n",
 		machdep->machspec->entry_tramp_start_phys);
+	fprintf(fp, "             last_pmd_read_PAE: %llx\n",
+		machdep->machspec->last_pmd_read_PAE);
+	fprintf(fp, "            last_ptbl_read_PAE: %llx\n",
+		machdep->machspec->last_ptbl_read_PAE);
+
 }
 
 /*
@@ -2732,6 +3639,9 @@
 	switch (flag)
 	{
 	case READ_IDT_INIT:
+		if (!symbol_exists("idt_table"))
+			return NULL;
+
        		if (!(idt = (ulong *)malloc(desc_struct_size))) {
 			error(WARNING, "cannot malloc idt_table\n\n");
 			return NULL;
@@ -2779,6 +3689,10 @@
 		break;
 
         case READ_IDT_RUNTIME:
+		if (!symbol_exists("idt_table"))
+			error(FATAL, 
+			    "idt_table does not exist on this architecture\n");
+
 		idt = (ulong *)GETBUF(desc_struct_size);
                 readmem(symbol_value("idt_table"), KVADDR, idt,
                         desc_struct_size, "idt_table", FAULT_ON_ERROR);
@@ -2942,7 +3856,11 @@
                             !strstr(buf2, "+"))
                                 sprintf(p1, buf1);
 		}
-	}
+	} 
+	else if (STREQ(argv[2], "ud2a"))
+		pc->curcmd_flags |= UD2A_INSTRUCTION;
+	else if (STREQ(argv[2], "(bad)"))
+		pc->curcmd_flags |= BAD_INSTRUCTION;
 
 	if (CRASHDEBUG(1))
 		console("    %s", inbuf);
@@ -2969,6 +3887,16 @@
 		} 
 	}
 
+	if (XEN() && (count == 1) && symbol_exists("cpu_present_map")) {
+        	ulong cpu_present_map;
+
+        	get_symbol_data("cpu_present_map", sizeof(ulong), 
+			&cpu_present_map);
+
+        	cpucount = count_bits_long(cpu_present_map);
+		count = MAX(cpucount, kt->cpus);
+	}
+
 	return count;
 }
 
@@ -3026,7 +3954,7 @@
 		fprintf(fp, "(unknown)\n");
 	fprintf(fp, "                 HZ: %d\n", machdep->hz);
 	fprintf(fp, "          PAGE SIZE: %d\n", PAGESIZE());
-	fprintf(fp, "      L1 CACHE SIZE: %d\n", l1_cache_size());
+//	fprintf(fp, "      L1 CACHE SIZE: %d\n", l1_cache_size());
 	fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase);
 	fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start);
 	fprintf(fp, "  KERNEL STACK SIZE: %ld\n", STACKSIZE());
@@ -3092,31 +4020,31 @@
  *  with the -fomit-frame-pointer flag.
  */
 #define PUSH_BP_MOV_ESP_BP 0xe58955
+#define PUSH_BP_CLR_EAX_MOV_ESP_BP 0xe589c03155ULL
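+/*
+ *  The byte patterns above, read little-endian from a function entry:
+ *
+ *      55 89 e5        push %ebp ; mov %esp,%ebp
+ *      55 31 c0 89 e5  push %ebp ; xor %eax,%eax ; mov %esp,%ebp
+ */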
 
 static int
 x86_omit_frame_pointer(void)
 {
-	ulong push_bp_mov_esp_bp[3];
+	ulonglong push_bp_mov_esp_bp;
+        int i;
+        char *checkfuncs[] = {"sys_open", "sys_fork", "sys_read"};
 
 	if (pc->flags & KERNEL_DEBUG_QUERY)
 		return FALSE;
 
-	if (!readmem(symbol_value("sys_open"), KVADDR, &push_bp_mov_esp_bp[0], 
-	    sizeof(ulong), "x86_omit_frame_pointer", RETURN_ON_ERROR))
-		return TRUE;
-	if (!readmem(symbol_value("sys_fork"), KVADDR, &push_bp_mov_esp_bp[1], 
-	    sizeof(ulong), "x86_omit_frame_pointer", RETURN_ON_ERROR))
-		return TRUE;
-	if (!readmem(symbol_value("sys_read"), KVADDR, &push_bp_mov_esp_bp[2], 
-	    sizeof(ulong), "x86_omit_frame_pointer", RETURN_ON_ERROR))
-		return TRUE;
-
-	if (((push_bp_mov_esp_bp[0] & 0xffffff) == PUSH_BP_MOV_ESP_BP) &&
-	    ((push_bp_mov_esp_bp[1] & 0xffffff) == PUSH_BP_MOV_ESP_BP) &&
-	    ((push_bp_mov_esp_bp[2] & 0xffffff) == PUSH_BP_MOV_ESP_BP))
-		return FALSE;
+        for (i = 0; i < 3; i++) {
+                if (!readmem(symbol_value(checkfuncs[i]), KVADDR,
+                    &push_bp_mov_esp_bp, sizeof(ulonglong),
+                    "x86_omit_frame_pointer", RETURN_ON_ERROR))
+                        return TRUE;
+                if (!(((push_bp_mov_esp_bp & 0x0000ffffffULL) == 
+		    PUSH_BP_MOV_ESP_BP) ||
+                    ((push_bp_mov_esp_bp & 0xffffffffffULL) ==
+                    PUSH_BP_CLR_EAX_MOV_ESP_BP)))
+                        return TRUE;
+        }
 
-	return TRUE;
+	return FALSE;
 }
 
 /*
@@ -3207,4 +4135,921 @@
 
         return ((sp = value_search(value, offset))); 
 }
+
+static void
+x86_init_kernel_pgd(void)
+{
+        int i;
+	ulong value;
+
+	if (XEN()) 
+		get_symbol_data("swapper_pg_dir", sizeof(ulong), &value);
+	else
+     		value = symbol_value("swapper_pg_dir");
+
+       	for (i = 0; i < NR_CPUS; i++)
+       		vt->kernel_pgd[i] = value;
+
+}
+
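+/*
+ *  Non-PAE wrapper around xen_m2p(): translate a machine address into
+ *  its pseudo-physical equivalent, collapsing the 64-bit "not found"
+ *  sentinel into the 32-bit XEN_MFN_NOT_FOUND value.
+ */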
+static ulong
+xen_m2p_nonPAE(ulong machine)
+{
+	ulonglong pseudo;
+
+	pseudo = xen_m2p((ulonglong)machine);
+
+	if (pseudo == XEN_MACHADDR_NOT_FOUND)
+		return XEN_MFN_NOT_FOUND;
+
+	return ((ulong)pseudo);
+}
+
+#include "netdump.h"
+
+/*
+ *  From the xen vmcore, create an index of mfns for each page that makes 
+ *  up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array.
+ */
+
+#define MAX_X86_FRAMES  (16)    
+#define MFNS_PER_FRAME  (PAGESIZE()/sizeof(ulong))
+
+static int 
+x86_xen_kdump_p2m_create(struct xen_kdump_data *xkd)
+{
+	int i, j;
+	ulong kvaddr;
+	ulong *up;
+	ulonglong *ulp;
+	ulong frames;
+	ulong frame_mfn[MAX_X86_FRAMES] = { 0 };
+	int mfns[MAX_X86_FRAMES] = { 0 };
+
+	/*
+	 *  Temporarily read physical (machine) addresses from vmcore by
+	 *  going directly to read_netdump() instead of via read_kdump().
+	 */ 
+	pc->readmem = read_netdump;
+
+	if (xkd->flags & KDUMP_CR3)
+		goto use_cr3;
+
+        xkd->p2m_frames = 0;
+
+	if (CRASHDEBUG(1))
+		fprintf(fp, "x86_xen_kdump_p2m_create: p2m_mfn: %lx\n",
+			xkd->p2m_mfn);
+
+	if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->page, PAGESIZE(), 
+	    "xen kdump p2m mfn page", RETURN_ON_ERROR))
+		error(FATAL, "cannot read xen kdump p2m mfn page\n");
+
+	if (CRASHDEBUG(1)) {
+		up = (ulong *)xkd->page;
+		for (i = 0; i < 4; i++) {
+                	fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n",
+                        	(ulong)((i * 4) * sizeof(ulong)),
+                        	*up, *(up+1), *(up+2), *(up+3));
+                        up += 4;
+		}
+		fprintf(fp, "\n");
+	}
+
+	for (i = 0, up = (ulong *)xkd->page; i < MAX_X86_FRAMES; i++, up++)
+		frame_mfn[i] = *up;
+
+	for (i = 0; i < MAX_X86_FRAMES; i++) {
+		if (!frame_mfn[i])
+			break;
+
+        	if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, xkd->page, 
+		    PAGESIZE(), "xen kdump p2m mfn list page", RETURN_ON_ERROR))
+                	error(FATAL, "cannot read xen kdump p2m mfn list page\n");
+
+		for (j = 0, up = (ulong *)xkd->page; j < MFNS_PER_FRAME; j++, up++)
+			if (*up)
+				mfns[i]++;
+
+		xkd->p2m_frames += mfns[i];
+		
+	        if (CRASHDEBUG(7)) {
+	                up = (ulong *)xkd->page;
+	                for (j = 0; j < 256; j++) {
+	                        fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n",
+	                                (ulong)((j * 4) * sizeof(ulong)),
+	                                *up, *(up+1), *(up+2), *(up+3));
+	                        up += 4;
+	                }
+	        }
+	}
+
+        if (CRASHDEBUG(1))
+		fprintf(fp, "p2m_frames: %d\n", xkd->p2m_frames);
+
+        if ((xkd->p2m_mfn_frame_list = (ulong *)
+	    malloc(xkd->p2m_frames * sizeof(ulong))) == NULL)
+                error(FATAL, "cannot malloc p2m_mfn_frame_list");
+
+	for (i = 0, frames = xkd->p2m_frames; frames; i++) {
+        	if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, 
+		    &xkd->p2m_mfn_frame_list[i * MFNS_PER_FRAME], 
+		    mfns[i] * sizeof(ulong), "xen kdump p2m mfn list page", 
+		    RETURN_ON_ERROR))
+                	error(FATAL, "cannot read xen kdump p2m mfn list page\n");
+
+		frames -= mfns[i];
+	}
+
+        if (CRASHDEBUG(2)) {
+                for (i = 0; i < xkd->p2m_frames; i++)
+                        fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]);
+                fprintf(fp, "\n");
+        }
+
+	pc->readmem = read_kdump;
+	return TRUE;
+
+use_cr3:
+	if (CRASHDEBUG(1))
+		fprintf(fp, "x86_xen_kdump_p2m_create: cr3: %lx\n", xkd->cr3);
+
+	if (!readmem(PTOB(xkd->cr3), PHYSADDR, machdep->pgd, PAGESIZE(), 
+	    "xen kdump cr3 page", RETURN_ON_ERROR))
+		error(FATAL, "cannot read xen kdump cr3 page\n");
+
+	if (CRASHDEBUG(7)) {
+		fprintf(fp, "contents of page directory page:\n");	
+
+		if (machdep->flags & PAE) {
+			ulp = (ulonglong *)machdep->pgd;
+			fprintf(fp, 
+			    "%016llx %016llx %016llx %016llx\n",
+				*ulp, *(ulp+1), *(ulp+2), *(ulp+3));
+		} else {
+			up = (ulong *)machdep->pgd;
+			for (i = 0; i < 256; i++) {
+				fprintf(fp, 
+				    "%08lx: %08lx %08lx %08lx %08lx\n", 
+					(ulong)((i * 4) * sizeof(ulong)),
+					*up, *(up+1), *(up+2), *(up+3));
+				up += 4;
+			}
+		}
+	}
+
+	kvaddr = symbol_value("max_pfn");
+        if (!x86_xen_kdump_load_page(kvaddr, xkd->page))
+                return FALSE;
+	up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr));
+
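+        /* pages needed to hold max_pfn phys_to_machine entries, rounded up */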
+        xkd->p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) +
+		((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0);
+
+        if (CRASHDEBUG(1))
+                fprintf(fp, "max_pfn at %lx: %lx (%ld) -> %d p2m_frames\n", 
+			kvaddr, *up, *up, xkd->p2m_frames);
+
+        if ((xkd->p2m_mfn_frame_list = (ulong *)
+            malloc(xkd->p2m_frames * sizeof(ulong))) == NULL)
+                error(FATAL, "cannot malloc p2m_mfn_frame_list");
+
+        kvaddr = symbol_value("phys_to_machine_mapping");
+        if (!x86_xen_kdump_load_page(kvaddr, xkd->page))
+                return FALSE;
+        up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr));
+        kvaddr = *up;
+        if (CRASHDEBUG(1))
+                fprintf(fp, "phys_to_machine_mapping: %lx\n", kvaddr);
+
+        if (CRASHDEBUG(7)) {
+                fprintf(fp, "contents of first phys_to_machine_mapping page:\n");
+        	if (!x86_xen_kdump_load_page(kvaddr, xkd->page))
+			error(INFO, 
+			    "cannot read first phys_to_machine_mapping page\n");
+
+                 up = (ulong *)xkd->page;
+                 for (i = 0; i < 256; i++) {
+                         fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n",
+                         	(ulong)((i * 4) * sizeof(ulong)),
+                         	*up, *(up+1), *(up+2), *(up+3));
+                         up += 4;
+                 }
+        }
+
+        machdep->last_ptbl_read = BADADDR;
+        machdep->last_pmd_read = BADADDR;
+
+        for (i = 0; i < xkd->p2m_frames; i++) {
+                xkd->p2m_mfn_frame_list[i] = x86_xen_kdump_page_mfn(kvaddr);
+                kvaddr += PAGESIZE();
+        }
+
+        if (CRASHDEBUG(1)) {
+        	for (i = 0; i < xkd->p2m_frames; i++)
+			fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]);
+		fprintf(fp, "\n");
+	}
+
+        machdep->last_ptbl_read = 0;
+        machdep->last_pmd_read = 0;
+	pc->readmem = read_kdump;
+
+	return TRUE;
+}
+
+/*
+ *  Find the page associated with the kvaddr, and read its contents
+ *  into the passed-in buffer.
+ */
+static char *
+x86_xen_kdump_load_page(ulong kvaddr, char *pgbuf)
+{
+        ulong *entry;
+        ulong *up;
+        ulong mfn;
+
+        if (machdep->flags & PAE)
+                return x86_xen_kdump_load_page_PAE(kvaddr, pgbuf);
+
+        up = (ulong *)machdep->pgd;
+        entry = up + (kvaddr >> PGDIR_SHIFT);
+        mfn = (*entry) >> PAGESHIFT();
+
+	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), 
+	    "xen kdump pgd entry", RETURN_ON_ERROR)) {
+                error(INFO, "cannot read/find pgd entry from cr3 page\n");
+		return NULL;
+	}
+
+        up = (ulong *)pgbuf;
+        entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1));
+        mfn = (*entry) >> PAGESHIFT();
+
+	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), 
+	    "xen page table page", RETURN_ON_ERROR)) {
+                error(INFO, "cannot read/find page table page\n");
+		return NULL;
+	}
+
+	return pgbuf;
+}
+
+static char *
+x86_xen_kdump_load_page_PAE(ulong kvaddr, char *pgbuf)
+{
+	ulonglong *entry;
+	ulonglong *up;
+	ulong mfn;
+
+        up = (ulonglong *)machdep->pgd;
+        entry = up + (kvaddr >> PGDIR_SHIFT);
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+
+	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), 
+	    "xen kdump pgd entry", RETURN_ON_ERROR)) {
+                error(INFO, "cannot read/find pgd entry from cr3 page\n");
+                return NULL;
+        }
+
+        up = (ulonglong *)pgbuf;
+        entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1));
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+
+	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), 
+	    "xen kdump pmd entry", RETURN_ON_ERROR)) {
+                error(INFO, "cannot read/find pmd entry from pgd\n");
+                return NULL;
+        }
+
+        up = (ulonglong *)pgbuf;
+        entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+
+	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), 
+	    "xen kdump page table page", RETURN_ON_ERROR)) {
+                error(INFO, "cannot read/find page table page from pmd\n");
+                return NULL;
+        }
+
+	return pgbuf;
+}
+
+/*
+ *  Return the mfn value associated with a virtual address.
+ */
+static ulong 
+x86_xen_kdump_page_mfn(ulong kvaddr)
+{
+        ulong *entry;
+        ulong *up;
+        ulong mfn;
+
+        if (machdep->flags & PAE)
+                return x86_xen_kdump_page_mfn_PAE(kvaddr);
+
+        up = (ulong *)machdep->pgd;
+        entry = up + (kvaddr >> PGDIR_SHIFT);
+        mfn = (*entry) >> PAGESHIFT();
+
+	if ((mfn != machdep->last_ptbl_read) && 
+	    !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), 
+	    "xen kdump pgd entry", RETURN_ON_ERROR))
+                error(FATAL, 
+		    "cannot read/find pgd entry from cr3 page (mfn: %lx)\n", 
+			mfn);
+	machdep->last_ptbl_read = mfn;
+
+        up = (ulong *)machdep->ptbl;
+        entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1));
+        mfn = (*entry) >> PAGESHIFT();
+
+	return mfn;
+}
+
+static ulong
+x86_xen_kdump_page_mfn_PAE(ulong kvaddr)
+{
+	ulonglong *entry;
+	ulonglong *up;
+	ulong mfn;
+
+        up = (ulonglong *)machdep->pgd;
+        entry = up + (kvaddr >> PGDIR_SHIFT);
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+
+	if ((mfn != machdep->last_pmd_read) &&
+	    !readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), 
+	    "xen kdump pgd entry", RETURN_ON_ERROR))
+                error(FATAL, 
+		    "cannot read/find pgd entry from cr3 page (mfn: %lx)\n",
+			mfn);
+	machdep->last_pmd_read = mfn;
+
+        up = (ulonglong *)machdep->pmd;
+        entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1));
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+
+	if ((mfn != machdep->last_ptbl_read) &&
+	    !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), 
+	    "xen kdump pmd entry", RETURN_ON_ERROR))
+                error(FATAL, 
+		    "cannot read/find pmd entry from pgd (mfn: %lx)\n",
+			mfn);
+	machdep->last_ptbl_read = mfn;
+
+        up = (ulonglong *)machdep->ptbl;
+        entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+
+	return mfn;
+}
+
+#include "xendump.h"
+
+/*
+ *  Create an index of mfns for each page that makes up the
+ *  kernel's complete phys_to_machine_mapping[max_pfn] array.
+ */
+static int 
+x86_xendump_p2m_create(struct xendump_data *xd)
+{
+	int i, idx;
+	ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset;
+	ulong *up;
+	ulonglong *ulp;
+	off_t offset; 
+
+        if (!symbol_exists("phys_to_machine_mapping")) {
+                xd->flags |= XC_CORE_NO_P2M;
+                return TRUE;
+        }
+
+	if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) ==
+	     INVALID_OFFSET)
+		error(FATAL, 
+		    "cannot determine vcpu_guest_context.ctrlreg offset\n");
+	else if (CRASHDEBUG(1))
+		fprintf(xd->ofp, 
+		    "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n",
+			ctrlreg_offset);
+
+	offset = (off_t)xd->xc_core.header.xch_ctxt_offset + 
+		(off_t)ctrlreg_offset;
+
+	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+		error(FATAL, "cannot lseek to xch_ctxt_offset\n");
+
+	if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) !=
+	    sizeof(ctrlreg))
+		error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n");
+
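+	/*
+	 *  The saved cr3 holds the page directory mfn rotated left by
+	 *  PAGESHIFT() bits; rotate it back to recover the mfn.
+	 */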
+	mfn = (ctrlreg[3] >> PAGESHIFT()) | (ctrlreg[3] << (BITS()-PAGESHIFT()));
+
+	for (i = 0; CRASHDEBUG(1) && (i < 8); i++) {
+		fprintf(xd->ofp, "ctrlreg[%d]: %lx", i, ctrlreg[i]);
+		if (i == 3)
+			fprintf(xd->ofp, " -> mfn: %lx", mfn);
+		fprintf(xd->ofp, "\n");
+	}
+
+	if (!xc_core_mfn_to_page(mfn, machdep->pgd))
+		error(FATAL, "cannot read/find cr3 page\n");
+
+	if (CRASHDEBUG(1)) {
+		fprintf(xd->ofp, "contents of page directory page:\n");	
+
+		if (machdep->flags & PAE) {
+			ulp = (ulonglong *)machdep->pgd;
+			fprintf(xd->ofp, 
+			    "%016llx %016llx %016llx %016llx\n",
+				*ulp, *(ulp+1), *(ulp+2), *(ulp+3));
+		} else {
+			up = (ulong *)machdep->pgd;
+			for (i = 0; i < 256; i++) {
+				fprintf(xd->ofp, 
+				    "%08lx: %08lx %08lx %08lx %08lx\n", 
+					(ulong)((i * 4) * sizeof(ulong)),
+					*up, *(up+1), *(up+2), *(up+3));
+				up += 4;
+			}
+		}
+	}
+
+	kvaddr = symbol_value("max_pfn");
+	if (!x86_xendump_load_page(kvaddr, xd->page))
+		return FALSE;
+	up = (ulong *)(xd->page + PAGEOFFSET(kvaddr));
+	if (CRASHDEBUG(1))
+		fprintf(xd->ofp, "max_pfn: %lx\n", *up);
+
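+        /* pages needed to hold max_pfn phys_to_machine entries, rounded up */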
+        xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) +
+                ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0);
+
+	if ((xd->xc_core.p2m_frame_index_list = (ulong *)
+	    malloc(xd->xc_core.p2m_frames * sizeof(int))) == NULL)
+        	error(FATAL, "cannot malloc p2m_frame_index_list");
+
+	kvaddr = symbol_value("phys_to_machine_mapping");
+	if (!x86_xendump_load_page(kvaddr, xd->page))
+		return FALSE;
+	up = (ulong *)(xd->page + PAGEOFFSET(kvaddr));
+	if (CRASHDEBUG(1))
+		fprintf(fp, "phys_to_machine_mapping: %lx\n", *up);
+
+	kvaddr = *up;
+	machdep->last_ptbl_read = BADADDR;
+	machdep->last_pmd_read = BADADDR;
+
+	for (i = 0; i < xd->xc_core.p2m_frames; i++) {
+		if ((idx = x86_xendump_page_index(kvaddr)) == MFN_NOT_FOUND)
+			return FALSE;
+		xd->xc_core.p2m_frame_index_list[i] = idx; 
+		kvaddr += PAGESIZE();
+	}
+
+	machdep->last_ptbl_read = 0;
+	machdep->last_pmd_read = 0;
+
+	return TRUE;
+}
+
+/*
+ *  Find the page associated with the kvaddr, and read its contents
+ *  into the passed-in buffer.
+ */
+static char *
+x86_xendump_load_page(ulong kvaddr, char *pgbuf)
+{
+	ulong *entry;
+	ulong *up;
+	ulong mfn;
+
+	if (machdep->flags & PAE)
+		return x86_xendump_load_page_PAE(kvaddr, pgbuf);
+
+        up = (ulong *)machdep->pgd;
+        entry = up + (kvaddr >> PGDIR_SHIFT);
+        mfn = (*entry) >> PAGESHIFT();
+
+        if (!xc_core_mfn_to_page(mfn, pgbuf)) {
+                error(INFO, "cannot read/find pgd entry from cr3 page\n");
+		return NULL;
+	}
+
+        up = (ulong *)pgbuf;
+        entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1));
+        mfn = (*entry) >> PAGESHIFT();
+
+        if (!xc_core_mfn_to_page(mfn, pgbuf)) {
+                error(INFO, "cannot read/find page table page\n");
+		return NULL;
+	}
+
+	return pgbuf;
+}
+
+static char *
+x86_xendump_load_page_PAE(ulong kvaddr, char *pgbuf)
+{
+	ulonglong *entry;
+	ulonglong *up;
+	ulong mfn;
+
+        up = (ulonglong *)machdep->pgd;
+        entry = up + (kvaddr >> PGDIR_SHIFT);
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+
+        if (!xc_core_mfn_to_page(mfn, pgbuf)) {
+                error(INFO, "cannot read/find pgd entry from cr3 page\n");
+                return NULL;
+        }
+
+        up = (ulonglong *)pgbuf;
+        entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1));
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+
+        if (!xc_core_mfn_to_page(mfn, pgbuf)) {
+                error(INFO, "cannot read/find pmd entry from pgd\n");
+                return NULL;
+        }
+
+        up = (ulonglong *)pgbuf;
+        entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+
+        if (!xc_core_mfn_to_page(mfn, pgbuf)) {
+                error(INFO, "cannot read/find page table page from pmd\n");
+                return NULL;
+        }
+
+	return pgbuf;
+}
+
+/*
+ *  Find the dumpfile page index associated with the kvaddr.
+ */
+static int 
+x86_xendump_page_index(ulong kvaddr)
+{
+	int idx;
+        ulong *entry;
+        ulong *up;
+        ulong mfn;
+
+	if (machdep->flags & PAE)
+		return x86_xendump_page_index_PAE(kvaddr);
+
+        up = (ulong *)machdep->pgd;
+        entry = up + (kvaddr >> PGDIR_SHIFT);
+        mfn = (*entry) >> PAGESHIFT();
+	if ((mfn != machdep->last_ptbl_read) && 
+            !xc_core_mfn_to_page(mfn, machdep->ptbl)) {
+                error(INFO, "cannot read/find pgd entry from cr3 page\n");
+		return MFN_NOT_FOUND;
+	}
+	machdep->last_ptbl_read = mfn;
+
+        up = (ulong *)machdep->ptbl;
+        entry = up + ((kvaddr>>12) & (PTRS_PER_PTE-1));
+        mfn = (*entry) >> PAGESHIFT();
+	if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND)
+                error(INFO, "cannot determine page index for %lx\n", 
+			kvaddr);
+
+	return idx;
+}
+
+static int 
+x86_xendump_page_index_PAE(ulong kvaddr)
+{
+	int idx;
+        ulonglong *entry;
+        ulonglong *up;
+        ulong mfn;
+
+        up = (ulonglong *)machdep->pgd;
+        entry = up + (kvaddr >> PGDIR_SHIFT);
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+	if ((mfn != machdep->last_pmd_read) &&
+	    !xc_core_mfn_to_page(mfn, machdep->pmd)) {
+                error(INFO, "cannot read/find pgd entry from cr3 page\n");
+		return MFN_NOT_FOUND;
+	}
+	machdep->last_pmd_read = mfn;
+
+        up = (ulonglong *)machdep->pmd;
+        entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1));
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+        if ((mfn != machdep->last_ptbl_read) &&
+	    !xc_core_mfn_to_page(mfn, machdep->ptbl)) {
+                error(INFO, "cannot read/find pmd entry from pgd\n");
+                return MFN_NOT_FOUND;
+        }
+	machdep->last_ptbl_read = mfn;
+
+        up = (ulonglong *)machdep->ptbl;
+        entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
+        mfn = (ulong)((*entry) >> PAGESHIFT());
+	if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND)
+                error(INFO, "cannot determine page index for %lx\n", 
+			kvaddr);
+
+	return idx;
+}
+
+/*
+ *  Pull the esp from the cpu_user_regs struct in the header,
+ *  turn it into a task, and match it with the active_set.
+ *  Unfortunately, the registers in the vcpu_guest_context
+ *  are not necessarily those of the panic task, so for now
+ *  let get_active_set_panic_task() find the right task.
+ */
+static ulong 
+x86_xendump_panic_task(struct xendump_data *xd)
+{
+	return NO_TASK;
+
+#ifdef TO_BE_REVISITED
+	int i;
+	ulong esp;
+	off_t offset;
+	ulong task;
+
+
+	if (INVALID_MEMBER(vcpu_guest_context_user_regs) ||
+	    INVALID_MEMBER(cpu_user_regs_esp))
+		return NO_TASK;
+
+        offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+                (off_t)OFFSET(vcpu_guest_context_user_regs) +
+		(off_t)OFFSET(cpu_user_regs_esp);
+
+        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+		return NO_TASK;
+
+        if (read(xd->xfd, &esp, sizeof(ulong)) != sizeof(ulong))
+		return NO_TASK;
+
+        if (IS_KVADDR(esp) && (task = stkptr_to_task(esp))) {
+
+                for (i = 0; i < NR_CPUS; i++) {
+                	if (task == tt->active_set[i]) {
+                        	if (CRASHDEBUG(0))
+                                	error(INFO,
+                            "x86_xendump_panic_task: esp: %lx -> task: %lx\n",
+                                        	esp, task);
+                        	return task;
+			}
+		}               
+
+               	error(WARNING,
+		    "x86_xendump_panic_task: esp: %lx -> task: %lx (not active)\n",
+			esp, task);
+        }
+
+	return NO_TASK;
+#endif
+}
+
+/*
+ *  Because of an off-by-one vcpu bug in early xc_domain_dumpcore()
+ *  instantiations, the registers in the vcpu_guest_context are not
+ *  necessarily those of the panic task.  If they are not, the eip/esp
+ *  will be in stop_this_cpu(), as a result of the IPI sent by panic(),
+ *  but the trace looks strange because it comes out of the hypervisor,
+ *  at least if the vcpu had been idle.
+ */
+static void 
+x86_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *eip, ulong *esp)
+{
+	ulong task, xeip, xesp;
+	off_t offset;
+
+        if (INVALID_MEMBER(vcpu_guest_context_user_regs) ||
+            INVALID_MEMBER(cpu_user_regs_eip) ||
+            INVALID_MEMBER(cpu_user_regs_esp))
+                goto generic;
+
+        offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+                (off_t)OFFSET(vcpu_guest_context_user_regs) +
+                (off_t)OFFSET(cpu_user_regs_esp);
+        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+                goto generic;
+        if (read(xd->xfd, &xesp, sizeof(ulong)) != sizeof(ulong))
+                goto generic;
+
+        offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+                (off_t)OFFSET(vcpu_guest_context_user_regs) +
+                (off_t)OFFSET(cpu_user_regs_eip);
+        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+                goto generic;
+        if (read(xd->xfd, &xeip, sizeof(ulong)) != sizeof(ulong))
+                goto generic;
+
+        if (IS_KVADDR(xesp) && (task = stkptr_to_task(xesp)) &&
+	    (task == bt->task)) {
+		if (CRASHDEBUG(1))
+			fprintf(xd->ofp, 
+		"hooks from vcpu_guest_context: eip: %lx esp: %lx\n", xeip, xesp);
+		*eip = xeip;
+		*esp = xesp;
+		return;
+	}
+
+generic:
+	return machdep->get_stack_frame(bt, eip, esp);
+}
+
+/* for Xen Hypervisor analysis */
+
+static int
+x86_xenhyper_is_kvaddr(ulong addr)
+{
+	if (machdep->flags & PAE) {
+		return (addr >= HYPERVISOR_VIRT_START_PAE);
+	}
+	return (addr >= HYPERVISOR_VIRT_START);
+}
+
+static ulong
+x86_get_stackbase_hyper(ulong task)
+{
+	struct xen_hyper_vcpu_context *vcc;
+	int pcpu;
+	ulong init_tss;
+	ulong esp, base;
+	char *buf;
+
+	/* task means vcpu here */
+	vcc = xen_hyper_vcpu_to_vcpu_context(task);
+	if (!vcc)
+		error(FATAL, "invalid vcpu\n");
+
+	pcpu = vcc->processor;
+	if (!xen_hyper_test_pcpu_id(pcpu)) {
+		error(FATAL, "invalid pcpu number\n");
+	}
+	init_tss = symbol_value("init_tss");
+	buf = GETBUF(XEN_HYPER_SIZE(tss_struct));
+	init_tss += XEN_HYPER_SIZE(tss_struct) * pcpu;
+	if (!readmem(init_tss, KVADDR, buf,
+			XEN_HYPER_SIZE(tss_struct), "init_tss", RETURN_ON_ERROR)) {
+		error(FATAL, "cannot read init_tss.\n");
+	}
+	esp = ULONG(buf + XEN_HYPER_OFFSET(tss_struct_esp0));
+	FREEBUF(buf);
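+	/* round the per-cpu esp0 down to the base of its hypervisor stack */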
+	base = esp & (~(STACKSIZE() - 1));
+
+	return base;
+}
+
+static ulong
+x86_get_stacktop_hyper(ulong task)
+{
+	return x86_get_stackbase_hyper(task) + STACKSIZE();
+}
+
+static void
+x86_get_stack_frame_hyper(struct bt_info *bt, ulong *pcp, ulong *spp)
+{
+	struct xen_hyper_vcpu_context *vcc;
+	int pcpu;
+	ulong *regs;
+	ulong esp, eip;
+
+	/* task means vcpu here */
+	vcc = xen_hyper_vcpu_to_vcpu_context(bt->task);
+	if (!vcc)
+		error(FATAL, "invalid vcpu\n");
+
+	pcpu = vcc->processor;
+	if (!xen_hyper_test_pcpu_id(pcpu)) {
+		error(FATAL, "invalid pcpu number\n");
+	}
+
+	if (bt->flags & BT_TEXT_SYMBOLS_ALL) {
+		if (spp)
+			*spp = x86_get_stackbase_hyper(bt->task);
+		if (pcp)
+			*pcp = 0;
+		bt->flags &= ~BT_TEXT_SYMBOLS_ALL;
+		return;
+	}
+
+	regs = (ulong *)xen_hyper_id_to_dumpinfo_context(pcpu)->pr_reg_ptr;
+	esp = XEN_HYPER_X86_NOTE_ESP(regs);
+	eip = XEN_HYPER_X86_NOTE_EIP(regs);
+
+	if (spp) {
+		if (esp < x86_get_stackbase_hyper(bt->task) ||
+			esp >= x86_get_stacktop_hyper(bt->task))
+			*spp = x86_get_stackbase_hyper(bt->task);
+		else
+			*spp = esp;
+	}
+	if (pcp) {
+		if (is_kernel_text(eip))
+			*pcp = eip;
+		else
+			*pcp = 0;
+	}
+}
+
+static void
+x86_init_hyper(int when)
+{
+	switch (when)
+	{
+	case PRE_SYMTAB:
+		machdep->verify_symbol = x86_verify_symbol;
+                if (pc->flags & KERNEL_DEBUG_QUERY)
+                        return;
+                machdep->pagesize = memory_page_size();
+                machdep->pageshift = ffs(machdep->pagesize) - 1;
+                machdep->pageoffset = machdep->pagesize - 1;
+                machdep->pagemask = ~((ulonglong)machdep->pageoffset);
+		machdep->stacksize = machdep->pagesize * 4; /* ODA: magic num */
+        	if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
+                	error(FATAL, "cannot malloc pgd space.");
+                if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
+                        error(FATAL, "cannot malloc pmd space.");
+        	if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
+                	error(FATAL, "cannot malloc ptbl space.");
+		machdep->last_pgd_read = 0;
+		machdep->last_pmd_read = 0;
+		machdep->last_ptbl_read = 0;
+		machdep->machspec = &x86_machine_specific; /* some members used */
+		break;
+
+	case PRE_GDB:
+		if (symbol_exists("create_pae_xen_mappings") ||
+		    symbol_exists("idle_pg_table_l3")) {
+                	machdep->flags |= PAE;
+			PGDIR_SHIFT = PGDIR_SHIFT_3LEVEL;
+			PTRS_PER_PTE = PTRS_PER_PTE_3LEVEL;
+			PTRS_PER_PGD = PTRS_PER_PGD_3LEVEL;
+                        machdep->kvtop = x86_kvtop_PAE;
+			machdep->kvbase = HYPERVISOR_VIRT_START_PAE;
+		} else {
+			PGDIR_SHIFT = PGDIR_SHIFT_2LEVEL;
+                        PTRS_PER_PTE = PTRS_PER_PTE_2LEVEL;
+                        PTRS_PER_PGD = PTRS_PER_PGD_2LEVEL;
+                	machdep->kvtop = x86_kvtop;
+			free(machdep->pmd);
+			machdep->pmd = machdep->pgd;   
+			machdep->kvbase = HYPERVISOR_VIRT_START;
+		}
+		machdep->ptrs_per_pgd = PTRS_PER_PGD;
+		machdep->identity_map_base = DIRECTMAP_VIRT_START;
+                machdep->is_kvaddr = x86_xenhyper_is_kvaddr;
+	        machdep->eframe_search = x86_eframe_search;
+	        machdep->back_trace = x86_back_trace_cmd;
+	        machdep->processor_speed = x86_processor_speed;		/* ODA: check */
+		machdep->dump_irq = generic_dump_irq; 			/* ODA: check */
+		machdep->get_stack_frame = x86_get_stack_frame_hyper;
+		machdep->get_stackbase = x86_get_stackbase_hyper;
+		machdep->get_stacktop = x86_get_stacktop_hyper;
+		machdep->translate_pte = x86_translate_pte;
+		machdep->memory_size = xen_hyper_x86_memory_size;
+		machdep->dis_filter = x86_dis_filter;
+//		machdep->cmd_mach = x86_cmd_mach;			/* ODA: check */
+		machdep->get_smp_cpus = xen_hyper_x86_get_smp_cpus;
+//		machdep->line_number_hooks = x86_line_number_hooks;	/* ODA: check */
+		machdep->flags |= FRAMESIZE_DEBUG;			/* ODA: check */
+		machdep->value_to_symbol = generic_machdep_value_to_symbol;
+		machdep->clear_machdep_cache = x86_clear_machdep_cache;
+
+		/* machdep table for Xen Hypervisor */
+		xhmachdep->pcpu_init = xen_hyper_x86_pcpu_init;
+		break;
+
+	case POST_GDB:
+#if 0	/* ODA: need this ? */
+		if (x86_omit_frame_pointer()) {
+			machdep->flags |= OMIT_FRAME_PTR;
+		}
+#endif
+		XEN_HYPER_STRUCT_SIZE_INIT(cpu_time, "cpu_time");
+		XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86");
+		XEN_HYPER_STRUCT_SIZE_INIT(tss_struct, "tss_struct");
+		XEN_HYPER_MEMBER_OFFSET_INIT(tss_struct_esp0, "tss_struct", "esp0");
+		XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_local_tsc_stamp, "cpu_time", "local_tsc_stamp");
+		XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_stime_local_stamp, "cpu_time", "stime_local_stamp");
+		XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_stime_master_stamp, "cpu_time", "stime_master_stamp");
+		XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_tsc_scale, "cpu_time", "tsc_scale");
+		XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_calibration_timer, "cpu_time", "calibration_timer");
+		if (symbol_exists("cpu_data")) {
+			xht->cpu_data_address = symbol_value("cpu_data");
+		}
+/* KAK Can this be calculated? */
+		if (!machdep->hz) {
+			machdep->hz = XEN_HYPER_HZ;
+		}
+		break;
+
+	case POST_INIT:
+		break;
+	}
+}
+
 #endif /* X86 */
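
A side note on the x86.c additions above: x86_get_xendump_regs() recovers the vcpu's saved EIP/ESP by seeking into the xc_core dump at xch_ctxt_offset plus the gdb-resolved member offsets, and it only trusts the values when the ESP maps back onto the stack of the task being traced; otherwise it falls back to the generic stack-frame code. A minimal standalone sketch of that read pattern, assuming only an open dump file descriptor and offsets resolved elsewhere (the helper name and parameters are illustrative, not part of crash's API):

    #include <sys/types.h>
    #include <unistd.h>

    /* Read one saved register from the vcpu_guest_context area of an
     * xc_core dump; the caller still has to validate the value. */
    static int
    read_vcpu_ctxt_reg(int fd, off_t ctxt_offset, off_t user_regs_offset,
                       off_t member_offset, unsigned long *value)
    {
            off_t pos = ctxt_offset + user_regs_offset + member_offset;

            if (lseek(fd, pos, SEEK_SET) == -1)
                    return 0;
            if (read(fd, value, sizeof(*value)) != sizeof(*value))
                    return 0;
            return 1;
    }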
--- crash/lkcd_x86_trace.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_x86_trace.h	2007-12-19 16:36:56.000000000 -0500
@@ -35,6 +35,25 @@
 
 typedef uint32_t kaddr_t; 
 
+extern int INT_EFRAME_SS;
+extern int INT_EFRAME_ESP;
+extern int INT_EFRAME_EFLAGS;
+extern int INT_EFRAME_CS;
+extern int INT_EFRAME_EIP;
+extern int INT_EFRAME_ERR;
+extern int INT_EFRAME_ES;
+extern int INT_EFRAME_DS;
+extern int INT_EFRAME_EAX;
+extern int INT_EFRAME_EBP;
+extern int INT_EFRAME_EDI;
+extern int INT_EFRAME_ESI;
+extern int INT_EFRAME_EDX;
+extern int INT_EFRAME_ECX;
+extern int INT_EFRAME_EBX;
+extern int INT_EFRAME_GS;
+
+extern ulong int_eframe[];
+
 #endif  /* REDHAT */
 
 
--- crash/gdb_interface.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/gdb_interface.c	2008-10-24 14:07:45.000000000 -0400
@@ -1,8 +1,8 @@
 /* gdb_interface.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -31,9 +31,6 @@
 {
 	argc = 1;
 
-	if (CRASHDEBUG(1))
-		gdb_readnow_warning();
-
 	if (pc->flags & SILENT) {
 		if (pc->flags & READNOW)
 			argv[argc++] = "--readnow";
@@ -198,20 +195,28 @@
 retry:
 	BZERO(req->buf, BUFSIZE);
         req->command = GNU_GET_DATATYPE;
-        req->name = "task_struct";
+	req->name = XEN_HYPER_MODE() ? "page_info" : "task_struct";
         req->flags = GNU_RETURN_ON_ERROR;
         gdb_interface(req);
 
         if (req->flags & GNU_COMMAND_FAILED) {
+		if (XEN_HYPER_MODE())
+			no_debugging_data(WARNING);  /* just bail out */
+
 		if (!debug_data_pulled_in) {
 			if (CRASHDEBUG(1))
 				error(INFO, 
-         "gdb_session_init: pulling in debug data by accessing init_mm.mmap\n");
+           "gdb_session_init: pulling in debug data by accessing init_mm.mmap %s\n",
+					symbol_exists("sysfs_mount") ?
+					"and sysfs_mount" : "");
 			debug_data_pulled_in = TRUE;
 			req->command = GNU_PASS_THROUGH;
 			req->flags = GNU_RETURN_ON_ERROR|GNU_NO_READMEM;
 			req->name = NULL;
-			sprintf(req->buf, "print init_mm.mmap");
+			if (symbol_exists("sysfs_mount"))
+				sprintf(req->buf, "print sysfs_mount, init_mm.mmap");
+			else
+				sprintf(req->buf, "print init_mm.mmap");
 			gdb_interface(req);
         		if (!(req->flags & GNU_COMMAND_FAILED)) 
 				goto retry;
@@ -237,11 +242,16 @@
 	sprintf(req->buf, "set height 0");
 	gdb_interface(req);
 
+	req->command = GNU_PASS_THROUGH;
+	req->name = NULL, req->flags = 0;
+	sprintf(req->buf, "set width 0");
+	gdb_interface(req);
+
        /*
         *  Patch gdb's symbol values with the correct values from either
         *  the System.map or non-debug vmlinux, whichever is in effect.
         */
-	if ((pc->flags & SYSMAP) || 
+	if ((pc->flags & SYSMAP) || (kt->flags & (RELOC_SET|RELOC_FORCE)) || 
 	    (pc->namelist_debug && !pc->debuginfo_file)) {
 		req->command = GNU_PATCH_SYMBOL_VALUES;
         	req->flags = GNU_RETURN_ON_ERROR;
@@ -364,6 +374,7 @@
         fprintf(fp, "    prettyprint_arrays: %d\n", prettyprint_arrays);
         fprintf(fp, "   prettyprint_structs: %d\n", prettyprint_structs);
         fprintf(fp, "repeat_count_threshold: %x\n", repeat_count_threshold);
+	fprintf(fp, "    stop_print_at_null: %d\n", stop_print_at_null);
 	fprintf(fp, "             print_max: %d\n", print_max);
         fprintf(fp, "          output_radix: %d\n", output_radix);
         fprintf(fp, "         output_format: ");
@@ -556,6 +567,14 @@
 
 	error_hook = NULL;
 
+	if (st->flags & ADD_SYMBOL_FILE) {
+		error(INFO, 
+		    "%s\n     gdb add-symbol-file command failed\n", 
+			st->current->mod_namelist);
+		delete_load_module(st->current->mod_base);
+                st->flags &= ~ADD_SYMBOL_FILE;
+	}
+
 	if (pc->cur_gdb_cmd) {
 		pc->last_gdb_cmd = pc->cur_gdb_cmd;
 		pc->cur_gdb_cmd = 0;
@@ -619,6 +638,7 @@
 	"clear", "disable", "enable", "condition", "ignore", "frame", 
 	"select-frame", "f", "up", "down", "catch", "tcatch", "return",
 	"file", "exec-file", "core-file", "symbol-file", "load", "si", "ni", 
+	"shell", 
 	NULL  /* must be last */
 };
 
@@ -628,7 +648,7 @@
 };
 
 #define RESTRICTED_GDB_COMMAND \
-        "restricted gdb command: %s\n%s\"%s\" may only be used in a .gdbinit file or in a command file.\n%sThe .gdbinit file is read automatically during %s initialization.\n%sOther user-defined command files may be read interactively during\n%s%s runtime by using the gdb \"source\" command."
+        "restricted gdb command: %s\n%s\"%s\" may only be used in a .gdbinit file or in a command file.\n%sThe .gdbinit file is read automatically during %s initialization.\n%sOther user-defined command files may be read interactively during\n%s%s runtime by using the gdb \"source\" command.\n"
 
 static int
 is_restricted_command(char *cmd, ulong flags)
@@ -668,6 +688,7 @@
 cmd_gdb(void)
 {
         int c;
+	char buf[BUFSIZE];
 
         while ((c = getopt(argcnt, args, "")) != EOF) {
                 switch(c)
@@ -694,14 +715,8 @@
 	 *  If the command is not restricted, pass it on.
 	 */
 	if (!is_restricted_command(args[1], FAULT_ON_ERROR)) {
-		if (pc->redirect & (REDIRECT_TO_PIPE|REDIRECT_TO_FILE))
-			pc->orig_line[pc->eoc_index] = NULLCHAR;
-	
-		if (STRNEQ(pc->orig_line, "gdb") && 
-	            whitespace(pc->orig_line[3]))
-			shift_string_left(pc->orig_line, strlen("gdb")+1); 
-	
-		gdb_pass_through(clean_line(pc->orig_line), NULL, 0);
+		concat_args(buf, 1, 0);
+		gdb_pass_through(buf, NULL, 0);
 	}
 }
 
@@ -722,8 +737,10 @@
 	if (pc->cur_req->flags & GNU_NO_READMEM)
 		return TRUE;
 
-	if (UNIQUE_COMMAND("dis"))
+	if (pc->curcmd_flags & MEMTYPE_UVADDR)
 		memtype = UVADDR;
+	else if (pc->curcmd_flags & MEMTYPE_FILEADDR)
+		memtype = FILEADDR;
 	else if (!IS_KVADDR(addr)) {
 		if (STREQ(pc->curcmd, "gdb") && 
 		    STRNEQ(pc->cur_req->buf, "x/")) {
@@ -740,12 +757,11 @@
 	if (CRASHDEBUG(1))
 		console("gdb_readmem_callback[%d]: %lx %d\n", 
 			memtype, addr, len);
-	
-#ifdef OLDWAY
-	return(readmem(addr, KVADDR, buf, len, 
-		"gdb_readmem_callback", RETURN_ON_ERROR));
-#endif
 
+	if (memtype == FILEADDR)
+		return(readmem(pc->curcmd_private, memtype, buf, len,
+                	"gdb_readmem_callback", RETURN_ON_ERROR));
+	
 	switch (len)
 	{
 	case SIZEOF_8BIT:
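
The gdb_readmem_callback() change above stops guessing the memory type from the command name and instead honors per-command flags: MEMTYPE_UVADDR forces a user-space read, MEMTYPE_FILEADDR redirects the read to a raw file offset carried in pc->curcmd_private, and everything else falls through to the kernel/user address checks. A rough sketch of that dispatch in isolation (the flag values and names below are stand-ins, not crash's real definitions):

    enum read_target { READ_KVADDR, READ_UVADDR, READ_FILEADDR };

    #define FLAG_MEMTYPE_UVADDR   0x1   /* stand-in flag bits */
    #define FLAG_MEMTYPE_FILEADDR 0x2

    /* Decide how a debugger-initiated read should be satisfied. */
    static enum read_target
    choose_read_target(unsigned long cmd_flags, int addr_is_kernel)
    {
            if (cmd_flags & FLAG_MEMTYPE_UVADDR)
                    return READ_UVADDR;
            if (cmd_flags & FLAG_MEMTYPE_FILEADDR)
                    return READ_FILEADDR;
            return addr_is_kernel ? READ_KVADDR : READ_UVADDR;
    }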
--- crash/README.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/README	2009-02-12 09:31:02.000000000 -0500
@@ -69,7 +69,7 @@
      After the kernel is re-compiled, the uncompressed "vmlinux" kernel
      that is created in the top-level kernel build directory must be saved.
 
-  To build this utility, simply uncompress the tar file, enter the crash-4.0
+  To build this utility, simply uncompress the tar file, enter the crash-4.0-7.7p1
   subdirectory, and type "make".  The initial build will take several minutes 
   because the gdb module must be configured and built.  Alternatively, the
   crash source RPM file may be installed and built, and the resultant crash
@@ -89,11 +89,14 @@
 
     $ crash
     
-    crash 4.0
-    Copyright (C) 2002, 2003, 2004, 2005  Red Hat, Inc.
-    Copyright (C) 2004, 2005  IBM Corporation
-    Copyright (C) 1999-2005  Hewlett-Packard Co
-    Copyright (C) 1999, 2002  Silicon Graphics, Inc.
+    crash 4.0-7.7p1
+    Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009  Red Hat, Inc.
+    Copyright (C) 2004, 2005, 2006  IBM Corporation
+    Copyright (C) 1999-2006  Hewlett-Packard Co
+    Copyright (C) 2005, 2006  Fujitsu Limited
+    Copyright (C) 2006, 2007  VA Linux Systems Japan K.K.
+    Copyright (C) 2005  NEC Corporation
+    Copyright (C) 1999, 2002, 2007  Silicon Graphics, Inc.
     Copyright (C) 1999, 2000, 2001, 2002  Mission Critical Linux, Inc.
     This program is free software, covered by the GNU General Public License,
     and you are welcome to change it and/or distribute copies of it under
@@ -111,7 +114,7 @@
           KERNEL: /boot/vmlinux
         DUMPFILE: /dev/mem
             CPUS: 1
-            DATE: Wed Jul 13 13:26:00 2005
+            DATE: Thu Feb 12 09:31:02 2009
           UPTIME: 10 days, 22:55:18
     LOAD AVERAGE: 0.08, 0.03, 0.01
            TASKS: 42
@@ -139,7 +142,7 @@
     exit           log            rd             task           
     extend         mach           repeat         timer          
     
-    crash version: 4.0      gdb version: 6.1
+    crash version: 4.0-7.7p1   gdb version: 6.1
     For help on any command above, enter "help <command>".
     For help on input options, enter "help input".
     For help on output options, enter "help output".
@@ -152,11 +155,14 @@
 
     $ crash vmlinux vmcore
  
-    crash 4.0
-    Copyright (C) 2002, 2003, 2004, 2005  Red Hat, Inc.
-    Copyright (C) 2004, 2005  IBM Corporation
-    Copyright (C) 1999-2005  Hewlett-Packard Co
-    Copyright (C) 1999, 2002  Silicon Graphics, Inc.
+    crash 4.0-7.7p1
+    Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009  Red Hat, Inc.
+    Copyright (C) 2004, 2005, 2006  IBM Corporation
+    Copyright (C) 1999-2006  Hewlett-Packard Co
+    Copyright (C) 2005, 2006  Fujitsu Limited
+    Copyright (C) 2006, 2007  VA Linux Systems Japan K.K.
+    Copyright (C) 2005  NEC Corporation
+    Copyright (C) 1999, 2002, 2007  Silicon Graphics, Inc.
     Copyright (C) 1999, 2000, 2001, 2002  Mission Critical Linux, Inc.
     This program is free software, covered by the GNU General Public License,
     and you are welcome to change it and/or distribute copies of it under
@@ -196,11 +202,14 @@
 
     $ crash vmlinux.17 lcore.cr.17
 
-    crash 4.0
-    Copyright (C) 2002, 2003, 2004, 2005  Red Hat, Inc.
-    Copyright (C) 2004, 2005  IBM Corporation
-    Copyright (C) 1999-2005  Hewlett-Packard Co
-    Copyright (C) 1999, 2002  Silicon Graphics, Inc.
+    crash 4.0-7.7p1
+    Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009  Red Hat, Inc.
+    Copyright (C) 2004, 2005, 2006  IBM Corporation
+    Copyright (C) 1999-2006  Hewlett-Packard Co
+    Copyright (C) 2005, 2006  Fujitsu Limited
+    Copyright (C) 2006, 2007  VA Linux Systems Japan K.K.
+    Copyright (C) 2005  NEC Corporation
+    Copyright (C) 1999, 2002, 2007  Silicon Graphics, Inc.
     Copyright (C) 1999, 2000, 2001, 2002  Mission Critical Linux, Inc.
     This program is free software, covered by the GNU General Public License,
     and you are welcome to change it and/or distribute copies of it under
--- crash/diskdump.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/diskdump.h	2007-04-10 10:24:02.000000000 -0400
@@ -1,8 +1,10 @@
 /* 
  * diskdump.h
  *
- * Copyright (C) 2004, 2005 David Anderson
- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004, 2005, 2006  David Anderson
+ * Copyright (C) 2004, 2005, 2006  Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2005  FUJITSU LIMITED
+ * Copyright (C) 2005  NEC Corporation
  *
  * This software may be freely redistributed under the terms of the
  * GNU General Public License.
@@ -10,7 +12,65 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Author: David Anderson
  */
 
+#include <elf.h>
+
+#define divideup(x, y)	(((x) + ((y) - 1)) / (y))
+#define round(x, y)	(((x) / (y)) * (y))
+
+#define DUMP_PARTITION_SIGNATURE	"diskdump"
+#define SIG_LEN (sizeof(DUMP_PARTITION_SIGNATURE) - 1)
+#define DISK_DUMP_SIGNATURE		"DISKDUMP"
+#define KDUMP_SIGNATURE			"KDUMP   "
+
+#define DUMP_HEADER_COMPLETED	0
+#define DUMP_HEADER_INCOMPLETED 1
+#define DUMP_HEADER_COMPRESSED  8
+
+struct disk_dump_header {
+	char			signature[SIG_LEN];	/* = "DISKDUMP" */
+	int			header_version; /* Dump header version */
+	struct new_utsname	utsname;	/* copy of system_utsname */
+	struct timeval		timestamp;	/* Time stamp */
+	unsigned int		status; 	/* Above flags */
+	int			block_size;	/* Size of a block in byte */
+	int			sub_hdr_size;	/* Size of arch dependent
+						   header in blocks */
+	unsigned int		bitmap_blocks;	/* Size of Memory bitmap in
+						   block */
+	unsigned int		max_mapnr;	/* = max_mapnr */
+	unsigned int		total_ram_blocks;/* Number of blocks that
+						   should be written */
+	unsigned int		device_blocks;	/* Number of total blocks in
+						 * the dump device */
+	unsigned int		written_blocks; /* Number of written blocks */
+	unsigned int		current_cpu;	/* CPU# which handles dump */
+	int			nr_cpus;	/* Number of CPUs */
+	struct task_struct	*tasks[0];
+};
+
+struct disk_dump_sub_header {
+	long		elf_regs;
+};
+
+struct kdump_sub_header {
+	unsigned long	phys_base;
+	int		dump_level;  /* header_version 1 and later */
+};
+
+/* page flags */
+#define DUMP_DH_COMPRESSED	0x1	/* page is compressed               */
+
+/* descriptor of each page for vmcore */
+typedef struct page_desc {
+	off_t			offset;		/* the offset of the page data*/
+	unsigned int		size;		/* the size of this dump page */
+	unsigned int		flags;		/* flags */
+	unsigned long long	page_flags;	/* page flags */
+} page_desc_t;
+
+#define DISKDUMP_CACHED_PAGES	(16)
+#define PAGE_VALID		(0x1)	/* flags */
+#define DISKDUMP_VALID_PAGE(flags)	((flags) & PAGE_VALID)
+
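+
A note on the header definitions added above: they are enough for a first-pass sanity check of a dump file before any page lookups are attempted. A minimal sketch, assuming a struct disk_dump_header has already been read from the start of the file (a real check would also examine header_version and the INCOMPLETED/COMPRESSED status values):

    #include <string.h>

    /* Reject files whose signature is neither "DISKDUMP" nor "KDUMP   ". */
    static int
    dump_header_plausible(const struct disk_dump_header *dh)
    {
            if (memcmp(dh->signature, DISK_DUMP_SIGNATURE, SIG_LEN) != 0 &&
                memcmp(dh->signature, KDUMP_SIGNATURE, SIG_LEN) != 0)
                    return 0;
            return dh->block_size > 0 && dh->max_mapnr > 0;
    }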
--- crash/s390x.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/s390x.c	2009-01-15 14:07:23.000000000 -0500
@@ -1,9 +1,9 @@
 /* s390.c - core analysis suite
  *
  * Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2005 Michael Holzheu, IBM Corporation
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2005, 2006 Michael Holzheu, IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -20,24 +20,6 @@
 
 #define S390X_WORD_SIZE   8
 
-#define S390X_PAGE_SHIFT  12
-#define S390X_PAGE_SIZE   (1ULL << S390X_PAGE_SHIFT)
-#define S390X_PAGE_MASK   (~(S390X_PAGE_SIZE-1))
-
-#define S390X_PGDIR_SHIFT 31
-#define S390X_PGDIR_SIZE  (1ULL << S390X_PGDIR_SHIFT)
-#define S390X_PGDIR_MASK  (~(S390X_PGDIR_SIZE-1))
-
-#define S390X_PMD_SHIFT   20
-#define S390X_PMD_SIZE    (1ULL << S390X_PMD_SHIFT)
-#define S390X_PMD_MASK    (~(S390X_PMD_SIZE-1))
-
-#define S390X_PTRS_PER_PGD       2048
-#define S390X_PTRS_PER_PMD       2048
-#define S390X_PTRS_PER_PTE       256
-
-#define S390X_PMD_BASE_MASK      (~((1ULL<<12)-1))
-#define S390X_PT_BASE_MASK       (~((1ULL<<11)-1))
 #define S390X_PAGE_BASE_MASK     (~((1ULL<<12)-1))
 
 /* Flags used in entries of page dirs and page tables.
@@ -48,37 +30,11 @@
 #define S390X_PAGE_INVALID      0x400ULL /* HW invalid */
 #define S390X_PAGE_INVALID_MASK 0x601ULL /* for linux 2.6 */
 #define S390X_PAGE_INVALID_NONE 0x401ULL /* for linux 2.6 */
-#define S390X_PMD_ENTRY_INV     0x20ULL  /* invalid segment table entry      */
-#define S390X_PGD_ENTRY_INV     0x20ULL  /* invalid region table entry       */
-#define S390X_PMD_ENTRY         0x00
-#define S390X_PGD_ENTRY_FIRST   0x05     /* first part of pmd is valid */
-#define S390X_PGD_ENTRY_SECOND  0xc7     /* second part of pmd is valid */
-#define S390X_PGD_ENTRY_FULL    0x07     /* complete pmd is valid */
 
 /* bits 52, 55 must contain zeroes in a pte */
 #define S390X_PTE_INVALID_MASK  0x900ULL
 #define S390X_PTE_INVALID(x) ((x) & S390X_PTE_INVALID_MASK)
 
-/* pgd/pmd/pte query macros */
-#define s390x_pgd_none(x) ((x) & S390X_PGD_ENTRY_INV)
-#define s390x_pgd_bad(x) !( (((x) & S390X_PGD_ENTRY_FIRST) == \
-                                    S390X_PGD_ENTRY_FIRST) || \
-                                    (((x) & S390X_PGD_ENTRY_SECOND) == \
-                                    S390X_PGD_ENTRY_SECOND) || \
-                                    (((x) & S390X_PGD_ENTRY_FULL) == \
-                                    S390X_PGD_ENTRY_FULL))
-
-#define s390x_pmd_none(x) ((x) & S390X_PMD_ENTRY_INV)
-#define s390x_pmd_bad(x) (((x) & (~S390X_PT_BASE_MASK & \
-                                  ~S390X_PMD_ENTRY_INV)) != \
-                                  S390X_PMD_ENTRY)
-
-#define s390x_pte_none(x) (((x) & (S390X_PAGE_INVALID | \
-                                   S390X_PAGE_RO | \
-                                   S390X_PAGE_PRESENT)) == \
-                                   S390X_PAGE_INVALID)
-
-
 #define ASYNC_STACK_SIZE  STACKSIZE() // can be 8192 or 16384
 #define KERNEL_STACK_SIZE STACKSIZE() // can be 8192 or 16384
 
@@ -88,9 +44,6 @@
  * declarations of static functions
  */
 static void s390x_print_lowcore(char*, struct bt_info*,int);
-static unsigned long s390x_pgd_offset(unsigned long, unsigned long);
-static unsigned long s390x_pmd_offset(unsigned long, unsigned long);
-static unsigned long s390x_pte_offset(unsigned long, unsigned long);
 static int s390x_kvtop(struct task_context *, ulong, physaddr_t *, int);
 static int s390x_uvtop(struct task_context *, ulong, physaddr_t *, int);
 static int s390x_vtop(unsigned long, ulong, physaddr_t*, int);
@@ -173,7 +126,10 @@
 		machdep->nr_irqs = 0;  /* TBD */
 		machdep->vmalloc_start = s390x_vmalloc_start;
 		machdep->dump_irq = s390x_dump_irq;
-		machdep->hz = HZ;
+		if (!machdep->hz)
+			machdep->hz = HZ;
+		machdep->section_size_bits = _SECTION_SIZE_BITS;
+		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
 		break;
 
 	case POST_INIT:
@@ -193,8 +149,6 @@
 	fprintf(fp, "              flags: %lx (", machdep->flags);
 	if (machdep->flags & KSYMS_START)
 		fprintf(fp, "%sKSYMS_START", others++ ? "|" : "");
-	if (machdep->flags & SYSRQ)
-		fprintf(fp, "%sSYSRQ", others++ ? "|" : "");
 	fprintf(fp, ")\n");
 
 	fprintf(fp, "             kvbase: %lx\n", machdep->kvbase);
@@ -207,7 +161,8 @@
 	fprintf(fp, "                 hz: %d\n", machdep->hz);
 	fprintf(fp, "                mhz: %ld\n", machdep->mhz);
 	fprintf(fp, "            memsize: %lld (0x%llx)\n", 
-		machdep->memsize, machdep->memsize);
+		(unsigned long long)machdep->memsize,
+		(unsigned long long)machdep->memsize);
 	fprintf(fp, "               bits: %d\n", machdep->bits);
 	fprintf(fp, "            nr_irqs: %d\n", machdep->nr_irqs);
 	fprintf(fp, "      eframe_search: s390x_eframe_search()\n");
@@ -241,23 +196,12 @@
 	fprintf(fp, "                pmd: %lx\n", (ulong)machdep->pmd);
 	fprintf(fp, "               ptbl: %lx\n", (ulong)machdep->ptbl);
 	fprintf(fp, "       ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd);
+	fprintf(fp, "   max_physmem_bits: %ld\n", machdep->max_physmem_bits);
+	fprintf(fp, "  section_size_bits: %ld\n", machdep->section_size_bits);
 	fprintf(fp, "           machspec: %lx\n", (ulong)machdep->machspec);
 }
 
 /*
- * Check if address is in the vmalloc area
- */
-int
-s390x_IS_VMALLOC_ADDR(ulong addr)
-{
-	static unsigned long high_memory = 0;
-	if(!high_memory){
-		high_memory = s390x_vmalloc_start();
-	}
-	return (addr > high_memory);
-}
-
-/*
  * Check if address is in context's address space
  */
 static int 
@@ -308,7 +252,7 @@
 /*
  * Check if page is mapped
  */
-inline int s390x_pte_present(unsigned long x){
+static inline int s390x_pte_present(unsigned long x){
 	if(THIS_KERNEL_VERSION >= LINUX(2,6,0)){
 		return !((x) & S390X_PAGE_INVALID) ||
 			((x) & S390X_PAGE_INVALID_MASK) == S390X_PAGE_INVALID_NONE;
@@ -317,81 +261,102 @@
 	}
 }
 
-/* 
+/*
  * page table traversal functions 
  */
-unsigned long s390x_pgd_offset(unsigned long pgd_base, unsigned long vaddr)
-{
-	unsigned long pgd_off, pmd_base;
-
-	pgd_off = ((vaddr >> S390X_PGDIR_SHIFT) &
-		   (S390X_PTRS_PER_PGD - 1)) * 8;
-	readmem(pgd_base + pgd_off, PHYSADDR, &pmd_base, sizeof(long),
-		"pmd_base",FAULT_ON_ERROR);
-
-	return pmd_base;
-}
 
-unsigned long s390x_pmd_offset(unsigned long pmd_base, unsigned long vaddr)
-{
-	unsigned long pmd_off, pte_base;
-
-	pmd_off = ((vaddr >> S390X_PMD_SHIFT) & (S390X_PTRS_PER_PMD - 1))
-		* 8;
-	readmem(pmd_base + pmd_off, PHYSADDR, &pte_base, sizeof(long),
-		"pte_base",FAULT_ON_ERROR);
-	return pte_base;
-}
-
-unsigned long s390x_pte_offset(unsigned long pte_base, unsigned long vaddr)
-{
-	unsigned long pte_off, pte_val;
-
-	pte_off = ((vaddr >> S390X_PAGE_SHIFT) & (S390X_PTRS_PER_PTE - 1))
-		* 8;
-	readmem(pte_base + pte_off, PHYSADDR, &pte_val, sizeof(long),
-		"pte_val",FAULT_ON_ERROR);
-	return pte_val;
+/* Region or segment table traversal function */
+static ulong _kl_rsg_table_deref_s390x(ulong vaddr, ulong table,
+					 int len, int level)
+{
+	ulong offset, entry;
+
+	offset = ((vaddr >> (11*level + 20)) & 0x7ffULL) * 8;
+	if (offset >= (len + 1)*4096)
+		/* Offset is over the table limit. */
+		return 0;
+	readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry",
+		FAULT_ON_ERROR);
+	/*
+	 * Check that the table-type bits of the entry match the table
+	 * level being dereferenced (region first/second/third or segment).
+	 */
+	if ((entry & 0xcULL) != (level << 2))
+		return 0;
+	/* Check if the region table entry has the invalid bit set. */
+	if (entry & 0x40ULL)
+		return 0;
+	/* Region table entry is valid and well formed. */
+	return entry;
 }
 
-/*
- * Generic vtop function for user and kernel addresses
- */
-static int
-s390x_vtop(unsigned long pgd_base, ulong kvaddr, physaddr_t *paddr, int verbose)
+/* Page table traversal function */
+static ulong _kl_pg_table_deref_s390x(ulong vaddr, ulong table)
 {
-	unsigned long pmd_base, pte_base, pte_val;
+	ulong offset, entry;
 
-	/* get the pgd entry */
-	pmd_base = s390x_pgd_offset(pgd_base,kvaddr);
-	if(s390x_pgd_bad(pmd_base) ||
-	   s390x_pgd_none(pmd_base)){
-		*paddr = 0;
-		return FALSE;
-	}
-	/* get the pmd */
-	pmd_base = pmd_base & S390X_PMD_BASE_MASK;
-	pte_base = s390x_pmd_offset(pmd_base,kvaddr);
-	if(s390x_pmd_bad(pte_base) ||
-	   s390x_pmd_none(pte_base)) {
-		*paddr = 0;
-		return FALSE;
-	}
-	/* get the pte */
-	pte_base = pte_base & S390X_PT_BASE_MASK;
-	pte_val = s390x_pte_offset(pte_base,kvaddr);
-	if (S390X_PTE_INVALID(pte_val) ||
-	    s390x_pte_none(pte_val)){
-		*paddr = 0;
+	offset = ((vaddr >> 12) & 0xffULL) * 8;
+	readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry",
+		FAULT_ON_ERROR);
+	/*
+	 * Check if the page table entry could be read and doesn't have
+	 * any of the reserved bits set.
+	 */
+	if (entry & 0x900ULL)
+		return 0;
+	/* Check if the page table entry has the invalid bit set. */
+	if (entry & 0x400ULL)
+		return 0;
+	/* Page table entry is valid and well formed. */
+	return entry;
+}
+
+/* lookup virtual address in page tables */
+int s390x_vtop(ulong table, ulong vaddr, physaddr_t *phys_addr, int verbose)
+{
+	ulong entry, paddr;
+	int level, len;
+
+	/*
+	 * Walk the region and segment tables.
+	 * We assume that the table length field in the asce is set to the
+	 * maximum value of 3 (which translates to a region first, region
+	 * second, region third or segment table with 2048 entries) and that
+	 * the addressing mode is 64 bit.
+	 */
+	len = 3;
+	/* Read the first entry to find the number of page table levels. */
+	readmem(table, KVADDR, &entry, sizeof(entry), "entry", FAULT_ON_ERROR);
+	level = (entry & 0xcULL) >> 2;
+	if ((vaddr >> (31 + 11*level)) != 0ULL) {
+		/* Address too big for the number of page table levels. */
 		return FALSE;
 	}
-	if(!s390x_pte_present(pte_val)){
-		/* swapped out */ 
-		*paddr = pte_val;
+	while (level >= 0) {
+		entry = _kl_rsg_table_deref_s390x(vaddr, table, len, level);
+		if (!entry)
+			return 0;
+		table = entry & ~0xfffULL;
+		len = entry & 0x3ULL;
+		level--;
+	}
+
+	/* Check if this is a large (1MB) page. */
+	if (entry & 0x400ULL) {
+		/* Add the 1MB page offset and return the final value. */
+		*phys_addr = table + (vaddr & 0xfffffULL);
+		return TRUE;
+	}
+
+	/* Get the page table entry */
+	entry = _kl_pg_table_deref_s390x(vaddr, entry & ~0x7ffULL);
+	if (!entry)
 		return FALSE;
-	}
-	*paddr = (pte_val & S390X_PAGE_BASE_MASK) |
-		(kvaddr & (~(S390X_PAGE_MASK)));
+
+	/* Isolate the page origin from the page table entry. */
+	paddr = entry & ~0xfffULL;
+
+	/* Add the page offset and return the final value. */
+	*phys_addr = paddr + (vaddr & 0xfffULL);
+
 	return TRUE;
 }
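
To make the index arithmetic in _kl_rsg_table_deref_s390x() concrete: each region or segment table holds up to 2048 eight-byte entries, and the level picks which 11-bit slice of the virtual address is used as the index (the page table beneath has 256 entries):

    level 3 (region first)    index = (vaddr >> 53) & 0x7ff
    level 2 (region second)   index = (vaddr >> 42) & 0x7ff
    level 1 (region third)    index = (vaddr >> 31) & 0x7ff
    level 0 (segment)         index = (vaddr >> 20) & 0x7ff
    page table                index = (vaddr >> 12) & 0xff

The byte offset into the table is index * 8 in every case, and the (vaddr >> (31 + 11*level)) != 0 test in s390x_vtop() rejects addresses that would need more translation levels than the top-level entry announces.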
 
@@ -453,6 +418,10 @@
 	if (STREQ(name, "Letext") || STREQ(name, "gcc2_compiled."))
 		return FALSE;
 
+        /* reject L2^B symbols */
+	if (strstr(name, "L2\002") == name)
+	    	return FALSE;
+
 	/* throw away all symbols containing a '.' */
 	for(i = 0; i < strlen(name);i++){
 		if(name[i] == '.')
@@ -514,7 +483,7 @@
 		return FALSE;
 	}
 	fprintf(fp,"PTE      PHYSICAL  FLAGS\n");
-	fprintf(fp,"%08x %08x",pte, pte & S390X_PAGE_BASE_MASK);
+	fprintf(fp,"%08lx %08llx",pte, pte & S390X_PAGE_BASE_MASK);
 	fprintf(fp,"  (");
 	if(pte & S390X_PAGE_INVALID)
 		fprintf(fp,"INVALID ");
@@ -541,7 +510,7 @@
 /*
  * returns cpu number of task
  */ 
-int 
+static int 
 s390x_cpu_of_task(unsigned long task)
 {
 	unsigned int cpu;
@@ -583,12 +552,13 @@
 			return FALSE;
 	} else {
 		/* Linux 2.6 */
-		unsigned long runqueue_addr, runqueue_offset, per_cpu_offset;
+		unsigned long runqueue_addr, runqueue_offset;
 		unsigned long cpu_offset, per_cpu_offset_addr, running_task;
-		char runqueue[4096];
+		char *runqueue;
 		int cpu;
 
 		cpu = s390x_cpu_of_task(task);
+		runqueue = GETBUF(SIZE(runqueue));
 
 		runqueue_offset=symbol_value("per_cpu__runqueues");
 		per_cpu_offset_addr=symbol_value("__per_cpu_offset");
@@ -596,10 +566,10 @@
 			&cpu_offset, sizeof(long),"per_cpu_offset",
 			FAULT_ON_ERROR);
 		runqueue_addr=runqueue_offset + cpu_offset;
-		readmem(runqueue_addr,KVADDR,&runqueue,sizeof(runqueue),
+		readmem(runqueue_addr,KVADDR,runqueue,SIZE(runqueue),
 			"runqueue", FAULT_ON_ERROR);
-		running_task = *((unsigned long*)&runqueue[MEMBER_OFFSET(
-				"runqueue", "curr")]);
+		running_task = ULONG(runqueue + OFFSET(runqueue_curr));
+		FREEBUF(runqueue);
 		if(running_task == task)
 			return TRUE; 
 		else
@@ -733,7 +703,7 @@
 		} else if(skip_first_frame){
 			skip_first_frame=0;
 		} else {
-			fprintf(fp," #%i [%08x] ",i,backchain);
+			fprintf(fp," #%i [%08lx] ",i,backchain);
 			fprintf(fp,"%s at %x\n", closest_symbol(r14), r14);
 			if (bt->flags & BT_LINE_NUMBERS)
 				s390x_dump_line_number(r14);
@@ -743,22 +713,25 @@
 		backchain = ULONG(&stack[backchain - stack_base + bc_offset]);
 
 		/* print stack content if -f is specified */
-		if((bt->flags & BT_FULL) && !BT_REFERENCE_CHECK(bt)){
+		if ((bt->flags & BT_FULL) && !BT_REFERENCE_CHECK(bt)) {
 			int frame_size;
-			if(backchain == 0){
+			if (backchain == 0) {
 				frame_size = stack_base - old_backchain 
 					     + KERNEL_STACK_SIZE;
 			} else {
-				frame_size = backchain - old_backchain;
+				frame_size = MIN((backchain - old_backchain),
+					(stack_base - old_backchain +
+					KERNEL_STACK_SIZE));
 			}
-			for(j=0; j< frame_size; j+=4){
+			for (j = 0; j < frame_size; j += 8) {
 				if(j % 16 == 0){
-					fprintf(fp,"\n%08x: ",old_backchain+j);
+					fprintf(fp, "%s    %016lx: ", 
+                                            j ? "\n" : "", old_backchain + j);
 				}
-				fprintf(fp," %08x",ULONG(&stack[old_backchain -
-							 stack_base + j]));
+				fprintf(fp," %016lx",
+                                    ULONG(&stack[old_backchain - stack_base + j]));
 			}
-			fprintf(fp,"\n\n");
+			fprintf(fp, "\n");
 		}
 
 		/* Check for interrupt stackframe */
@@ -804,26 +777,26 @@
 		return;
 	}
 	fprintf(fp," LOWCORE INFO:\n");
-	fprintf(fp,"  -psw      : %#018x %#018x\n", tmp[0], tmp[1]);
+	fprintf(fp,"  -psw      : %#018lx %#018lx\n", tmp[0], tmp[1]);
 	if(show_symbols){
-		fprintf(fp,"  -function : %s at %x\n", 
+		fprintf(fp,"  -function : %s at %lx\n", 
 			closest_symbol(tmp[1]), tmp[1]);
 		if (bt->flags & BT_LINE_NUMBERS)
 			s390x_dump_line_number(tmp[1]);
 	}
 	ptr = lc + MEMBER_OFFSET("_lowcore","prefixreg_save_area");
 	tmp[0] = UINT(ptr);
-	fprintf(fp,"  -prefix   : %#010x\n", tmp[0]);
+	fprintf(fp,"  -prefix   : %#010lx\n", tmp[0]);
 	
 	ptr = lc + MEMBER_OFFSET("_lowcore","cpu_timer_save_area");
 	tmp[0]=UINT(ptr);
 	tmp[1]=UINT(ptr + S390X_WORD_SIZE);
-	fprintf(fp,"  -cpu timer: %#010x %#010x\n", tmp[0],tmp[1]);
+	fprintf(fp,"  -cpu timer: %#010lx %#010lx\n", tmp[0],tmp[1]);
 
 	ptr = lc + MEMBER_OFFSET("_lowcore","clock_comp_save_area");
 	tmp[0]=UINT(ptr);
 	tmp[1]=UINT(ptr + S390X_WORD_SIZE);
-	fprintf(fp,"  -clock cmp: %#010x %#010x\n", tmp[0], tmp[1]);
+	fprintf(fp,"  -clock cmp: %#010lx %#010lx\n", tmp[0], tmp[1]);
 
 	fprintf(fp,"  -general registers:\n");
 	ptr = lc + MEMBER_OFFSET("_lowcore","gpregs_save_area");
@@ -831,26 +804,26 @@
 	tmp[1]=ULONG(ptr + S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr + 8 * S390X_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 9 * S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 10* S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 11* S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr + 12* S390X_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 13* S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 14* S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 15* S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 
 	fprintf(fp,"  -access registers:\n");
 	ptr = lc + MEMBER_OFFSET("_lowcore","access_regs_save_area");
@@ -858,25 +831,25 @@
 	tmp[1]=ULONG(ptr + 4);
 	tmp[2]=ULONG(ptr + 2 * 4);
 	tmp[3]=ULONG(ptr + 3 * 4);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 	tmp[0]=ULONG(ptr + 4 * 4);
 	tmp[1]=ULONG(ptr + 5 * 4);
 	tmp[2]=ULONG(ptr + 6 * 4);
 	tmp[3]=ULONG(ptr + 7 * 4);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 	tmp[0]=ULONG(ptr + 8 * 4);
 	tmp[1]=ULONG(ptr + 9 * 4);
 	tmp[2]=ULONG(ptr + 10* 4);
 	tmp[3]=ULONG(ptr + 11* 4);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 	tmp[0]=ULONG(ptr + 12* 4);
 	tmp[1]=ULONG(ptr + 13* 4);
 	tmp[2]=ULONG(ptr + 14* 4);
 	tmp[3]=ULONG(ptr + 15* 4);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 
 	fprintf(fp,"  -control registers:\n");
@@ -885,26 +858,26 @@
 	tmp[1]=ULONG(ptr + S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr);
 	tmp[1]=ULONG(ptr + S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 
 	ptr = lc + MEMBER_OFFSET("_lowcore","floating_pt_save_area");
 	fprintf(fp,"  -floating point registers 0,2,4,6:\n");
@@ -912,26 +885,26 @@
 	tmp[1]=ULONG(ptr +  S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr + 6 * S390X_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 7 * S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 8 * S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 9 * S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr + 10* S390X_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 11* S390X_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 12* S390X_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 13* S390X_WORD_SIZE);
-	fprintf(fp,"     %#018x %#018x\n", tmp[0],tmp[1]);
-	fprintf(fp,"     %#018x %#018x\n", tmp[2],tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0],tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2],tmp[3]);
 }
 
 /*
--- crash/dev.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/dev.c	2007-12-11 14:54:56.000000000 -0500
@@ -91,13 +91,13 @@
                 switch(c)
                 {
 		case 'i':
-			if (machine_type("X86") || machine_type("S390X"))
+			if (machine_type("S390X"))
 				option_not_supported(c);
 			do_io();
 			return;
 
 		case 'p':
-			if (machine_type("X86") || machine_type("S390X"))
+			if (machine_type("S390X"))
 				option_not_supported(c);
 			do_pci();
 			return;
@@ -141,6 +141,8 @@
 	char *char_device_struct_buf;
 	ulong next, savenext, name, fops; 
 	int major;
+	int name_typecode;
+	size_t name_size;
 
 	if (!symbol_exists("chrdevs"))
 		error(FATAL, "chrdevs: symbol does not exist\n");
@@ -188,6 +190,8 @@
 
 	char_device_struct_buf = GETBUF(SIZE(char_device_struct));
 	cdp = (ulong *)&chrdevs[0];
+	name_typecode = MEMBER_TYPE("char_device_struct", "name");
+	name_size = (size_t)MEMBER_SIZE("char_device_struct", "name"); 
 
 	for (i = 0; i < MAX_DEV; i++, cdp++) {
 		if (!(*cdp))
@@ -201,11 +205,18 @@
 			OFFSET(char_device_struct_next));
 		name = ULONG(char_device_struct_buf + 
 			OFFSET(char_device_struct_name));
-                if (name) {
-                	if (!read_string(name, buf, BUFSIZE-1))
-                                 sprintf(buf, "(unknown)");
-                } else
-                        sprintf(buf, "(unknown)");
+		switch (name_typecode)
+		{
+		case TYPE_CODE_ARRAY:
+			snprintf(buf, name_size, "%s",
+			    char_device_struct_buf +
+			    OFFSET(char_device_struct_name));
+			break;
+		case TYPE_CODE_PTR:
+		default:
+			if (!name || !read_string(name, buf, BUFSIZE-1))
+				sprintf(buf, "(unknown)");
+			break;
+		}
+
 		fops = ULONG(char_device_struct_buf + 
 			OFFSET(char_device_struct_fops));
 		major = INT(char_device_struct_buf + 
@@ -243,11 +254,19 @@
 	                        OFFSET(char_device_struct_next));
 	                name = ULONG(char_device_struct_buf +
 	                        OFFSET(char_device_struct_name));
-	                if (name) {
-	                        if (!read_string(name, buf, BUFSIZE-1))
-	                                 sprintf(buf, "(unknown)");
-	                } else
-	                        sprintf(buf, "(unknown)");
+			switch (name_typecode)
+			{
+			case TYPE_CODE_ARRAY:
+				snprintf(buf, name_size, "%s",
+					char_device_struct_buf +
+			    		OFFSET(char_device_struct_name));
+				break;
+			case TYPE_CODE_PTR:
+			default:
+				if (!name || !read_string(name, buf, BUFSIZE-1))
+					sprintf(buf, "(unknown)");
+				break;
+			}
+
 	                fops = ULONG(char_device_struct_buf +
 	                        OFFSET(char_device_struct_fops));
 	                major = INT(char_device_struct_buf +
@@ -1957,29 +1974,44 @@
 	unsigned int      class;
 	unsigned short    device, vendor;
 	unsigned char     busno;
-	ulong             *devlist, bus, devfn, tmp;
+	ulong             *devlist, bus, devfn, prev, next;
 	char 		  buf1[BUFSIZE];
 	char 		  buf2[BUFSIZE];
 	char 		  buf3[BUFSIZE];
 
-	fprintf(fp, "%s BU:SL.FN CLASS: VENDOR-DEVICE\n",
-		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "PCI_DEV"));
+	if (!symbol_exists("pci_devices"))
+		error(FATAL, "no PCI devices found on this system.\n");
 
 	BZERO(&pcilist_data, sizeof(struct list_data));
 
 	if (VALID_MEMBER(pci_dev_global_list)) {
-                get_symbol_data("pci_devices", sizeof(void *), &tmp);
-                readmem(tmp + OFFSET(list_head_next), KVADDR,
-                        &pcilist_data.start, sizeof(void *), "pci devices",
-                        FAULT_ON_ERROR);
+                get_symbol_data("pci_devices", sizeof(void *), &pcilist_data.start);
                 pcilist_data.end = symbol_value("pci_devices");
                 pcilist_data.list_head_offset = OFFSET(pci_dev_global_list);
+		readmem(symbol_value("pci_devices") + OFFSET(list_head_prev),
+			KVADDR, &prev, sizeof(void *), "list head prev",
+			FAULT_ON_ERROR);
+                /*
+		 * Check if this system does not have any PCI devices.
+		 */
+		if ((pcilist_data.start == pcilist_data.end) &&
+ 		   (prev == pcilist_data.end))
+			error(FATAL, "no PCI devices found on this system.\n");
 
-	} else {
+	} else if (VALID_MEMBER(pci_dev_next)) {
 		get_symbol_data("pci_devices", sizeof(void *),
 				&pcilist_data.start);
 		pcilist_data.member_offset = OFFSET(pci_dev_next);
-	} 
+                /*
+		 * Check if this system does not have any PCI devices.
+		 */
+		readmem(pcilist_data.start + pcilist_data.member_offset,
+			KVADDR, &next, sizeof(void *), "pci dev next",
+			FAULT_ON_ERROR);
+		if (!next)
+			error(FATAL, "no PCI devices found on this system.\n");
+	} else
+		option_not_supported('p');
 
 	hq_open();
 	devcnt = do_list(&pcilist_data);
@@ -1987,6 +2019,9 @@
 	devcnt = retrieve_list(devlist, devcnt);
 	hq_close();
 
+	fprintf(fp, "%s BU:SL.FN CLASS: VENDOR-DEVICE\n",
+		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "PCI_DEV"));
+
 	for (i = 0; i < devcnt; i++) {
 
 		/*
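
The empty-list checks added to do_pci() above follow from the kernel's embedded struct list_head convention: a head with no members points at itself, so both its next and prev fields hold the address of the head. A tiny sketch of that test on values read out of a dump (the struct and helper names here are illustrative only):

    /* next/prev pointer values as read from a dumped struct list_head. */
    struct dumped_list_head { unsigned long next, prev; };

    static int
    dumped_list_is_empty(unsigned long head_addr,
                         const struct dumped_list_head *head)
    {
            return head->next == head_addr && head->prev == head_addr;
    }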
--- crash/task.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/task.c	2009-02-04 16:49:49.000000000 -0500
@@ -1,8 +1,8 @@
 /* task.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -27,11 +27,18 @@
 static void refresh_pidhash_task_table(void);
 static void refresh_pid_hash_task_table(void);
 static void refresh_hlist_task_table(void);
+static void refresh_hlist_task_table_v2(void);
+static void refresh_hlist_task_table_v3(void);
+static void refresh_active_task_table(void);
 static struct task_context *store_context(struct task_context *, ulong, char *);
 static void refresh_context(ulong, ulong);
 static void parent_list(ulong);
 static void child_list(ulong);
+static void initialize_task_state(void);
 static void show_task_times(struct task_context *, ulong);
+static void show_task_args(struct task_context *);
+static void show_task_rlimit(struct task_context *);
+static void show_tgid_list(ulong);
 static int compare_start_time(const void *, const void *);
 static int start_time_timespec(void);
 static ulonglong convert_start_time(ulonglong, ulonglong);
@@ -46,11 +53,26 @@
 static void dump_runq(void);
 static void dump_runqueues(void);
 static void dump_prio_array(int, ulong, char *);
+struct rb_root;
+static struct rb_node *rb_first(struct rb_root *);
+struct rb_node;
+static struct rb_node *rb_next(struct rb_node *);
+static struct rb_node *rb_parent(struct rb_node *, struct rb_node *);
+static struct rb_node *rb_right(struct rb_node *, struct rb_node *);
+static struct rb_node *rb_left(struct rb_node *, struct rb_node *);
+static void dump_CFS_runqueues(void);
+static void dump_RT_prio_array(int, ulong, char *);
 static void task_struct_member(struct task_context *,ulong,struct reference *);
 static void signal_reference(struct task_context *, ulong, struct reference *);
-static void dump_signal_data(struct task_context *);
+static void do_sig_thread_group(ulong);
+static void dump_signal_data(struct task_context *, ulong);
+#define TASK_LEVEL         (0x1)
+#define THREAD_GROUP_LEVEL (0x2)
+#define TASK_INDENT        (0x4)
+static int sigrt_minmax(int *, int *);
 static void signame_list(void);
-static ulonglong task_signal(ulong);
+static void sigqueue_list(ulong);
+static ulonglong task_signal(ulong, ulong*);
 static ulonglong task_blocked(ulong);
 static void translate_sigset(ulonglong);
 static ulonglong sigaction_mask(ulong);
@@ -80,6 +102,12 @@
 	struct gnu_request req;
 	ulong active_pid;
 
+	if (!(tt->idle_threads = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
+		error(FATAL, "cannot malloc idle_threads array");
+	if (DUMPFILE() &&
+	    !(tt->panic_threads = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
+		error(FATAL, "cannot malloc panic_threads array");
+
         if (kernel_symbol_exists("nr_tasks")) {
 		/*
 		 *  Figure out what maximum NR_TASKS would be by getting the 
@@ -133,6 +161,15 @@
 			"thread_struct", "eip");
 		esp_offset = MEMBER_OFFSET_INIT(thread_struct_esp,
 			"thread_struct", "esp");
+		/*
+		 *  Handle x86/x86_64 merger.
+		 */
+		if (eip_offset == INVALID_OFFSET)
+			eip_offset = MEMBER_OFFSET_INIT(thread_struct_eip,
+				"thread_struct", "ip");
+		if (esp_offset == INVALID_OFFSET)
+			esp_offset = MEMBER_OFFSET_INIT(thread_struct_esp,
+				"thread_struct", "sp");
 		ksp_offset = MEMBER_OFFSET_INIT(thread_struct_ksp,
 			"thread_struct", "ksp");
 	        ASSIGN_OFFSET(task_struct_thread_eip) = 
@@ -151,8 +188,15 @@
 		get_idle_threads(&tt->idle_threads[0], kt->cpus);
 	}
 
-        MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", 
-		"thread_info");
+	if (MEMBER_EXISTS("task_struct", "thread_info"))
+        	MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", 
+			"thread_info");
+	else if (MEMBER_EXISTS("task_struct", "stack"))
+        	MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", 
+			"stack");
+	else
+		ASSIGN_OFFSET(task_struct_thread_info) = INVALID_OFFSET;
+
 	if (VALID_MEMBER(task_struct_thread_info)) {
         	MEMBER_OFFSET_INIT(thread_info_task, "thread_info", "task"); 
         	MEMBER_OFFSET_INIT(thread_info_cpu, "thread_info", "cpu");
@@ -170,6 +214,9 @@
         MEMBER_OFFSET_INIT(task_struct_processor, "task_struct", "processor");
         MEMBER_OFFSET_INIT(task_struct_p_pptr, "task_struct", "p_pptr");
         MEMBER_OFFSET_INIT(task_struct_parent, "task_struct", "parent");
+	if (INVALID_MEMBER(task_struct_parent))
+		MEMBER_OFFSET_INIT(task_struct_parent, "task_struct", 
+			"real_parent");
         MEMBER_OFFSET_INIT(task_struct_has_cpu, "task_struct", "has_cpu");
         MEMBER_OFFSET_INIT(task_struct_cpus_runnable,  
 		"task_struct", "cpus_runnable");
@@ -184,8 +231,13 @@
         MEMBER_OFFSET_INIT(task_struct_pids, "task_struct", "pids");
         MEMBER_OFFSET_INIT(task_struct_last_run, "task_struct", "last_run");
         MEMBER_OFFSET_INIT(task_struct_timestamp, "task_struct", "timestamp");
+        MEMBER_OFFSET_INIT(task_struct_sched_info, "task_struct", "sched_info");
+	if (VALID_MEMBER(task_struct_sched_info))
+		MEMBER_OFFSET_INIT(sched_info_last_arrival, 
+			"sched_info", "last_arrival");
 	if (VALID_MEMBER(task_struct_last_run) || 
-	    VALID_MEMBER(task_struct_timestamp)) {
+	    VALID_MEMBER(task_struct_timestamp) ||
+	    VALID_MEMBER(sched_info_last_arrival)) {
 		char buf[BUFSIZE];
 	        strcpy(buf, "alias last ps -l");
         	alias_init(buf);
@@ -193,6 +245,17 @@
 	MEMBER_OFFSET_INIT(pid_link_pid, "pid_link", "pid");
 	MEMBER_OFFSET_INIT(pid_hash_chain, "pid", "hash_chain");
 
+	STRUCT_SIZE_INIT(pid_link, "pid_link");
+	STRUCT_SIZE_INIT(upid, "upid");
+	if (VALID_STRUCT(upid)) {
+		MEMBER_OFFSET_INIT(upid_nr, "upid", "nr");
+		MEMBER_OFFSET_INIT(upid_ns, "upid", "ns"); 
+		MEMBER_OFFSET_INIT(upid_pid_chain, "upid", "pid_chain");
+		MEMBER_OFFSET_INIT(pid_numbers, "pid", "numbers");
+		MEMBER_OFFSET_INIT(pid_tasks, "pid", "tasks");
+		tt->init_pid_ns = symbol_value("init_pid_ns");
+	}
+
 	MEMBER_OFFSET_INIT(pid_pid_chain, "pid", "pid_chain");
 
 	STRUCT_SIZE_INIT(task_struct, "task_struct");
@@ -207,6 +270,8 @@
 	 
 	MEMBER_OFFSET_INIT(signal_struct_count, "signal_struct", "count");
 	MEMBER_OFFSET_INIT(signal_struct_action, "signal_struct", "action");
+	MEMBER_OFFSET_INIT(signal_struct_shared_pending, "signal_struct",
+		"shared_pending");
 
 	MEMBER_OFFSET_INIT(k_sigaction_sa, "k_sigaction", "sa");
 	
@@ -217,17 +282,10 @@
 	if (INVALID_MEMBER(sigpending_head))
 		MEMBER_OFFSET_INIT(sigpending_list, "sigpending", "list");
 	MEMBER_OFFSET_INIT(sigpending_signal, "sigpending", "signal");
+	MEMBER_SIZE_INIT(sigpending_signal, "sigpending", "signal");
 
 	STRUCT_SIZE_INIT(sigqueue, "sigqueue");
-	if (VALID_STRUCT(sigqueue)) {
-        	MEMBER_OFFSET_INIT(sigqueue_next, "sigqueue", "next");
-        	MEMBER_OFFSET_INIT(sigqueue_list, "sigqueue", "list");
-        	MEMBER_OFFSET_INIT(sigqueue_info, "sigqueue", "info");
-	} else {
-        	STRUCT_SIZE_INIT(signal_queue, "signal_queue");
-                MEMBER_OFFSET_INIT(signal_queue_next, "signal_queue", "next");
-                MEMBER_OFFSET_INIT(signal_queue_info, "signal_queue", "info");
-        }
+       	STRUCT_SIZE_INIT(signal_queue, "signal_queue");
 
 	STRUCT_SIZE_INIT(sighand_struct, "sighand_struct");
 	if (VALID_STRUCT(sighand_struct))
@@ -249,6 +307,22 @@
 
 	STRUCT_SIZE_INIT(cputime_t, "cputime_t");
 
+	if (symbol_exists("cfq_slice_async")) {
+		uint cfq_slice_async;
+
+		get_symbol_data("cfq_slice_async", sizeof(int), 
+			&cfq_slice_async);
+
+		if (cfq_slice_async) {
+			machdep->hz = cfq_slice_async * 25; 
+
+			if (CRASHDEBUG(2))
+				fprintf(fp, 
+			    	    "cfq_slice_async exists: setting hz to %d\n", 
+					machdep->hz);
+		}
+	}
+
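A note on the cfq_slice_async heuristic added above: in kernels carrying the CFQ I/O scheduler, cfq_slice_async defaults to HZ / 25 (an assumption about the kernel default, not something the patch itself states), so multiplying the observed value by 25 recovers HZ when it is otherwise unavailable; a stored value of 4 yields hz = 100, 10 yields hz = 250, and 40 yields hz = 1000. The heuristic only misfires if the tunable was changed away from its default at runtime.
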
 	if (VALID_MEMBER(runqueue_arrays)) 
 		MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct",
 			"run_list");
@@ -279,12 +353,6 @@
 		error(FATAL, 
         "pidhash and pid_hash both exist -- cannot distinquish between them\n");
 
-	/*
-	 *  NOTE: We rely on PIDTYPE_PID staying at enum value of 0, because
-         *        evan at the lowest level in gdb, I can't seem to find where 
-	 *        the actual value is stored via the struct type. (?)  
-	 *        Should be safe, though...
-	 */
 	if (symbol_exists("pid_hash") && symbol_exists("pidhash_shift")) {
 		int pidhash_shift;
 
@@ -302,7 +370,33 @@
                 	tt->refresh_task_table = refresh_pid_hash_task_table;
 		} else {
                 	tt->pidhash_addr = symbol_value("pid_hash");
-                	tt->refresh_task_table = refresh_hlist_task_table;
+			if (LKCD_KERNTYPES()) {
+				if (VALID_STRUCT(pid_link)) {
+					if (VALID_STRUCT(upid) && VALID_MEMBER(pid_numbers))
+						tt->refresh_task_table =
+							refresh_hlist_task_table_v3;
+					else
+						tt->refresh_task_table =
+							refresh_hlist_task_table_v2;
+ 				} else
+					tt->refresh_task_table =
+						refresh_hlist_task_table;
+				builtin_array_length("pid_hash",
+					tt->pidhash_len, NULL);
+			} else {
+				if (!get_array_length("pid_hash", NULL,
+				    sizeof(void *)) && VALID_STRUCT(pid_link)) {
+					if (VALID_STRUCT(upid) && VALID_MEMBER(pid_numbers))
+						tt->refresh_task_table =
+							refresh_hlist_task_table_v3;
+					else
+						tt->refresh_task_table =
+							refresh_hlist_task_table_v2;
+				}
+				else
+                			tt->refresh_task_table =
+						refresh_hlist_task_table;
+			}
 		}
 
                 tt->flags |= PID_HASH;
@@ -343,6 +437,10 @@
 		irqstacks_init();
 
 	get_active_set();
+
+	if (tt->flags & ACTIVE_ONLY)
+		tt->refresh_task_table = refresh_active_task_table;
+
 	tt->refresh_task_table(); 
 
 	if (tt->flags & TASK_REFRESH_OFF) 
@@ -353,11 +451,17 @@
 		set_context(NO_TASK, active_pid);
 		tt->this_task = pid_to_task(active_pid);
 	}
-	else
+	else {
+		please_wait("determining panic task");
 		set_context(get_panic_context(), NO_PID);
+		please_wait_done();
+	}
 
 	sort_context_array();
 
+	if (pc->flags & SILENT)
+		initialize_task_state();
+
 	tt->flags |= TASK_INIT_DONE;
 }
 
@@ -371,6 +475,15 @@
 	int i;
 	char *thread_info_buf;
 
+	if (!(tt->hardirq_ctx = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
+		error(FATAL, "cannot malloc hardirq_ctx space.");
+	if (!(tt->hardirq_tasks = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
+		error(FATAL, "cannot malloc hardirq_tasks space.");
+	if (!(tt->softirq_ctx = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
+		error(FATAL, "cannot malloc softirq_ctx space.");
+	if (!(tt->softirq_tasks = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
+		error(FATAL, "cannot malloc softirq_tasks space.");
+
 	thread_info_buf = GETBUF(SIZE(irq_ctx));
 
         i = get_array_length("hardirq_ctx", NULL, 0);
@@ -545,6 +658,7 @@
 static int
 verify_task(struct task_context *tc, int level)
 {
+	int i;
 	ulong next_task;
 	ulong readflag;
 
@@ -565,11 +679,27 @@
 
 		/* fall through */
 	case 2:
-        	if ((tc->processor < 0) || (tc->processor >= NR_CPUS))
+		if (!IS_TASK_ADDR(tc->ptask))
 			return FALSE;
 
-		if (!IS_TASK_ADDR(tc->ptask))
+		if ((tc->processor < 0) || (tc->processor >= NR_CPUS)) {
+			for (i = 0; i < NR_CPUS; i++) {
+				if (tc->task == tt->active_set[i]) {
+					error(WARNING, 
+			"active task %lx on cpu %d: corrupt cpu value: %d\n\n",
+						tc->task, i, tc->processor);
+					tc->processor = i;
+					return TRUE;
+				}
+			}
+
+			if (CRASHDEBUG(1))
+				error(INFO, 
+				    "verify_task: task: %lx invalid processor: %d",
+					tc->task, tc->processor);
 			return FALSE;
+		}
+
 		break;
 	}
 
@@ -987,9 +1117,7 @@
                 return;
 
         if (DUMPFILE()) {                                 /* impossible */
-                fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ?
-                        "" : "\rplease wait... (gathering task table data)");
-                fflush(fp);
+		please_wait("gathering task table data");
                 if (!symbol_exists("panic_threads"))
                         tt->flags |= POPULATE_PANIC;
         }
@@ -1152,11 +1280,7 @@
 
         FREEBUF(pid_hash);
 
-	if (DUMPFILE()) {
-		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" :
-                        "\r                                                \r");
-                fflush(fp);
-	}
+	please_wait_done();
 
         if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) 
 		refresh_context(curtask, curpid);
@@ -1176,12 +1300,14 @@
 {
 	int i;
 	ulong *pid_hash;
+	struct syment *sp;
 	ulong pidhash_array;
 	ulong kpp;
 	char *tp; 
 	ulong next, pnext, pprev;
 	char *nodebuf;
 	int plen, len, cnt;
+	long value;
         struct task_context *tc;
         ulong curtask;
         ulong curpid;
@@ -1192,9 +1318,7 @@
                 return;
 
         if (DUMPFILE()) {                                 /* impossible */
-                fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ?
-                        "" : "\rplease wait... (gathering task table data)");
-                fflush(fp);
+		please_wait("gathering task table data");
                 if (!symbol_exists("panic_threads"))
                         tt->flags |= POPULATE_PANIC;
         }
@@ -1211,8 +1335,21 @@
                 curpid = CURRENT_PID();
         }
 
-	if (!(plen = get_array_length("pid_hash", NULL, sizeof(void *))))
-		error(FATAL, "cannot determine pid_hash array dimensions\n");
+	if (!(plen = get_array_length("pid_hash", NULL, sizeof(void *)))) {
+		/*
+		 *  Workaround for gcc omitting debuginfo data for pid_hash:
+		 *  fall back to the PIDTYPE_MAX enumerator for the array
+		 *  dimension, sanity-checking it against the distance to
+		 *  the next symbol beyond pid_hash.
+		 */
+		if (enumerator_value("PIDTYPE_MAX", &value)) {
+			if ((sp = next_symbol("pid_hash", NULL)) &&
+		    	    (((sp->value - tt->pidhash_addr) / sizeof(void *)) < value))
+				error(WARNING, "possible pid_hash array mis-handling\n");
+			plen = (int)value;
+		} else {
+			error(WARNING, 
+			    "cannot determine pid_hash array dimensions\n");
+			plen = 1;
+		}
+	}
 
 	pid_hash = (ulong *)GETBUF(plen * sizeof(void *));
 
@@ -1228,6 +1365,16 @@
 	 *  The zero'th (PIDTYPE_PID) entry is the hlist_head array
 	 *  that we want.
 	 */
+	if (CRASHDEBUG(1)) {
+		if (!enumerator_value("PIDTYPE_PID", &value))
+			error(WARNING, 
+			    "possible pid_hash array mis-handling: PIDTYPE_PID: (unknown)\n");
+		else if (value != 0)
+			error(WARNING, 
+			    "possible pid_hash array mis-handling: PIDTYPE_PID: %d \n", 
+				value);
+	}
+
 	pidhash_array = pid_hash[0];
 	FREEBUF(pid_hash);
 
@@ -1345,6 +1492,15 @@
 		}
 	}
 
+        if (cnt > tt->max_tasks) {
+                tt->max_tasks = cnt + TASK_SLUSH;
+                allocate_task_space(tt->max_tasks);
+                hq_close();
+                if (!DUMPFILE())
+                        retries++;
+                goto retry_pid_hash;
+        }
+
         BZERO(tt->task_local, tt->max_tasks * sizeof(void *));
         cnt = retrieve_list((ulong *)tt->task_local, cnt);
 
@@ -1394,11 +1550,7 @@
         FREEBUF(pid_hash);
 	FREEBUF(nodebuf);
 
-	if (DUMPFILE()) {
-		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" :
-                        "\r                                                \r");
-                fflush(fp);
-	}
+	please_wait_done();
 
         if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) 
 		refresh_context(curtask, curpid);
@@ -1406,138 +1558,738 @@
 	tt->retries = MAX(tt->retries, retries);
 }
 
-
 /*
- *  Fill a task_context structure with the data from a task.  If a NULL
- *  task_context pointer is passed in, use the next available one.
+ *  2.6.17 replaced:
+ *    static struct hlist_head *pid_hash[PIDTYPE_MAX];
+ *  with
+ *     static struct hlist_head *pid_hash;
  */
-static struct task_context *
-store_context(struct task_context *tc, ulong task, char *tp)
+static void
+refresh_hlist_task_table_v2(void)
 {
-        pid_t *pid_addr;
-        char *comm_addr;
-        int *processor_addr;
-        ulong *parent_addr;
-        ulong *mm_addr;
-        int has_cpu;
-	int do_verify;
-
-	if (tt->refresh_task_table == refresh_fixed_task_table)
-		do_verify = 1;
-	else if (tt->refresh_task_table == refresh_pid_hash_task_table)
-		do_verify = 2;
-	else
-		do_verify = 0;
+	int i;
+	ulong *pid_hash;
+	ulong pidhash_array;
+	ulong kpp;
+	char *tp; 
+	ulong next, pnext, pprev;
+	char *nodebuf;
+	int len, cnt;
+        struct task_context *tc;
+        ulong curtask;
+        ulong curpid;
+        ulong retries;
+	ulong *tlp;
 
-	if (!tc)
-		tc = tt->context_array + tt->running_tasks;
+        if (DUMPFILE() && (tt->flags & TASK_INIT_DONE))   /* impossible */
+                return;
 
-        pid_addr = (pid_t *)(tp + OFFSET(task_struct_pid));
-        comm_addr = (char *)(tp + OFFSET(task_struct_comm));
-	if (tt->flags & THREAD_INFO) {
-		tc->thread_info = ULONG(tp + OFFSET(task_struct_thread_info));
-		fill_thread_info(tc->thread_info);
-		processor_addr = (int *) (tt->thread_info + 
-			OFFSET(thread_info_cpu));
-	} else if (VALID_MEMBER(task_struct_processor))
-                processor_addr = (int *) (tp + OFFSET(task_struct_processor));
-        else if (VALID_MEMBER(task_struct_cpu))
-                processor_addr = (int *) (tp + OFFSET(task_struct_cpu));
-	if (VALID_MEMBER(task_struct_p_pptr))
-        	parent_addr = (ulong *)(tp + OFFSET(task_struct_p_pptr));
-	else
-        	parent_addr = (ulong *)(tp + OFFSET(task_struct_parent));
-        mm_addr = (ulong *)(tp + OFFSET(task_struct_mm));
-        has_cpu = task_has_cpu(task, tp);
+        if (DUMPFILE()) {                                 /* impossible */
+		please_wait("gathering task table data");
+                if (!symbol_exists("panic_threads"))
+                        tt->flags |= POPULATE_PANIC;
+        }
 
-        tc->pid = (ulong)(*pid_addr);
-        BCOPY(comm_addr, &tc->comm[0], 16);
-        tc->comm[16] = NULLCHAR;
-        tc->processor = *processor_addr;
-        tc->ptask = *parent_addr;
-        tc->mm_struct = *mm_addr;
-        tc->task = task;
-        tc->tc_next = NULL;
+        if (ACTIVE() && !(tt->flags & TASK_REFRESH))
+                return;
 
-        if (do_verify && !verify_task(tc, do_verify)) {
-		error(INFO, "invalid task address: %lx\n", tc->task);
-                BZERO(tc, sizeof(struct task_context));
-                return NULL;
+        /*
+         *  The current task's task_context entry may change,
+         *  or the task may not even exist anymore.
+         */
+        if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) {
+                curtask = CURRENT_TASK();
+                curpid = CURRENT_PID();
         }
 
-        if (has_cpu && (tt->flags & POPULATE_PANIC))
-                tt->panic_threads[tc->processor] = tc->task;
-
-	return tc;
-}
+	get_symbol_data("pid_hash", sizeof(void *), &pidhash_array);
 
-/*
- *  The current context may have moved to a new spot in the task table
- *  or have exited since the last command.  If it still exists, reset its
- *  new position.  If it doesn't exist, set the context back to the initial
- *  crash context.  If necessary, complain and show the restored context.
- */
-static void
-refresh_context(ulong curtask, ulong curpid)
-{
-	ulong value, complain;
-	struct task_context *tc;
+	len = tt->pidhash_len;
+	pid_hash = (ulong *)GETBUF(len * SIZE(hlist_head));
+	nodebuf = GETBUF(SIZE(pid_link));
+        retries = 0;
 
-	if (task_exists(curtask) && pid_exists(curpid)) {
-                set_context(curtask, NO_PID);
-        } else {
-                set_context(tt->this_task, NO_PID);
+retry_pid_hash:
+	if (retries && DUMPFILE())
+		error(FATAL,
+			"\ncannot gather a stable task list via pid_hash\n");
 
-                complain = TRUE;
-                if (STREQ(args[0], "set") && (argcnt == 2) &&
-                    IS_A_NUMBER(args[1])) {
+        if ((retries == MAX_UNLIMITED_TASK_RETRIES) &&
+            !(tt->flags & TASK_INIT_DONE)) 
+                error(FATAL, 
+	       "\ncannot gather a stable task list via pid_hash (%d retries)\n",
+			retries);
 
-	                switch (str_to_context(args[optind], &value, &tc))
-	                {
-	                case STR_PID:
-	                case STR_TASK:
-				complain = FALSE;
-	                        break;
-	                case STR_INVALID:
-				complain = TRUE;
-	                        break;
-	                }
-                }
+        if (!readmem(pidhash_array, KVADDR, pid_hash, 
+	    len * SIZE(hlist_head), "pid_hash contents", RETURN_ON_ERROR)) 
+		error(FATAL, "\ncannot read pid_hash array\n");
 
-                if (complain) {
-                        error(INFO, "current context no longer exists -- "
-                                    "restoring \"%s\" context:\n\n",
-                        	pc->program_name);
-                        show_context(CURRENT_CONTEXT());
-			fprintf(fp, "\n");
-                }
+        if (!hq_open()) {
+                error(INFO, "cannot hash task_struct entries\n");
+                if (!(tt->flags & TASK_INIT_DONE))
+                        clean_exit(1);
+                error(INFO, "using stale task_structs\n");
+                FREEBUF(pid_hash);
+                return;
         }
-}
 
-/*
- *  Sort the task_context array by PID number; for PID 0, sort by processor.
- */
-void
-sort_context_array(void)
-{
-        ulong curtask;
+	/*
+	 *  Get the idle threads first. 
+	 */
+	cnt = 0;
+	for (i = 0; i < kt->cpus; i++) {
+		if (hq_enter(tt->idle_threads[i]))
+			cnt++;
+		else
+			error(WARNING, "%sduplicate idle tasks?\n",
+				DUMPFILE() ? "\n" : "");
+	}
 
-	curtask = CURRENT_TASK();
-	qsort((void *)tt->context_array, (size_t)tt->running_tasks,
-        	sizeof(struct task_context), sort_by_pid);
-	set_context(curtask, NO_PID);
-}
+	for (i = 0; i < len; i++) {
+		if (!pid_hash[i])
+			continue;
 
-static int
-sort_by_pid(const void *arg1, const void *arg2)
-{
-	struct task_context *t1, *t2;
+        	if (!readmem(pid_hash[i], KVADDR, nodebuf, 
+	    	    SIZE(pid_link), "pid_hash node pid_link", RETURN_ON_ERROR|QUIET)) { 
+			error(INFO, "\ncannot read pid_hash node pid_link\n");
+                        if (DUMPFILE())
+                                continue;
+                        hq_close();
+                        retries++;
+                        goto retry_pid_hash;
+		}
 
-	t1 = (struct task_context *)arg1;
-	t2 = (struct task_context *)arg2;
+		kpp = pid_hash[i];
+		next = ULONG(nodebuf + OFFSET(pid_link_pid)); 
+		if (next)
+			next -= OFFSET(task_struct_pids);
+		pnext = ULONG(nodebuf + OFFSET(hlist_node_next));
+		pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev));
 
-        if ((t1->pid == 0) && (t2->pid == 0))
-                return (t1->processor < t2->processor ? -1 :
+		if (CRASHDEBUG(1)) 
+			console("pid_hash[%d]: %lx task: %lx (node: %lx) next: %lx pprev: %lx\n",
+				i, pid_hash[i], next, kpp, pnext, pprev);
+
+		while (next) {
+                        if (!IS_TASK_ADDR(next)) {
+                                error(INFO,
+                                    "%sinvalid task address in pid_hash: %lx\n",
+                                        DUMPFILE() ? "\n" : "", next);
+                                if (DUMPFILE())
+                                        break;
+                                hq_close();
+                                retries++;
+                                goto retry_pid_hash;
+
+                        }
+
+                        if (!is_idle_thread(next) && !hq_enter(next)) {
+                                error(INFO,
+                                    "%sduplicate task in pid_hash: %lx\n",
+                                        DUMPFILE() ? "\n" : "", next);
+                                if (DUMPFILE())
+                                        break;
+                                hq_close();
+                                retries++;
+                                goto retry_pid_hash;
+                        }
+
+                        cnt++;
+
+			if (!pnext) 
+				break;
+
+                        if (!readmem((ulonglong)pnext, KVADDR, nodebuf,
+                                SIZE(pid_link), "task hlist_node pid_link", RETURN_ON_ERROR|QUIET)) {
+                                error(INFO, "\ncannot read hlist_node pid_link from node next\n");
+                                if (DUMPFILE())
+                                        break;
+                                hq_close();
+                                retries++;
+                                goto retry_pid_hash;
+                        }
+
+			kpp = (ulong)pnext;
+			next = ULONG(nodebuf + OFFSET(pid_link_pid));
+			if (next)
+				next -= OFFSET(task_struct_pids);
+			pnext = ULONG(nodebuf + OFFSET(hlist_node_next));
+			pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev));
+
+			if (CRASHDEBUG(1)) 
+				console("  chained task: %lx (node: %lx) next: %lx pprev: %lx\n",
+					next, kpp, pnext, pprev);
+		}
+	}
+
+        if (cnt > tt->max_tasks) {
+                tt->max_tasks = cnt + TASK_SLUSH;
+                allocate_task_space(tt->max_tasks);
+                hq_close();
+                if (!DUMPFILE())
+                        retries++;
+                goto retry_pid_hash;
+        }
+
+        BZERO(tt->task_local, tt->max_tasks * sizeof(void *));
+        cnt = retrieve_list((ulong *)tt->task_local, cnt);
+
+	hq_close();
+
+	clear_task_cache();
+
+        for (i = 0, tlp = (ulong *)tt->task_local, 
+             tt->running_tasks = 0, tc = tt->context_array;
+             i < tt->max_tasks; i++, tlp++) {
+		if (!(*tlp))
+			continue;
+
+		if (!IS_TASK_ADDR(*tlp)) {
+			error(WARNING, 
+		            "%sinvalid task address found in task list: %lx\n", 
+				DUMPFILE() ? "\n" : "", *tlp);
+			if (DUMPFILE()) 
+				continue;
+			retries++;
+			goto retry_pid_hash;
+		}	
+	
+		if (task_exists(*tlp)) {
+			error(WARNING, 
+		           "%sduplicate task address found in task list: %lx\n",
+				DUMPFILE() ? "\n" : "", *tlp);
+			if (DUMPFILE())
+				continue;
+			retries++;
+			goto retry_pid_hash;
+		}
+
+		if (!(tp = fill_task_struct(*tlp))) {
+                        if (DUMPFILE())
+                                continue;
+                        retries++;
+                        goto retry_pid_hash;
+                }
+
+		if (store_context(tc, *tlp, tp)) {
+			tc++;
+			tt->running_tasks++;
+		}
+	}
+
+        FREEBUF(pid_hash);
+	FREEBUF(nodebuf);
+
+	please_wait_done();
+
+        if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) 
+		refresh_context(curtask, curpid);
+
+	tt->retries = MAX(tt->retries, retries);
+}
+
+
+/*
+ *  2.6.24: The pid_hash[] hlist_head entries were changed to point 
+ *  to the hlist_node structure embedded in a upid structure. 
+ */
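+/*
+ *  Reader's sketch of the pointer arithmetic used below (descriptive
+ *  only, matching the OFFSET() calls in the code):
+ *
+ *      upid = hash-chain node - OFFSET(upid_pid_chain)    embedding upid
+ *      pid  = upid            - OFFSET(pid_numbers)       embedding struct pid
+ *      task = pid.tasks[0]    - OFFSET(task_struct_pids)  owning task_struct
+ *
+ *  Only upids whose ns matches tt->init_pid_ns -- the level-0 entries
+ *  of each struct pid -- are gathered.
+ */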
+static void
+refresh_hlist_task_table_v3(void)
+{
+	int i;
+	ulong *pid_hash;
+	ulong pidhash_array;
+	ulong kpp;
+	char *tp; 
+	ulong next, pnext, pprev;
+	ulong upid;
+	char *nodebuf;
+	int len, cnt;
+        struct task_context *tc;
+        ulong curtask;
+        ulong curpid;
+        ulong retries;
+	ulong *tlp;
+	uint upid_nr;
+	ulong upid_ns;
+	int chained;
+	ulong pid;
+	ulong pid_tasks_0;
+
+        if (DUMPFILE() && (tt->flags & TASK_INIT_DONE))   /* impossible */
+                return;
+
+        if (DUMPFILE()) {                                 /* impossible */
+		please_wait("gathering task table data");
+                if (!symbol_exists("panic_threads"))
+                        tt->flags |= POPULATE_PANIC;
+        }
+
+        if (ACTIVE() && !(tt->flags & TASK_REFRESH))
+                return;
+
+        /*
+         *  The current task's task_context entry may change,
+         *  or the task may not even exist anymore.
+         */
+        if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) {
+                curtask = CURRENT_TASK();
+                curpid = CURRENT_PID();
+        }
+
+	get_symbol_data("pid_hash", sizeof(void *), &pidhash_array);
+
+	len = tt->pidhash_len;
+	pid_hash = (ulong *)GETBUF(len * SIZE(hlist_head));
+	nodebuf = GETBUF(SIZE(upid));
+        retries = 0;
+
+retry_pid_hash:
+	if (retries && DUMPFILE())
+		error(FATAL,
+			"\ncannot gather a stable task list via pid_hash\n");
+
+        if ((retries == MAX_UNLIMITED_TASK_RETRIES) &&
+            !(tt->flags & TASK_INIT_DONE)) 
+                error(FATAL, 
+	       "\ncannot gather a stable task list via pid_hash (%d retries)\n",
+			retries);
+
+        if (!readmem(pidhash_array, KVADDR, pid_hash, 
+	    len * SIZE(hlist_head), "pid_hash contents", RETURN_ON_ERROR)) 
+		error(FATAL, "\ncannot read pid_hash array\n");
+
+        if (!hq_open()) {
+                error(INFO, "cannot hash task_struct entries\n");
+                if (!(tt->flags & TASK_INIT_DONE))
+                        clean_exit(1);
+                error(INFO, "using stale task_structs\n");
+                FREEBUF(pid_hash);
+                return;
+        }
+
+	/*
+	 *  Get the idle threads first. 
+	 */
+	cnt = 0;
+	for (i = 0; i < kt->cpus; i++) {
+		if (hq_enter(tt->idle_threads[i]))
+			cnt++;
+		else
+			error(WARNING, "%sduplicate idle tasks?\n",
+				DUMPFILE() ? "\n" : "");
+	}
+
+	for (i = 0; i < len; i++) {
+		if (!pid_hash[i])
+			continue;
+
+		kpp = pid_hash[i];
+		upid = pid_hash[i] - OFFSET(upid_pid_chain);
+		chained = 0;
+do_chained:
+        	if (!readmem(upid, KVADDR, nodebuf, SIZE(upid), 
+		    "pid_hash upid", RETURN_ON_ERROR|QUIET)) { 
+			error(INFO, "\ncannot read pid_hash upid\n");
+                        if (DUMPFILE())
+                                continue;
+                        hq_close();
+                        retries++;
+                        goto retry_pid_hash;
+		}
+
+		pnext = ULONG(nodebuf + OFFSET(upid_pid_chain) + OFFSET(hlist_node_next));
+		pprev = ULONG(nodebuf + OFFSET(upid_pid_chain) + OFFSET(hlist_node_pprev));
+		upid_nr = UINT(nodebuf + OFFSET(upid_nr));
+		upid_ns = ULONG(nodebuf + OFFSET(upid_ns));
+		/*
+		 *  Use init_pid_ns level 0 (PIDTYPE_PID).
+		 */
+		if (upid_ns != tt->init_pid_ns)
+			continue;
+
+		pid = upid - OFFSET(pid_numbers);
+
+		if (!readmem(pid + OFFSET(pid_tasks), KVADDR, &pid_tasks_0, 
+		    sizeof(void *), "pid tasks", RETURN_ON_ERROR|QUIET)) {
+                        error(INFO, "\ncannot read pid.tasks[0]\n");
+                        if (DUMPFILE())
+                                continue;
+                        hq_close();
+                        retries++;
+                        goto retry_pid_hash;
+                }
+
+		if (pid_tasks_0 == 0)
+			continue;
+
+		next = pid_tasks_0 - OFFSET(task_struct_pids);
+
+		if (CRASHDEBUG(1)) {
+			if (chained)
+				console("                %lx upid: %lx nr: %d pid: %lx\n" 
+				    "                pnext/pprev: %.*lx/%lx task: %lx\n",
+				    kpp, upid, upid_nr, pid, VADDR_PRLEN, pnext, pprev, next);
+			else
+				console("pid_hash[%4d]: %lx upid: %lx nr: %d pid: %lx\n"
+				    "                pnext/pprev: %.*lx/%lx task: %lx\n",
+				    i, kpp, upid, upid_nr, pid, VADDR_PRLEN, pnext, pprev, next);
+		}
+
+		if (!IS_TASK_ADDR(next)) {
+ 			error(INFO, "%sinvalid task address in pid_hash: %lx\n",
+                        	DUMPFILE() ? "\n" : "", next);
+			 if (DUMPFILE())
+                                        break;
+ 			hq_close();
+ 			retries++;
+ 			goto retry_pid_hash;
+		}
+
+		if (!is_idle_thread(next) && !hq_enter(next)) {
+			error(INFO, "%sduplicate task in pid_hash: %lx\n",
+				DUMPFILE() ? "\n" : "", next);
+			if (DUMPFILE())
+				break;
+			hq_close();
+			retries++;
+			goto retry_pid_hash;
+		}
+
+		cnt++;
+
+		if (pnext) {
+			kpp = pnext;
+			upid = pnext - OFFSET(upid_pid_chain);
+			chained++;
+			goto do_chained;
+		}
+	}
+
+        if (cnt > tt->max_tasks) {
+                tt->max_tasks = cnt + TASK_SLUSH;
+                allocate_task_space(tt->max_tasks);
+                hq_close();
+                if (!DUMPFILE())
+                        retries++;
+                goto retry_pid_hash;
+        }
+
+        BZERO(tt->task_local, tt->max_tasks * sizeof(void *));
+        cnt = retrieve_list((ulong *)tt->task_local, cnt);
+
+	hq_close();
+
+	clear_task_cache();
+
+        for (i = 0, tlp = (ulong *)tt->task_local, 
+             tt->running_tasks = 0, tc = tt->context_array;
+             i < tt->max_tasks; i++, tlp++) {
+		if (!(*tlp))
+			continue;
+
+		if (!IS_TASK_ADDR(*tlp)) {
+			error(WARNING, 
+		            "%sinvalid task address found in task list: %lx\n", 
+				DUMPFILE() ? "\n" : "", *tlp);
+			if (DUMPFILE()) 
+				continue;
+			retries++;
+			goto retry_pid_hash;
+		}	
+	
+		if (task_exists(*tlp)) {
+			error(WARNING, 
+		           "%sduplicate task address found in task list: %lx\n",
+				DUMPFILE() ? "\n" : "", *tlp);
+			if (DUMPFILE())
+				continue;
+			retries++;
+			goto retry_pid_hash;
+		}
+
+		if (!(tp = fill_task_struct(*tlp))) {
+                        if (DUMPFILE())
+                                continue;
+                        retries++;
+                        goto retry_pid_hash;
+                }
+
+		if (store_context(tc, *tlp, tp)) {
+			tc++;
+			tt->running_tasks++;
+		}
+	}
+
+        FREEBUF(pid_hash);
+	FREEBUF(nodebuf);
+
+	please_wait_done();
+
+        if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) 
+		refresh_context(curtask, curpid);
+
+	tt->retries = MAX(tt->retries, retries);
+}
+
+static void
+refresh_active_task_table(void)
+{
+	int i;
+	char *tp; 
+	int cnt;
+        struct task_context *tc;
+        ulong curtask;
+        ulong curpid;
+        ulong retries;
+	ulong *tlp;
+
+        if (DUMPFILE() && (tt->flags & TASK_INIT_DONE))   /* impossible */
+                return;
+
+        if (DUMPFILE()) { 
+		please_wait("gathering task table data");
+                if (!symbol_exists("panic_threads"))
+                        tt->flags |= POPULATE_PANIC;
+        }
+
+        if (ACTIVE() && !(tt->flags & TASK_REFRESH))
+                return;
+
+	get_active_set();
+       	/*
+       	 *  The current task's task_context entry may change,
+         *  or the task may not even exist anymore.
+         */
+       	if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) {
+               	curtask = CURRENT_TASK();
+               	curpid = CURRENT_PID();
+       	}
+
+        retries = 0;
+
+retry_active:
+
+        if (!hq_open()) {
+                error(INFO, "cannot hash task_struct entries\n");
+                if (!(tt->flags & TASK_INIT_DONE))
+                        clean_exit(1);
+                error(INFO, "using stale task_structs\n");
+                return;
+        }
+
+	/*
+	 *  Get the active tasks. 
+	 */
+	cnt = 0;
+	for (i = 0; i < kt->cpus; i++) {
+		if (hq_enter(tt->active_set[i]))
+			cnt++;
+		else
+			error(WARNING, "%sduplicate active tasks?\n",
+				DUMPFILE() ? "\n" : "");
+	}
+
+        BZERO(tt->task_local, tt->max_tasks * sizeof(void *));
+        cnt = retrieve_list((ulong *)tt->task_local, cnt);
+
+	hq_close();
+
+	clear_task_cache();
+
+        for (i = 0, tlp = (ulong *)tt->task_local, 
+             tt->running_tasks = 0, tc = tt->context_array;
+             i < tt->max_tasks; i++, tlp++) {
+		if (!(*tlp))
+			continue;
+
+		if (!IS_TASK_ADDR(*tlp)) {
+			error(WARNING, 
+		            "%sinvalid task address found in task list: %lx\n", 
+				DUMPFILE() ? "\n" : "", *tlp);
+			if (DUMPFILE()) 
+				continue;
+			retries++;
+			goto retry_active;
+		}	
+	
+		if (task_exists(*tlp)) {
+			error(WARNING, 
+		           "%sduplicate task address found in task list: %lx\n",
+				DUMPFILE() ? "\n" : "", *tlp);
+			if (DUMPFILE())
+				continue;
+			retries++;
+			goto retry_active;
+		}
+
+		if (!(tp = fill_task_struct(*tlp))) {
+                        if (DUMPFILE())
+                                continue;
+                        retries++;
+                        goto retry_active;
+                }
+
+		if (store_context(tc, *tlp, tp)) {
+			tc++;
+			tt->running_tasks++;
+		} else if (DUMPFILE())
+			error(WARNING, "corrupt/invalid active task: %lx\n",
+				*tlp);
+	}
+
+	if (!tt->running_tasks) {
+		if (DUMPFILE())
+			error(FATAL, "cannot determine any active tasks!\n");
+		retries++;
+		goto retry_active;
+	}
+
+	please_wait_done();
+
+        if (ACTIVE() && (tt->flags & TASK_INIT_DONE))
+		refresh_context(curtask, curpid);
+
+	tt->retries = MAX(tt->retries, retries);
+}
+
+/*
+ *  Fill a task_context structure with the data from a task.  If a NULL
+ *  task_context pointer is passed in, use the next available one.
+ */
+static struct task_context *
+store_context(struct task_context *tc, ulong task, char *tp)
+{
+        pid_t *pid_addr;
+        char *comm_addr;
+        int *processor_addr;
+        ulong *parent_addr;
+        ulong *mm_addr;
+        int has_cpu;
+	int do_verify;
+
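+	/*
+	 *  Note: do_verify selects the check level handed to verify_task()
+	 *  below -- 1 for the fixed task table, 2 for the pid-hash and
+	 *  active-set based tables.  Judging from the switch in
+	 *  verify_task(), level 1 runs the full set of checks and falls
+	 *  through to the level 2 subset (parent task address and cpu
+	 *  range).
+	 */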
+	if (tt->refresh_task_table == refresh_fixed_task_table)
+		do_verify = 1;
+	else if (tt->refresh_task_table == refresh_pid_hash_task_table)
+		do_verify = 2;
+	else if (tt->refresh_task_table == refresh_hlist_task_table)
+		do_verify = 2;
+	else if (tt->refresh_task_table == refresh_hlist_task_table_v2)
+		do_verify = 2;
+	else if (tt->refresh_task_table == refresh_hlist_task_table_v3)
+		do_verify = 2;
+	else if (tt->refresh_task_table == refresh_active_task_table)
+		do_verify = 2;
+	else
+		do_verify = 0;
+
+	if (!tc)
+		tc = tt->context_array + tt->running_tasks;
+
+        pid_addr = (pid_t *)(tp + OFFSET(task_struct_pid));
+        comm_addr = (char *)(tp + OFFSET(task_struct_comm));
+	if (tt->flags & THREAD_INFO) {
+		tc->thread_info = ULONG(tp + OFFSET(task_struct_thread_info));
+		fill_thread_info(tc->thread_info);
+		processor_addr = (int *) (tt->thread_info + 
+			OFFSET(thread_info_cpu));
+	} else if (VALID_MEMBER(task_struct_processor))
+                processor_addr = (int *) (tp + OFFSET(task_struct_processor));
+        else if (VALID_MEMBER(task_struct_cpu))
+                processor_addr = (int *) (tp + OFFSET(task_struct_cpu));
+	if (VALID_MEMBER(task_struct_p_pptr))
+        	parent_addr = (ulong *)(tp + OFFSET(task_struct_p_pptr));
+	else
+        	parent_addr = (ulong *)(tp + OFFSET(task_struct_parent));
+        mm_addr = (ulong *)(tp + OFFSET(task_struct_mm));
+        has_cpu = task_has_cpu(task, tp);
+
+        tc->pid = (ulong)(*pid_addr);
+        BCOPY(comm_addr, &tc->comm[0], 16);
+        tc->comm[16] = NULLCHAR;
+        tc->processor = *processor_addr;
+        tc->ptask = *parent_addr;
+        tc->mm_struct = *mm_addr;
+        tc->task = task;
+        tc->tc_next = NULL;
+
+        if (do_verify && !verify_task(tc, do_verify)) {
+		error(INFO, "invalid task address: %lx\n", tc->task);
+                BZERO(tc, sizeof(struct task_context));
+                return NULL;
+        }
+
+        if (has_cpu && (tt->flags & POPULATE_PANIC))
+                tt->panic_threads[tc->processor] = tc->task;
+
+	return tc;
+}
+
+/*
+ *  The current context may have moved to a new spot in the task table
+ *  or have exited since the last command.  If it still exists, reset its
+ *  new position.  If it doesn't exist, set the context back to the initial
+ *  crash context.  If necessary, complain and show the restored context.
+ */
+static void
+refresh_context(ulong curtask, ulong curpid)
+{
+	ulong value, complain;
+	struct task_context *tc;
+
+	if (task_exists(curtask) && pid_exists(curpid)) {
+                set_context(curtask, NO_PID);
+        } else {
+                set_context(tt->this_task, NO_PID);
+
+                complain = TRUE;
+                if (STREQ(args[0], "set") && (argcnt == 2) &&
+                    IS_A_NUMBER(args[1])) {
+
+	                switch (str_to_context(args[optind], &value, &tc))
+	                {
+	                case STR_PID:
+	                case STR_TASK:
+				complain = FALSE;
+	                        break;
+	                case STR_INVALID:
+				complain = TRUE;
+	                        break;
+	                }
+                }
+
+                if (complain) {
+                        error(INFO, "current context no longer exists -- "
+                                    "restoring \"%s\" context:\n\n",
+                        	pc->program_name);
+                        show_context(CURRENT_CONTEXT());
+			fprintf(fp, "\n");
+                }
+        }
+}
+
+/*
+ *  Sort the task_context array by PID number; for PID 0, sort by processor.
+ */
+void
+sort_context_array(void)
+{
+        ulong curtask;
+
+	curtask = CURRENT_TASK();
+	qsort((void *)tt->context_array, (size_t)tt->running_tasks,
+        	sizeof(struct task_context), sort_by_pid);
+	set_context(curtask, NO_PID);
+}
+
+static int
+sort_by_pid(const void *arg1, const void *arg2)
+{
+	struct task_context *t1, *t2;
+
+	t1 = (struct task_context *)arg1;
+	t2 = (struct task_context *)arg2;
+
+        if ((t1->pid == 0) && (t2->pid == 0))
+                return (t1->processor < t2->processor ? -1 :
                         t1->processor == t2->processor ? 0 : 1);
         else
                 return (t1->pid < t2->pid ? -1 :
@@ -1581,6 +2333,9 @@
 char *
 fill_task_struct(ulong task)
 {
+	if (XEN_HYPER_MODE())
+		return NULL;
+
 	if (!IS_LAST_TASK_READ(task)) { 
         	if (!readmem(task, KVADDR, tt->task_struct, 
 	     		SIZE(task_struct), "fill_task_struct", 
@@ -1632,6 +2387,9 @@
 				bt->stackbase);
 	} 
 
+	if (XEN_HYPER_MODE())
+		return;
+
 	if (!IS_LAST_TASK_READ(bt->task)) {
 		if (bt->stackbase == bt->task) {
 			BCOPY(bt->stackbuf, tt->task_struct, SIZE(task_struct));
@@ -1893,7 +2651,7 @@
 	BZERO(&psinfo, sizeof(struct psinfo));
 	flag = 0;
 
-        while ((c = getopt(argcnt, args, "stcpkul")) != EOF) {
+        while ((c = getopt(argcnt, args, "gstcpkular")) != EOF) {
                 switch(c)
 		{
 		case 'k':
@@ -1907,39 +2665,55 @@
 			break;
 
 		/*
-		 *  The remaining flags are all mutually-exclusive.
+		 *  The a, t, c, p, g, l and r flags are all mutually-exclusive.
 		 */
+		case 'g':
+			flag &= ~(PS_EXCLUSIVE);
+			flag |= PS_TGID_LIST;
+			break;
+
+		case 'a':
+			flag &= ~(PS_EXCLUSIVE);
+			flag |= PS_ARGV_ENVP;
+			break;
+
 		case 't':
+			flag &= ~(PS_EXCLUSIVE);
 			flag |= PS_TIMES;
-			flag &= ~(PS_CHILD_LIST|PS_PPID_LIST|PS_LAST_RUN);
 			break;
 
 		case 'c': 
+			flag &= ~(PS_EXCLUSIVE);
 			flag |= PS_CHILD_LIST;
-			flag &= ~(PS_PPID_LIST|PS_TIMES|PS_LAST_RUN);
 			break;
 
 		case 'p':
+			flag &= ~(PS_EXCLUSIVE);
 			flag |= PS_PPID_LIST;
-			flag &= ~(PS_CHILD_LIST|PS_TIMES|PS_LAST_RUN);
 			break;
 			
 		case 'l':
 			if (INVALID_MEMBER(task_struct_last_run) &&
-			    INVALID_MEMBER(task_struct_timestamp)) {
+			    INVALID_MEMBER(task_struct_timestamp) &&
+			    INVALID_MEMBER(sched_info_last_arrival)) {
 				error(INFO, 
-"neither task_struct.last_run nor task_struct.timestamp exist in this kernel\n");
+                            "last-run timestamps do not exist in this kernel\n");
 				argerrs++;
 				break;
 			}
+			flag &= ~(PS_EXCLUSIVE);
 			flag |= PS_LAST_RUN;
-			flag &= ~(PS_CHILD_LIST|PS_TIMES|PS_PPID_LIST);
 			break;
 
 		case 's':
 			flag |= PS_KSTACKP;
 			break;
 
+		case 'r':
+			flag &= ~(PS_EXCLUSIVE);
+			flag |= PS_RLIMIT;
+			break;
+
 		default:
 			argerrs++;
 			break;
@@ -2020,6 +2794,18 @@
                 show_last_run(tc);                                    \
                 continue;                                             \
         }                                                             \
+        if (flag & PS_ARGV_ENVP) {                                    \
+                show_task_args(tc);                                   \
+                continue;                                             \
+        }                                                             \
+        if (flag & PS_RLIMIT) {                                       \
+                show_task_rlimit(tc);                                 \
+                continue;                                             \
+        }                                                             \
+        if (flag & PS_TGID_LIST) {                                    \
+                show_tgid_list(tc->task);                             \
+                continue;                                             \
+        }                                                             \
         get_task_mem_usage(tc->task, tm);                             \
         fprintf(fp, "%s", is_task_active(tc->task) ? "> " : "  ");    \
         fprintf(fp, "%5ld  %5ld  %2s  %s %3s",                        \
@@ -2050,7 +2836,7 @@
 	char buf2[BUFSIZE];
 	char buf3[BUFSIZE];
 
-	if (!(flag & (PS_PPID_LIST|PS_CHILD_LIST|PS_TIMES|PS_LAST_RUN))) 
+	if (!(flag & PS_EXCLUSIVE)) 
 		fprintf(fp, 
 		    "   PID    PPID  CPU %s  ST  %%MEM     VSZ    RSS  COMM\n",
 			flag & PS_KSTACKP ?
@@ -2076,6 +2862,8 @@
 		return;
 	}
 
+	pc->curcmd_flags |= TASK_SPECIFIED;
+
 	for (ac = 0; ac < psi->argc; ac++) {
 		tm = &task_mem_usage;
 		tc = FIRST_CONTEXT();
@@ -2095,53 +2883,283 @@
 					print = TRUE;
 				break;
 
-			case PS_BY_CMD:
-				if (STREQ(tc->comm, psi->comm[ac]))
-					print = TRUE;
-				break;
-			}
+			case PS_BY_CMD:
+				if (STREQ(tc->comm, psi->comm[ac])) {
+					if (flag & PS_TGID_LIST) {
+						if (tc->pid == task_tgid(tc->task))
+							print = TRUE;
+						else
+							print = FALSE;
+					} else
+						print = TRUE;
+				}
+				break;
+			}
+
+			if (print) {
+				if (flag & PS_TIMES) 
+					show_task_times(tc, flag);
+				else if (flag & PS_LAST_RUN)
+					show_last_run(tc);
+				else {
+					SHOW_PS_DATA();
+				}
+			}
+		}
+	}
+}
+
+/*
+ *  Display the task preceded by the last_run stamp.
+ */
+static void
+show_last_run(struct task_context *tc)
+{
+	int i, c;
+	struct task_context *tcp;
+	char format[15];
+	char buf[BUFSIZE];
+
+       	tcp = FIRST_CONTEXT();
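+	/*
+	 *  Build a fixed-width format such as "[%12llu]  ", sized from the
+	 *  first context's timestamp, so every row prints the bracketed
+	 *  last-run value in the same column width.
+	 */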
+	sprintf(buf, pc->output_radix == 10 ? "%lld" : "%llx", 
+		task_last_run(tcp->task));
+	c = strlen(buf);
+	sprintf(format, "[%c%dll%c]  ", '%', c, 
+		pc->output_radix == 10 ? 'u' : 'x');
+
+	if (tc) {
+		fprintf(fp, format, task_last_run(tc->task));
+		print_task_header(fp, tc, FALSE);
+	} else {
+        	tcp = FIRST_CONTEXT();
+        	for (i = 0; i < RUNNING_TASKS(); i++, tcp++) {
+			fprintf(fp, format, task_last_run(tcp->task));
+			print_task_header(fp, tcp, FALSE);
+		}
+	}
+}
+
+/*
+ *  Show the argv and envp strings pointed to by mm_struct->arg_start 
+ *  and mm_struct->env_start.  The user addresses need to be broken up
+ *  into physical addresses on a page-per-page basis because we typically are
+ *  not going to be working in the context of the target task. 
+ */
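+/*
+ *  Worked example (made-up addresses, arithmetic only): with 4K pages,
+ *  a region starting at user address 0x8048f00 is copied in chunks of
+ *  PAGESIZE() - PAGEOFFSET(uvaddr) = 4096 - 0xf00 = 256 bytes first,
+ *  then full 4096-byte pieces, each translated separately via uvtop().
+ */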
+static void
+show_task_args(struct task_context *tc)
+{
+	ulong arg_start, arg_end, env_start, env_end;
+	char *buf, *bufptr, *p1;
+	char *as, *ae, *es, *ee;
+	physaddr_t paddr;
+	ulong uvaddr, size, cnt;
+	int c, d;
+
+	print_task_header(fp, tc, 0);
+
+        if (!tc || !tc->mm_struct) {     /* probably a kernel thread */
+               	error(INFO, "no user stack\n\n");
+                return;
+	}
+
+        if (!task_mm(tc->task, TRUE))
+                return;
+
+	if (INVALID_MEMBER(mm_struct_arg_start)) {
+		MEMBER_OFFSET_INIT(mm_struct_arg_start, "mm_struct", "arg_start");
+		MEMBER_OFFSET_INIT(mm_struct_arg_end, "mm_struct", "arg_end");
+		MEMBER_OFFSET_INIT(mm_struct_env_start, "mm_struct", "env_start");
+		MEMBER_OFFSET_INIT(mm_struct_env_end, "mm_struct", "env_end");
+	}
+	
+	arg_start = ULONG(tt->mm_struct + OFFSET(mm_struct_arg_start));
+	arg_end = ULONG(tt->mm_struct + OFFSET(mm_struct_arg_end));
+	env_start = ULONG(tt->mm_struct + OFFSET(mm_struct_env_start));
+	env_end = ULONG(tt->mm_struct + OFFSET(mm_struct_env_end));
+
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "arg_start: %lx arg_end: %lx (%ld)\n", 
+			arg_start, arg_end, arg_end - arg_start);
+		fprintf(fp, "env_start: %lx env_end: %lx (%ld)\n", 
+			env_start, env_end, env_end - env_start);
+	}
+
+	buf = GETBUF(env_end - arg_start + 1);
+
+	uvaddr = arg_start;
+	size = env_end - arg_start;
+	bufptr = buf;
+
+	while (size > 0) {
+        	if (!uvtop(tc, uvaddr, &paddr, 0)) {
+                	error(INFO, "cannot access user stack address: %lx\n\n",
+                        	uvaddr);
+			goto bailout;
+        	}
+
+		cnt = PAGESIZE() - PAGEOFFSET(uvaddr);
 
-			if (print) {
-				if (flag & PS_TIMES) 
-					show_task_times(tc, flag);
-				else if (flag & PS_LAST_RUN)
-					show_last_run(tc);
-				else {
-					SHOW_PS_DATA();
-				}
-			}
+		if (cnt > size)
+			cnt = size;
+
+        	if (!readmem(paddr, PHYSADDR, bufptr, cnt,
+                    "user stack contents", RETURN_ON_ERROR|QUIET)) {
+                	error(INFO, "cannot access user stack address: %lx\n\n",
+                        	uvaddr);
+			goto bailout;
+        	}
+		
+		uvaddr += cnt;
+                bufptr += cnt;
+                size -= cnt;
+	}
+
+	as = buf;
+	ae = &buf[arg_end - arg_start];
+	es = &buf[env_start - arg_start];
+	ee = &buf[env_end - arg_start];
+
+	fprintf(fp, "ARG: ");
+	for (p1 = as, c = 0; p1 < ae; p1++) {
+		if (*p1 == NULLCHAR) {
+			if (c)
+				fprintf(fp, " ");
+			c = 0;
+		} else {
+			fprintf(fp, "%c", *p1);
+			c++;
+		}
+	}
+
+	fprintf(fp, "\nENV: ");
+	for (p1 = es, c = d = 0; p1 < ee; p1++) {
+		if (*p1 == NULLCHAR) {
+			if (c)
+				fprintf(fp, "\n");
+			c = 0;
+		} else {
+			fprintf(fp, "%s%c", !c && (p1 != es) ? "     " : "", *p1);
+			c++, d++;
 		}
 	}
+	fprintf(fp, "\n%s", d ? "" : "\n");
+
+bailout:
+	FREEBUF(buf);
 }
 
+char *rlim_names[] = {
+	/* 0 */	 "CPU",  
+	/* 1 */  "FSIZE",
+	/* 2 */  "DATA",
+	/* 3 */  "STACK",
+	/* 4 */  "CORE",
+	/* 5 */  "RSS",
+	/* 6 */  "NPROC",
+	/* 7 */  "NOFILE",
+	/* 8 */  "MEMLOCK",
+	/* 9 */  "AS",
+	/* 10 */ "LOCKS",
+	/* 11 */ "SIGPENDING",
+	/* 12 */ "MSGQUEUE",
+	/* 13 */ "NICE",
+	/* 14 */ "RTPRIO",
+	NULL,
+};
+
+#ifndef RLIM_INFINITY
+#define RLIM_INFINITY (~0UL)
+#endif
+
 /*
- *  Display the task preceded by the last_run stamp.
+ *  Show the current and maximum rlimit values.
  */
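+/*
+ *  Background note (inferred from the probes below): the rlimit array
+ *  lives in task_struct.rlim[] on some kernel versions and was later
+ *  moved to signal_struct.rlim[]; each struct rlimit carries a current
+ *  and a maximum value, which is why the display loop consumes two
+ *  ulongs per row.
+ */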
 static void
-show_last_run(struct task_context *tc)
+show_task_rlimit(struct task_context *tc)
 {
-	int i, c;
-	struct task_context *tcp;
-	char format[10];
-	char buf[BUFSIZE];
+	int i, j, len1, len2, rlimit_index;
+	int in_task_struct, in_signal_struct;
+	char *rlimit_buffer;
+	ulong *p1, rlim_addr;
+	char buf1[BUFSIZE];
+	char buf2[BUFSIZE];
+	char buf3[BUFSIZE];
 
-       	tcp = FIRST_CONTEXT();
-	sprintf(buf, pc->output_radix == 10 ? "%lld" : "%llx", 
-		task_last_run(tcp->task));
-	c = strlen(buf);
-	sprintf(format, "[%c%dll%c]  ", '%', c, 
-		pc->output_radix == 10 ? 'u' : 'x');
+	if (!VALID_MEMBER(task_struct_rlim) && !VALID_MEMBER(signal_struct_rlim)) {
+		MEMBER_OFFSET_INIT(task_struct_rlim, "task_struct", "rlim");
+		MEMBER_OFFSET_INIT(signal_struct_rlim, "signal_struct", "rlim");
+		STRUCT_SIZE_INIT(rlimit, "rlimit");
+		if (!VALID_MEMBER(task_struct_rlim) && 
+	  	    !VALID_MEMBER(signal_struct_rlim))
+			error(FATAL, "cannot determine rlimit array location\n");
+	} else if (!VALID_STRUCT(rlimit))
+		error(FATAL, "cannot determine rlimit structure definition\n");
+
+	in_task_struct = in_signal_struct = FALSE;
+
+	if (VALID_MEMBER(task_struct_rlim)) {
+		rlimit_index = get_array_length("task_struct.rlim", NULL, 0);
+		in_task_struct = TRUE;
+	} else if (VALID_MEMBER(signal_struct_rlim)) {
+		if (!VALID_MEMBER(task_struct_signal))
+			error(FATAL, "cannot determine rlimit array location\n");
+		rlimit_index = get_array_length("signal_struct.rlim", NULL, 0);
+		in_signal_struct = TRUE;
+	}
+
+	if (!rlimit_index)
+		error(FATAL, "cannot determine rlimit array size\n");
+
+	for (i = len1 = 0; i < rlimit_index; i++) {
+		if ((j = strlen(rlim_names[i])) > len1)
+			len1 = j;
+	}
+	len2 = strlen("(unlimited)");
 
-	if (tc) {
-		fprintf(fp, format, task_last_run(tc->task));
-		print_task_header(fp, tc, FALSE);
-	} else {
-        	tcp = FIRST_CONTEXT();
-        	for (i = 0; i < RUNNING_TASKS(); i++, tcp++) {
-			fprintf(fp, format, task_last_run(tcp->task));
-			print_task_header(fp, tcp, FALSE);
+	rlimit_buffer = GETBUF(rlimit_index * SIZE(rlimit));
+
+	print_task_header(fp, tc, 0);
+
+	fill_task_struct(tc->task);
+
+	if (in_task_struct) {
+		BCOPY(tt->task_struct + OFFSET(task_struct_rlim),
+			rlimit_buffer, rlimit_index * SIZE(rlimit));
+	} else if (in_signal_struct) {
+		rlim_addr = ULONG(tt->task_struct + OFFSET(task_struct_signal));
+        	if (!readmem(rlim_addr + OFFSET(signal_struct_rlim), 
+		    KVADDR, rlimit_buffer, rlimit_index * SIZE(rlimit),
+                    "signal_struct rlimit array", RETURN_ON_ERROR)) {
+			FREEBUF(rlimit_buffer);
+			return;
 		}
 	}
+	
+	fprintf(fp, "  %s   %s   %s\n",
+		mkstring(buf1, len1, RJUST, "RLIMIT"),
+		mkstring(buf2, len2, CENTER|RJUST, "CURRENT"),
+		mkstring(buf3, len2, CENTER|RJUST, "MAXIMUM"));
+		
+	for (p1 = (ulong *)rlimit_buffer, i = 0; i < rlimit_index; i++) {
+		fprintf(fp, "  %s   ", mkstring(buf1, len1, RJUST, 
+			rlim_names[i] ? rlim_names[i] : "(unknown)"));
+		if (*p1 == (ulong)RLIM_INFINITY)
+			fprintf(fp, "(unlimited)   ");
+		else
+			fprintf(fp, "%s   ", mkstring(buf1, len2, 
+				CENTER|LJUST|LONG_DEC, MKSTR(*p1)));
+		p1++;
+		if (*p1 == (ulong)RLIM_INFINITY)
+			fprintf(fp, "(unlimited)\n");
+		else
+			fprintf(fp, "%s\n", mkstring(buf1, len2, 
+				CENTER|LJUST|LONG_DEC, MKSTR(*p1)));
+		p1++;
+	}
+
+	fprintf(fp, "\n");
+
+	FREEBUF(rlimit_buffer);
 }
 
 /*
@@ -2229,11 +3247,8 @@
  
 	use_kernel_timeval = STRUCT_EXISTS("kernel_timeval");
         get_symbol_data("jiffies", sizeof(long), &jiffies);
-	if (symbol_exists("jiffies_64")) {
-        	get_symbol_data("jiffies_64", sizeof(long long), &jiffies_64);
-		if ((jiffies_64 & 0xffffffff00000000ULL) == 0x100000000ULL) 
-			jiffies_64 &= 0xffffffffULL;
-	}
+	if (symbol_exists("jiffies_64"))
+		get_uptime(NULL, &jiffies_64);
 	tsp = task_start_times;
 	tc = tcp ? tcp : FIRST_CONTEXT();
 
@@ -2330,8 +3345,7 @@
         for (i = 0, tsp = task_start_times; i < tasks; i++, tsp++) {
 		print_task_header(fp, tsp->tc, 0);
 		fprintf(fp, "    RUN TIME: %s\n", symbol_exists("jiffies_64") ? 
-			convert_time(jiffies_64 - 
-			convert_start_time(tsp->start_time, jiffies_64), buf1) :
+			convert_time(convert_start_time(tsp->start_time, jiffies_64), buf1) :
 			convert_time(jiffies - tsp->start_time, buf1));
 		fprintf(fp, "  START TIME: %llu\n", tsp->start_time); 
 		if (VALID_MEMBER(task_struct_times)) {
@@ -2397,15 +3411,33 @@
 static ulonglong
 convert_start_time(ulonglong start_time, ulonglong current)
 {
+	ulong tmp1, tmp2;
+	ulonglong wrapped;
+
         switch(tt->flags & (TIMESPEC | NO_TIMESPEC))
         {
         case TIMESPEC:
-		if ((start_time * (ulonglong)machdep->hz) > current) 
-			return current;
+		if ((start_time * (ulonglong)machdep->hz) > current)
+			return 0;
 		else
-                	return start_time * (ulonglong)machdep->hz; 
+                	return current - (start_time * (ulonglong)machdep->hz); 
 
         case NO_TIMESPEC:
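+                /*
+                 *  2.6 kernels initialize jiffies to -300*HZ
+                 *  (INITIAL_JIFFIES) so that wrap problems surface early;
+                 *  the adjustment below strips that bias from the raw
+                 *  start_time, handling both pre- and post-wrap values.
+                 */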
+                if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) {
+                        wrapped = (start_time & 0xffffffff00000000ULL);
+                        if (wrapped) {
+                                wrapped -= 0x100000000ULL;
+                                start_time &= 0x00000000ffffffffULL;
+                                start_time |= wrapped;
+                                start_time += (ulonglong)(300*machdep->hz);
+                        } else {
+                                tmp1 = (ulong)(uint)(-300*machdep->hz);
+                                tmp2 = (ulong)start_time;
+                                start_time = (ulonglong)(tmp2 - tmp1);
+                        }
+                }
+		break;
+
         default:
                 break;
         }
@@ -2511,6 +3543,54 @@
 }
 
 /*
+ *  Dump the tasks in a task's thread group.
+ */
+static void
+show_tgid_list(ulong task)
+{
+        int i;
+        int cnt;
+        struct task_context *tc;
+	ulong tgid;
+
+        tc = task_to_context(task);
+	tgid = task_tgid(task);
+
+	if (tc->pid != tgid) {
+		if (pc->curcmd_flags & TASK_SPECIFIED) {
+			if (!(tc = tgid_to_context(tgid)))
+				return;
+			task = tc->task;
+		} else
+			return;
+	}
+
+	if ((tc->pid == 0) && (pc->curcmd_flags & IDLE_TASK_SHOWN))
+		return;
+
+       	print_task_header(fp, tc, 0);
+
+        tc = FIRST_CONTEXT();
+        for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) {
+		if (tc->task == task)
+			continue;
+
+		if (task_tgid(tc->task)	== tgid) {
+                        INDENT(2);
+                        print_task_header(fp, tc, 0);
+                        cnt++;
+			if (tc->pid == 0)
+				pc->curcmd_flags |= IDLE_TASK_SHOWN;
+                }
+        }
+
+        if (!cnt)
+                fprintf(fp, "  (no threads)\n");
+
+	fprintf(fp, "\n");
+}
+
+/*
  * Return the first task found that belongs to a pid. 
  */
 ulong
@@ -2580,6 +3660,26 @@
         return NULL;
 }
 
+/*
+ *  Return the task_context structure of a tgid's thread group leader.
+ */
+struct task_context *
+tgid_to_context(ulong parent_tgid)
+{
+        int i;
+        struct task_context *tc;
+	ulong tgid;
+
+        tc = FIRST_CONTEXT();
+        for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
+		tgid = task_tgid(tc->task);
+		if ((tgid == parent_tgid) && (tgid == tc->pid))
+                        return tc;
+	}
+
+        return NULL;
+}
+
 
 /*
  *  Return the task_context structure of the first task found with a pid,
@@ -2816,20 +3916,39 @@
 
 
 /*
+ *  Return the task if the vaddr is part of a task's task_struct.
+ */
+ulong
+vaddr_in_task_struct(ulong vaddr)
+{
+        int i;
+        struct task_context *tc;
+
+        tc = FIRST_CONTEXT();
+        for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
+		if ((vaddr >= tc->task) && 
+		    (vaddr < (tc->task + SIZE(task_struct))))
+                        return tc->task;
+        }
+
+	return NO_TASK;
+}
+
+/*
  *  Verify whether any task is running a command.
  */
 int
 comm_exists(char *s)
 {
-        int i;
+        int i, cnt;
         struct task_context *tc;
 
         tc = FIRST_CONTEXT();
-        for (i = 0; i < RUNNING_TASKS(); i++, tc++) 
+        for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) 
                 if (STREQ(tc->comm, s))
-                        return TRUE;
+                        cnt++;
         
-        return FALSE;
+        return cnt;
 }
 
 /*
@@ -2925,7 +4044,11 @@
 	fprintf(fp, "COMMAND: \"%s\"\n", tc->comm);
 	INDENT(indent);
 	fprintf(fp, "   TASK: %lx  ", tc->task);
-	if ((cnt = TASKS_PER_PID(tc->pid)) > 1)
+	if ((machdep->flags & (INIT|MCA)) && (tc->pid == 0))
+		cnt = comm_exists(tc->comm);
+	else
+		cnt = TASKS_PER_PID(tc->pid);
+	if (cnt > 1)
 		fprintf(fp, "(1 of %d)  ", cnt);
 	if (tt->flags & THREAD_INFO)
 		fprintf(fp, "[THREAD_INFO: %lx]", tc->thread_info);
@@ -2938,19 +4061,27 @@
 	if (is_task_active(tc->task)) {
 		if (machdep->flags & HWRESET)
 			fprintf(fp, "(HARDWARE RESET)");
-		else if (machdep->flags & SYSRQ)
+		else if ((pc->flags & SYSRQ) && (tc->task == tt->panic_task))
 			fprintf(fp, "(SYSRQ)");
 		else if (machdep->flags & INIT)
 			fprintf(fp, "(INIT)");
-		else if (kt->cpu_flags[tc->processor] & NMI)
+		else if ((machdep->flags & MCA) && (tc->task == tt->panic_task))
+			fprintf(fp, "(MCA)");
+		else if ((tc->processor >= 0) && 
+		        (tc->processor < NR_CPUS) && 
+			(kt->cpu_flags[tc->processor] & NMI))
 			fprintf(fp, "(NMI)");
+		else if ((tc->task == tt->panic_task) &&
+			XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND))
+			fprintf(fp, "(SUSPEND)");
 		else if (tc->task == tt->panic_task)
 			fprintf(fp, "(PANIC)");
 		else
 			fprintf(fp, "(ACTIVE)");
 	}
 
-	if (!(pc->flags & RUNTIME) && (tt->flags & PANIC_TASK_NOT_FOUND) &&
+	if (!(pc->flags & RUNTIME) && !ACTIVE() && 
+	    (tt->flags & PANIC_TASK_NOT_FOUND) &&
 	    !SYSRQ_TASK(tc->task)) {
 		fprintf(fp, "\n"); INDENT(indent);
 		if (machine_type("S390") || machine_type("S390X"))
@@ -3006,6 +4137,10 @@
                		cnt++ ? "" : "\n", tc->comm);
 		break;
 	}
+
+	if (!(pc->flags & RUNTIME) && (tt->flags & ACTIVE_ONLY))
+		error(WARNING, 
+		    "\nonly the active tasks on each cpu are being tracked\n");
 }
 
 
@@ -3182,6 +4317,22 @@
 	return flags;
 }
 
+/*
+ *  Return a task's tgid.
+ */
+ulong
+task_tgid(ulong task)
+{
+        uint tgid;
+
+        fill_task_struct(task);
+
+        tgid = tt->last_task_read ?
+                 UINT(tt->task_struct + OFFSET(task_struct_tgid)) : 0;
+
+        return (ulong)tgid;
+}
+
 ulonglong
 task_last_run(ulong task)
 {
@@ -3197,6 +4348,10 @@
 	} else if (VALID_MEMBER(task_struct_timestamp))
         	timestamp = tt->last_task_read ?  ULONGLONG(tt->task_struct + 
 			OFFSET(task_struct_timestamp)) : 0;
+	else if (VALID_MEMBER(sched_info_last_arrival))
+        	timestamp = tt->last_task_read ?  ULONGLONG(tt->task_struct + 
+			OFFSET(task_struct_sched_info) + 
+			OFFSET(sched_info_last_arrival)) : 0;
 	
         return timestamp;
 }
@@ -3368,6 +4523,12 @@
 	task = NO_TASK;
         tc = FIRST_CONTEXT();
 
+	/* 
+	 *  --no_panic command line option
+	 */
+	if (tt->flags & PANIC_TASK_NOT_FOUND) 
+		goto use_task_0;
+
 	if (symbol_exists("panic_threads") &&
 	    symbol_exists("panicmsg") &&
 	    symbol_exists("panic_processor")) {
@@ -3384,6 +4545,10 @@
 		task = tt->panic_threads[tt->panic_processor];
 
 		if (symbol_exists("panic_ksp")) {
+			if (!(tt->panic_ksp = (ulong *)
+			     calloc(NR_CPUS, sizeof(void *))))
+				error(FATAL, 
+					"cannot malloc panic_ksp array.\n");
 		    	readmem(symbol_value("panic_ksp"), KVADDR, 
 			    tt->panic_ksp,
 		            sizeof(void *)*NR_CPUS, "panic_ksp array", 
@@ -3411,6 +4576,9 @@
 
 use_task_0:
 
+	if (CRASHDEBUG(1))
+		error(INFO, "get_panic_context: panic task not found\n");
+
 	tt->flags |= PANIC_TASK_NOT_FOUND;
 	tc = FIRST_CONTEXT();
         return(tc->task);
@@ -3426,7 +4594,7 @@
 	ulong task;
         struct task_context *tc;
 
-	if ((task = tt->panic_threads[cpu]))
+	if (DUMPFILE() && (task = tt->panic_threads[cpu]))
 		return task;
 
         tc = FIRST_CONTEXT();
@@ -3448,50 +4616,74 @@
 	int msg_found;
 
         BZERO(buf, BUFSIZE);
+	msg_found = FALSE;
 
-	if (tt->panicmsg)
+	if (tt->panicmsg) {
 		read_string(tt->panicmsg, buf, BUFSIZE-1);
-	else if (LKCD_DUMPFILE())
+		msg_found = TRUE;
+	} else if (LKCD_DUMPFILE()) {
 		get_lkcd_panicmsg(buf);
-	else { 
-		msg_found = FALSE;
+		msg_found = TRUE;
+	}
 
-	        open_tmpfile();
-	        dump_log(FALSE);
+        if (msg_found == TRUE)
+                return(buf);
 
-	        rewind(pc->tmpfile);
-	        while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
-	                if (strstr(buf, "Kernel panic: ")) 
-				msg_found = TRUE;
-	        }
-	        rewind(pc->tmpfile);
-	        while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
-	                if (strstr(buf, "Oops: ") || 
-			    strstr(buf, "kernel BUG at")) 
-	                        msg_found = TRUE;
-	        }
-                rewind(pc->tmpfile);
-                while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
-                        if (strstr(buf, "SysRq : Netdump") ||
-			    strstr(buf, "SysRq : Crash")) {
-				machdep->flags |= SYSRQ;
-                                msg_found = TRUE;
-			}
-                }
-                rewind(pc->tmpfile);
-                while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
-                        if (strstr(buf, "sysrq") && 
-		            symbol_exists("sysrq_pressed")) 
-				get_symbol_data("sysrq_pressed", sizeof(int), 
-					&msg_found);
-                }
+	open_tmpfile();
+	dump_log(FALSE);
 
-	        close_tmpfile();
+	/*
+	 *  First check for a SYSRQ-generated crash, and set the
+	 *  active-task flag appropriately.  The message may or
+	 *  may not be used as the panic message.
+	 */
+        rewind(pc->tmpfile);
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+                if (strstr(buf, "SysRq : Crash") ||
+		    strstr(buf, "SysRq : Trigger a crashdump")) {
+			pc->flags |= SYSRQ;
+			break;
+		}
+	}
 
-		if (!msg_found)
-        		BZERO(buf, BUFSIZE);
+	rewind(pc->tmpfile);
+	while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (strstr(buf, "Kernel panic: ")) 
+			msg_found = TRUE;
+	}
+	rewind(pc->tmpfile);
+	while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
+	        if (strstr(buf, "Oops: ") || 
+		    strstr(buf, "kernel BUG at")) 
+	        	msg_found = TRUE;
+	}
+        rewind(pc->tmpfile);
+        while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
+                if (strstr(buf, "SysRq : Netdump") ||
+		    strstr(buf, "SysRq : Trigger a crashdump") ||
+		    strstr(buf, "SysRq : Crash")) {
+			pc->flags |= SYSRQ;
+                        msg_found = TRUE;
+		}
+        }
+        rewind(pc->tmpfile);
+        while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
+                if (strstr(buf, "sysrq") && 
+		    symbol_exists("sysrq_pressed")) 
+			get_symbol_data("sysrq_pressed", sizeof(int), 
+				&msg_found);
+        }
+	rewind(pc->tmpfile);
+	while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (strstr(buf, "Kernel panic - ")) 
+			msg_found = TRUE;
 	}
 
+        close_tmpfile();
+
+	if (!msg_found)
+       		BZERO(buf, BUFSIZE);
+
 	return(buf);
 }
 
@@ -3517,7 +4709,7 @@
 	BZERO(&foreach_data, sizeof(struct foreach_data));
 	fd = &foreach_data;
 
-        while ((c = getopt(argcnt, args, "R:vomlgersStpukcf")) != EOF) {
+        while ((c = getopt(argcnt, args, "R:vomlgersStTpukcf")) != EOF) {
                 switch(c)
 		{
 		case 'R':
@@ -3560,6 +4752,10 @@
 			fd->flags |= FOREACH_r_FLAG;
 			break;
 
+		case 'T':
+			fd->flags |= FOREACH_T_FLAG;
+			break;
+
 		case 't':
 			fd->flags |= FOREACH_t_FLAG;
 			break;
@@ -3754,12 +4950,14 @@
 foreach(struct foreach_data *fd)
 {
         int i, j, k, a;
-        struct task_context *tc;
+        struct task_context *tc, *tgc;
 	int specified;
 	int doit;
 	int subsequent;
 	ulong cmdflags; 
+	ulong tgid;
 	struct reference reference, *ref;
+	int print_header;
 	struct bt_info bt_info, *bt;
 
 	/* 
@@ -3797,6 +4995,8 @@
 			fd->reference ?  fd->reference : "");
 	}
 
+	print_header = TRUE;
+
         for (k = 0; k < fd->keys; k++) {
         	switch(fd->keyword_array[k])
                 {
@@ -3881,6 +5081,14 @@
 				error(FATAL, 
 			    	 "sig: -l and -s options are not applicable\n");
 			}
+			if (fd->flags & FOREACH_g_FLAG) {
+				if (!hq_open()) {
+                			error(INFO, 
+					   "cannot hash thread group tasks\n");
+					fd->flags &= ~FOREACH_g_FLAG;
+				} else
+					print_header = FALSE;
+			}
                         break;
 
 		case FOREACH_TEST:
@@ -3941,7 +5149,7 @@
 		if (fd->reference) {
 			BZERO(ref, sizeof(struct reference));
 			ref->str = fd->reference;
-		} else
+		} else if (print_header)
 			print_task_header(fp, tc, subsequent++);
 
 		for (k = 0; k < fd->keys; k++) {
@@ -3962,7 +5170,12 @@
 					bt->flags |= BT_SYMBOLIC_ARGS;
 				if (fd->flags & FOREACH_t_FLAG)
 					bt->flags |= BT_TEXT_SYMBOLS;
-				if (fd->flags & FOREACH_o_FLAG)
+				if (fd->flags & FOREACH_T_FLAG) {
+					bt->flags |= BT_TEXT_SYMBOLS;
+					bt->flags |= BT_TEXT_SYMBOLS_ALL;
+				}
+				if ((fd->flags & FOREACH_o_FLAG) ||
+				    (kt->flags & USE_OLD_BT))
 					bt->flags |= BT_OLD_BACK_TRACE;
                                 if (fd->flags & FOREACH_e_FLAG)
                                         bt->flags |= BT_EFRAME_SEARCH;
@@ -4010,8 +5223,14 @@
 
                         case FOREACH_SIG:
 				pc->curcmd = "sig";
-                                do_sig(tc->task, FOREACH_SIG,
-                                        fd->reference ? ref : NULL);
+				if (fd->flags & FOREACH_g_FLAG) {
+					tgid = task_tgid(tc->task);	
+					tgc = tgid_to_context(tgid);
+					if (hq_enter(tgc->task))
+						do_sig_thread_group(tgc->task);
+				} else 
+                                	do_sig(tc->task, FOREACH_SIG,
+                                        	fd->reference ? ref : NULL);
                                 break;
 
 			case FOREACH_SET:
@@ -4075,6 +5294,11 @@
 				nlm_files_dump();
 			}
 			break;
+
+		case FOREACH_SIG:
+                        if (fd->flags & FOREACH_g_FLAG)
+				hq_close();
+			break;
 		}
 	}
 
@@ -4161,7 +5385,7 @@
         fd = &foreach_data;
 	fd->keys = 1;
 	fd->keyword_array[0] = FOREACH_BT; 
-	fd->flags |= FOREACH_t_FLAG;
+	fd->flags |= (FOREACH_t_FLAG|FOREACH_o_FLAG);
 
 	dietask = lasttask = NO_TASK;
 	
@@ -4188,6 +5412,12 @@
 			break;	
 		}
 
+		if (strstr(buf, " crash_kexec at ") ||
+		    strstr(buf, " .crash_kexec at ")) {
+			found = TRUE;
+			break;	
+		}
+
                 if (strstr(buf, " die at ")) {
 			switch (dietask)
 			{
@@ -4211,6 +5441,10 @@
 	if (dietask == (NO_TASK+1))
 		error(WARNING, "multiple active tasks have called die\n\n");
 
+	if (CRASHDEBUG(1) && found)
+		error(INFO, "panic_search: %lx (via foreach bt)\n", 
+			lasttask);
+
 found_panic_task:
 	populate_panic_threads();
 
@@ -4229,6 +5463,9 @@
 		}
 	} 
 
+	if (CRASHDEBUG(1))
+		error(INFO, "panic_search: failed (via foreach bt)\n");
+
 	return NULL;
 }
 
@@ -4240,25 +5477,28 @@
 {
 	ulong task;
 
-	if (LKCD_DUMPFILE())
-		return(get_lkcd_panic_task());
-
 	if (NETDUMP_DUMPFILE()) {
 		task = pc->flags & REM_NETDUMP ?
 			tt->panic_task : get_netdump_panic_task();
 		if (task) 
 			return task;
-		if (get_active_set())
-			return(get_active_set_panic_task());
-	}
-
-        if (DISKDUMP_DUMPFILE()) {
+	} else if (KDUMP_DUMPFILE()) {
+                task = get_kdump_panic_task();
+                if (task)
+                        return task;
+        } else if (DISKDUMP_DUMPFILE()) {
                 task = get_diskdump_panic_task();
                 if (task)
                         return task;
-                if (get_active_set())
-                        return(get_active_set_panic_task());
-        }
+	} else if (XENDUMP_DUMPFILE()) {
+                task = get_xendump_panic_task();
+                if (task)
+                        return task;
+        } else if (LKCD_DUMPFILE())
+		return(get_lkcd_panic_task());
+
+	if (get_active_set())
+		return(get_active_set_panic_task());
 
 	return NO_TASK;
 }
@@ -4298,14 +5538,17 @@
 
         tc = FIRST_CONTEXT();
         for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
-		if (task_has_cpu(tc->task, NULL)) {
+		if (task_has_cpu(tc->task, NULL) && 
+		    (tc->processor >= 0) && 
+		    (tc->processor < NR_CPUS)) {
 			tt->panic_threads[tc->processor] = tc->task;
 			found++;
 		}
 	}
 
 	if (!found && !(kt->flags & SMP) &&
-	    (LKCD_DUMPFILE() || NETDUMP_DUMPFILE() || DISKDUMP_DUMPFILE())) 
+	    (LKCD_DUMPFILE() || NETDUMP_DUMPFILE() || 
+	     KDUMP_DUMPFILE() || DISKDUMP_DUMPFILE())) 
 		tt->panic_threads[0] = get_dumpfile_panic_task();
 }
 	
@@ -4331,7 +5574,7 @@
 void
 dump_task_table(int verbose)
 {
-	int i;
+	int i, j, more, nr_cpus;
 	struct task_context *tc;
 	char buf[BUFSIZE];
 	int others, wrap, flen;
@@ -4363,6 +5606,12 @@
                 fprintf(fp, "refresh_pid_hash_task_table()\n");
         else if (tt->refresh_task_table == refresh_hlist_task_table)
                 fprintf(fp, "refresh_hlist_task_table()\n");
+        else if (tt->refresh_task_table == refresh_hlist_task_table_v2)
+                fprintf(fp, "refresh_hlist_task_table_v2()\n");
+        else if (tt->refresh_task_table == refresh_hlist_task_table_v3)
+                fprintf(fp, "refresh_hlist_task_table_v3()\n");
+        else if (tt->refresh_task_table == refresh_active_task_table)
+                fprintf(fp, "refresh_active_task_table()\n");
 	else
 		fprintf(fp, "%lx\n", (ulong)tt->refresh_task_table);
 
@@ -4411,6 +5660,9 @@
         if (tt->flags & NO_TIMESPEC)
                 sprintf(&buf[strlen(buf)], 
 			"%sNO_TIMESPEC", others++ ? "|" : "");
+        if (tt->flags & ACTIVE_ONLY)
+                sprintf(&buf[strlen(buf)], 
+			"%sACTIVE_ONLY", others++ ? "|" : "");
 	sprintf(&buf[strlen(buf)], ")");
 
         if (strlen(buf) > 54)
@@ -4436,76 +5688,199 @@
 	fprintf(fp, "      last_mm_read: %lx\n", tt->last_mm_read);
 	fprintf(fp, "       task_struct: %lx\n", (ulong)tt->task_struct);
 	fprintf(fp, "         mm_struct: %lx\n", (ulong)tt->mm_struct);
+	fprintf(fp, "       init_pid_ns: %lx\n", tt->init_pid_ns);
 
 
-        fprintf(fp, "     panic_threads:");
-
 	wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4;
 	flen = sizeof(void *) == SIZEOF_32BIT ? 8 : 16;
 
-        for (i = 0; i < NR_CPUS; i++) {
-                if ((i % wrap) == 0)
+	nr_cpus = kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS;
+
+
+        fprintf(fp, "      idle_threads:");
+        for (i = 0; i < nr_cpus; i++) {
+		if (!tt->idle_threads) {
+			fprintf(fp, " (unused)");
+			break;
+		}
+                if ((i % wrap) == 0) {
+                        fprintf(fp, "\n        ");
+			for (j = i, more = FALSE; j < nr_cpus; j++) {
+				if (tt->idle_threads[j]) {
+					more = TRUE;
+					break;
+				}
+			}
+		}
+                fprintf(fp, "%.*lx ", flen, tt->idle_threads[i]);
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
+        }
+        fprintf(fp, "\n");
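Each per-cpu array dump above (idle_threads here, and active_set, panic_threads, panic_ksp and the irq context arrays that follow) now bounds its loop by the kernel's real NR_CPUS, prints "(unused)" when the array was never allocated, and looks ahead at the start of every output row so it can stop with "..." once the remaining slots are all zero. A sketch of that look-ahead/ellipsis idiom in isolation; the function name and WRAP value below are illustrative.

#include <stdio.h>

#define WRAP 4		/* entries per output row (the patch uses 8 on 32-bit) */

/* Print a per-cpu pointer array, truncating once the rest is all zeroes. */
static void dump_percpu_array(const char *name, unsigned long *arr, int nr_cpus)
{
	int i, j, more = 0;

	printf("%s:", name);
	if (!arr) {
		printf(" (unused)\n");
		return;
	}
	for (i = 0; i < nr_cpus; i++) {
		if ((i % WRAP) == 0) {
			printf("\n    ");
			/* look ahead: anything non-zero left to show? */
			for (j = i, more = 0; j < nr_cpus; j++)
				if (arr[j]) {
					more = 1;
					break;
				}
		}
		printf("%016lx ", arr[i]);
		if (!more) {
			printf("...");
			break;
		}
	}
	printf("\n");
}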
+
+	fprintf(fp, "        active_set:");
+	for (i = 0; i < nr_cpus; i++) {
+		if (!tt->active_set) {
+			fprintf(fp, " (unused)");
+			break;
+		}
+		if ((i % wrap) == 0) {
+	        	fprintf(fp, "\n        ");
+			for (j = i, more = FALSE; j < nr_cpus; j++) {
+				if (tt->active_set[j]) {
+					more = TRUE;
+					break;
+				}
+			}
+		}
+	        fprintf(fp, "%.*lx ", flen, tt->active_set[i]);
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
+	}
+	fprintf(fp, "\n");
+
+        fprintf(fp, "     panic_threads:");
+        for (i = 0; i < nr_cpus; i++) {
+		if (!tt->panic_threads) {
+			fprintf(fp, " (unused)");
+			break;
+		}
+                if ((i % wrap) == 0) {
                         fprintf(fp, "\n        ");
-                fprintf(fp, "%.*lx ", flen, tt->panic_threads[i]); 
+			for (j = i, more = FALSE; j < nr_cpus; j++) {
+				if (tt->panic_threads[j]) {
+					more = TRUE;
+					break;
+				}
+			}
+		}
+               	fprintf(fp, "%.*lx ", flen, tt->panic_threads[i]); 
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
         }
         fprintf(fp, "\n");
 
         fprintf(fp, "         panic_ksp:");
-        for (i = 0; i < NR_CPUS; i++) {
-                if ((i % wrap) == 0)
+        for (i = 0; i < nr_cpus; i++) {
+		if (!tt->panic_ksp) {
+			fprintf(fp, " (unused)");
+			break;
+		}
+                if ((i % wrap) == 0) {
                         fprintf(fp, "\n        ");
+			for (j = i, more = FALSE; j < nr_cpus; j++) {
+				if (tt->panic_ksp[j]) {
+					more = TRUE;
+					break;
+				}
+			}
+		}
                 fprintf(fp, "%.*lx ", flen, tt->panic_ksp[i]);
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
         }
         fprintf(fp, "\n");
 
         fprintf(fp, "       hardirq_ctx:");
-        for (i = 0; i < NR_CPUS; i++) {
-                if ((i % wrap) == 0)
+        for (i = 0; i < nr_cpus; i++) {
+		if (!tt->hardirq_ctx) {
+			fprintf(fp, " (unused)");
+			break;
+		}
+                if ((i % wrap) == 0) {
                         fprintf(fp, "\n        ");
+			for (j = i, more = FALSE; j < nr_cpus; j++) {
+				if (tt->hardirq_ctx[j]) {
+					more = TRUE;
+					break;
+				}
+			}
+		}
                 fprintf(fp, "%.*lx ", flen, tt->hardirq_ctx[i]);
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
         }
         fprintf(fp, "\n");
 
         fprintf(fp, "     hardirq_tasks:");
-        for (i = 0; i < NR_CPUS; i++) {
-                if ((i % wrap) == 0)
+        for (i = 0; i < nr_cpus; i++) {
+		if (!tt->hardirq_tasks) {
+			fprintf(fp, " (unused)");
+			break;
+		}
+                if ((i % wrap) == 0) {
                         fprintf(fp, "\n        ");
+			for (j = i, more = FALSE; j < nr_cpus; j++) {
+				if (tt->hardirq_tasks[j]) {
+					more = TRUE;
+					break;
+				}
+			}
+		}
                 fprintf(fp, "%.*lx ", flen, tt->hardirq_tasks[i]);
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
         }
         fprintf(fp, "\n");
 
         fprintf(fp, "       softirq_ctx:");
-        for (i = 0; i < NR_CPUS; i++) {
-                if ((i % wrap) == 0)
+        for (i = 0; i < nr_cpus; i++) {
+		if (!tt->softirq_ctx) {
+			fprintf(fp, " (unused)");
+			break;
+		}
+                if ((i % wrap) == 0) {
                         fprintf(fp, "\n        ");
+			for (j = i, more = FALSE; j < nr_cpus; j++) {
+				if (tt->softirq_ctx[j]) {
+					more = TRUE;
+					break;
+				}
+			}
+		}
                 fprintf(fp, "%.*lx ", flen, tt->softirq_ctx[i]);
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
         }
         fprintf(fp, "\n");
 
         fprintf(fp, "     softirq_tasks:");
-        for (i = 0; i < NR_CPUS; i++) {
-                if ((i % wrap) == 0)
+        for (i = 0; i < nr_cpus; i++) {
+		if (!tt->softirq_tasks) {
+			fprintf(fp, " (unused)");
+			break;
+		}
+                if ((i % wrap) == 0) {
                         fprintf(fp, "\n        ");
+			for (j = i, more = FALSE; j < nr_cpus; j++) {
+				if (tt->softirq_tasks[j]) {
+					more = TRUE;
+					break;
+				}
+			}
+		}
                 fprintf(fp, "%.*lx ", flen, tt->softirq_tasks[i]);
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
         }
         fprintf(fp, "\n");
 
-        fprintf(fp, "      idle_threads:");
-        for (i = 0; i < NR_CPUS; i++) {
-                if ((i % wrap) == 0)
-                        fprintf(fp, "\n        ");
-                fprintf(fp, "%.*lx ", flen, tt->idle_threads[i]);
-        }
-        fprintf(fp, "\n");
-
-	fprintf(fp, "        active_set:");
-	for (i = 0; i < NR_CPUS; i++) {
-		if ((i % wrap) == 0)
-	        	fprintf(fp, "\n        ");
-	        fprintf(fp, "%.*lx ", flen, tt->active_set[i]);
-	}
-	fprintf(fp, "\n");
-
 
 	if (!verbose)
 		return;
@@ -4546,6 +5921,9 @@
 	if ((tc->pid == 0) && !STREQ(tc->comm, pc->program_name))
 		return TRUE;
 
+        if (_ZOMBIE_ == TASK_STATE_UNINITIALIZED)
+                initialize_task_state();
+
 	if (IS_ZOMBIE(task) || IS_EXITING(task))
                 return FALSE;
 
@@ -4641,6 +6019,16 @@
 			cnt++;
 		else
                 	BZERO(tasklist, sizeof(ulong) * NR_CPUS);
+	} else if (OPENVZ()) {
+		runq = symbol_value("pcpu_info");
+		runqbuf = GETBUF(SIZE(pcpu_info));
+		for (i = 0; i < nr_cpus; i++, runq += SIZE(pcpu_info)) {
+			readmem(runq, KVADDR, runqbuf, SIZE(pcpu_info),
+				"pcpu info", FAULT_ON_ERROR);
+			tasklist[i] = ULONG(runqbuf + OFFSET(pcpu_info_idle));
+			if (IS_KVADDR(tasklist[i]))
+				cnt++;
+		}
 	}
 
 	if (runqbuf)
@@ -4661,6 +6049,8 @@
 {
 	if (kt->runq_siblings == 1)
 		return cpu;
+	else if (!(kt->__rq_idx))
+		return 0;
 	else
 		return kt->__rq_idx[cpu];
 }
@@ -4673,6 +6063,8 @@
 {
         if (kt->runq_siblings == 1)
                 return 0;
+	else if (!(kt->__cpu_idx))
+		return 0;
         else
                 return kt->__cpu_idx[cpu];
 }
@@ -4734,14 +6126,41 @@
 	} else if (symbol_exists("per_cpu__runqueues")) {
 		runq = symbol_value("per_cpu__runqueues");
 		per_cpu = TRUE;
-	} else
+	} else if (OPENVZ())
+		runq = symbol_value("pcpu_info");
+	else
 		return FALSE;
 
-        BZERO(tt->active_set, sizeof(ulong) * NR_CPUS);
+
+	if (!(tt->active_set = (ulong *)calloc(NR_CPUS, sizeof(ulong))))	
+		error(FATAL, "cannot malloc active_set array");
+
         runqbuf = GETBUF(SIZE(runqueue));
 	cnt = 0;
 
-	if (VALID_MEMBER(runqueue_curr) && per_cpu) {
+	if (OPENVZ()) {
+		ulong vcpu_struct; 
+		char *pcpu_info_buf, *vcpu_struct_buf;
+
+		pcpu_info_buf   = GETBUF(SIZE(pcpu_info));
+		vcpu_struct_buf = GETBUF(SIZE(vcpu_struct));
+
+		for (i = 0; i < kt->cpus; i++, runq += SIZE(pcpu_info)) {
+			readmem(runq, KVADDR, pcpu_info_buf, 
+				SIZE(pcpu_info), "pcpu_info", FAULT_ON_ERROR);
+			vcpu_struct= ULONG(pcpu_info_buf +
+				OFFSET(pcpu_info_vcpu));
+			readmem(vcpu_struct, KVADDR, vcpu_struct_buf, 
+				SIZE(vcpu_struct), "pcpu_info->vcpu",
+				FAULT_ON_ERROR);
+			tt->active_set[i] = ULONG(vcpu_struct_buf +
+				OFFSET(vcpu_struct_rq) + OFFSET(runqueue_curr));
+			if (IS_KVADDR(tt->active_set[i]))
+				cnt++;
+		}
+		FREEBUF(pcpu_info_buf);
+		FREEBUF(vcpu_struct_buf);
+	} else if (VALID_MEMBER(runqueue_curr) && per_cpu) {
                	for (i = 0; i < kt->cpus; i++) {
                         if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) {
                                 runq = symbol_value("per_cpu__runqueues") +
@@ -4759,7 +6178,8 @@
 				cnt++;
 		}
 	} else if (VALID_MEMBER(runqueue_curr)) {
-	        for (i = 0; i < NR_CPUS; i++, runq += SIZE(runqueue)) {
+	        for (i = 0; i < MAX(kt->cpus, kt->kernel_NR_CPUS); i++, 
+		    runq += SIZE(runqueue)) {
 	                readmem(runq, KVADDR, runqbuf,
 	                	SIZE(runqueue), "(old) runqueues curr",
 	                        FAULT_ON_ERROR);
@@ -4799,23 +6219,55 @@
                 tt->flags &= ~ACTIVE_SET;
 }
 
-#define RESOLVE_PANIC_AND_DIE_CALLERS()               \
-        if ((panic_task > (NO_TASK+1)) && !die_task)  \
-                return panic_task;                    \
-                                                      \
-        if (panic_task && die_task) {                 \
-                error(WARNING,                        \
-	"multiple active tasks have called die and/or panic\n\n"); \
-                return NO_TASK;                       \
-        }                                             \
-                                                      \
-        if (die_task > (NO_TASK+1))                   \
-                return die_task;                      \
-        else if (die_task == (NO_TASK+1))             \
-                error(WARNING,                        \
+#define RESOLVE_PANIC_AND_DIE_CALLERS()               		\
+	if (xen_panic_task) {					\
+                if (CRASHDEBUG(1))                              \
+                        error(INFO,                             \
+         "get_active_set_panic_task: %lx (xen_panic_event)\n",  \
+                                xen_panic_task);		\
+		return xen_panic_task;				\
+	}							\
+	if (crash_kexec_task) {					\
+		if (CRASHDEBUG(1))				\
+			error(INFO,				\
+	    "get_active_set_panic_task: %lx (crash_kexec)\n",   \
+				crash_kexec_task);	  	\
+		return crash_kexec_task;			\
+	}							\
+        if ((panic_task > (NO_TASK+1)) && !die_task) {		\
+		if (CRASHDEBUG(1))				\
+			fprintf(fp, 				\
+		    "get_active_set_panic_task: %lx (panic)\n", \
+				panic_task);			\
+                return panic_task;                    		\
+	}							\
+                                                      		\
+        if (panic_task && die_task) {                 		\
+		if ((panic_task > (NO_TASK+1)) &&               \
+		    (panic_task == die_task)) {                 \
+		        if (CRASHDEBUG(1))			\
+				fprintf(fp, 			\
+		    "get_active_set_panic_task: %lx (panic)\n", \
+					panic_task);		\
+			return panic_task;			\
+		}                                               \
+                error(WARNING,                        		\
+     "multiple active tasks have called die and/or panic\n\n"); \
+		goto no_panic_task_found;			\
+        }                                             		\
+                                                      		\
+        if (die_task > (NO_TASK+1)) {                 		\
+		if (CRASHDEBUG(1))				\
+			fprintf(fp, 				\
+		    "get_active_set_panic_task: %lx (die)\n", 	\
+				die_task);			\
+                return die_task;                      		\
+	}							\
+        else if (die_task == (NO_TASK+1))             		\
+                error(WARNING,                        		\
 	"multiple active tasks have called die\n\n"); 
 
-#define SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS()        \
+#define SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS()  \
 	while (fgets(buf, BUFSIZE, pc->tmpfile)) {      \
                 if (strstr(buf, " die+")) {             \
                         switch (die_task)               \
@@ -4833,12 +6285,30 @@
                         {                               \
                         case NO_TASK:                   \
                                 panic_task = task;      \
+				if (XENDUMP_DUMPFILE()) \
+					xendump_panic_hook(buf); \
                                 break;                  \
                         default:                        \
                                 panic_task = NO_TASK+1; \
                                 break;                  \
                         }                               \
                 }                                       \
+                if (strstr(buf, " crash_kexec+") ||     \
+                    strstr(buf, " .crash_kexec+")) {    \
+			crash_kexec_task = task;	\
+                }                                       \
+                if (strstr(buf, " machine_kexec+") ||     \
+                    strstr(buf, " .machine_kexec+")) {    \
+			crash_kexec_task = task;	\
+                }                                       \
+                if (strstr(buf, " xen_panic_event+") || \
+                    strstr(buf, " .xen_panic_event+")){ \
+			xen_panic_task = task;	        \
+			xendump_panic_hook(buf);	\
+		}					\
+                if (machine_type("IA64") && XENDUMP_DUMPFILE() && !xen_panic_task && \
+                    strstr(buf, " sysrq_handle_crashdump+")) \
+			xen_sysrq_task = task;	        \
 	}
 
 /*
@@ -4850,11 +6320,14 @@
 	int i, j, found;
 	ulong task;
 	char buf[BUFSIZE];
-	ulong panic_task, die_task;
+	ulong panic_task, die_task, crash_kexec_task;
+	ulong xen_panic_task;
+	ulong xen_sysrq_task;
 	char *tp;
 	struct task_context *tc;
 
-	panic_task = die_task = NO_TASK;
+	panic_task = die_task = crash_kexec_task = xen_panic_task = NO_TASK;
+	xen_sysrq_task = NO_TASK;
 
         for (i = 0; i < NR_CPUS; i++) {
                 if (!(task = tt->active_set[i]))
@@ -4867,15 +6340,16 @@
                 	if ((tp = fill_task_struct(task))) {
                         	if ((tc = store_context(NULL, task, tp))) 
                                 	tt->running_tasks++;
+				else
+					continue;
                 	}
-			continue;
 		}
 
         	open_tmpfile();
 		raw_stack_dump(GET_STACKBASE(task), STACKSIZE());
         	rewind(pc->tmpfile);
 
-		SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS();
+		SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS();
 
 		close_tmpfile();
         }
@@ -4903,7 +6377,7 @@
 			raw_stack_dump(tt->hardirq_ctx[i], SIZE(thread_union));
 	        	rewind(pc->tmpfile);
 	
-			SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS();
+			SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS();
 
 			close_tmpfile();
 	        }
@@ -4930,7 +6404,7 @@
 			raw_stack_dump(tt->softirq_ctx[i], SIZE(thread_union));
 	        	rewind(pc->tmpfile);
 	
-			SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS();
+			SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS();
 
 			close_tmpfile();
 	        }
@@ -4938,6 +6412,28 @@
 		RESOLVE_PANIC_AND_DIE_CALLERS();
 	} 
 
+	if (crash_kexec_task) {
+		if (CRASHDEBUG(1))
+			error(INFO,
+		    "get_active_set_panic_task: %lx (crash_kexec)\n", 
+				crash_kexec_task);
+		return crash_kexec_task;
+	}
+
+	if (xen_sysrq_task) {
+		if (CRASHDEBUG(1))
+			error(INFO,
+		    "get_active_set_panic_task: %lx (sysrq_handle_crashdump)\n", 
+				xen_sysrq_task);
+		return xen_sysrq_task;
+	}
+
+no_panic_task_found:
+
+	if (CRASHDEBUG(1)) 
+		error(INFO,
+		    "get_active_set_panic_task: failed\n");
+
 	return NO_TASK;
 }
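Once the stack scans above have collected candidates, they are resolved in a fixed order: a task seen in xen_panic_event() wins, then one in crash_kexec()/machine_kexec(), then an unambiguous panic() caller, then an unambiguous die() caller, and finally (IA64 Xen dumps) the sysrq_handle_crashdump task; ambiguous combinations fall through to NO_TASK. A compressed sketch of that precedence, using plain variables rather than the patch's macros:

typedef unsigned long ulong;
#define NO_TASK    ((ulong)0)
#define AMBIGUOUS  (NO_TASK + 1)	/* more than one active task matched */

/* Pick the panic task from the candidates collected by the stack scans. */
ulong resolve_panic_task(ulong xen_panic, ulong kexec,
			 ulong panic, ulong die, ulong xen_sysrq)
{
	if (xen_panic)
		return xen_panic;
	if (kexec)
		return kexec;
	if (panic > AMBIGUOUS && !die)
		return panic;
	if (panic && die)
		return (panic > AMBIGUOUS && panic == die) ? panic : NO_TASK;
	if (die > AMBIGUOUS)
		return die;
	if (xen_sysrq)
		return xen_sysrq;
	return NO_TASK;
}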
 
@@ -4997,6 +6493,11 @@
 	ulong *tlist;
 	struct task_context *tc;
 
+	if (VALID_MEMBER(rq_cfs)) {
+		dump_CFS_runqueues();
+		return;
+	}
+ 
 	if (VALID_MEMBER(runqueue_arrays)) {
 		dump_runqueues();
 		return;
@@ -5017,120 +6518,370 @@
 		error(FATAL, 
 		    "cannot determine run queue structures being used\n");
 
-	cnt = 0;
-	do {
-		if (cnt == qlen) {
-			FREEBUF(tlist);
-			qlen += 1000;
-			goto start_again;
-		} 
+	cnt = 0;
+	do {
+		if (cnt == qlen) {
+			FREEBUF(tlist);
+			qlen += 1000;
+			goto start_again;
+		} 
+
+		tlist[cnt++] = next;
+
+                readmem(next+offs, KVADDR, &next, sizeof(void *), 
+			"run queue entry", FAULT_ON_ERROR);
+
+		if (next == runqueue_head)
+			break;
+	} while (next);
+
+	for (i = 0; i < cnt; i++) {
+		if (tlist[i] == runqueue_head)
+			continue;
+
+		if (!(tc = task_to_context(VIRTPAGEBASE(tlist[i])))) {
+			fprintf(fp, 
+			    	"PID: ?      TASK: %lx  CPU: ?   COMMAND: ?\n",
+					tlist[i]);
+			continue;
+		}
+
+		if (!is_idle_thread(tc->task))
+			print_task_header(fp, tc, 0);
+	}
+}
+
+#define RUNQ_ACTIVE  (1)
+#define RUNQ_EXPIRED (2)
+
+static void
+dump_runqueues(void)
+{
+	int cpu;
+	ulong runq, offset;
+	char *runqbuf;
+	ulong active, expired, arrays;
+	int per_cpu;
+
+
+        if (symbol_exists("runqueues")) {
+                runq = symbol_value("runqueues");
+                per_cpu = FALSE;
+        } else if (symbol_exists("per_cpu__runqueues")) {
+                runq = symbol_value("per_cpu__runqueues");
+                per_cpu = TRUE;
+        }
+
+        runqbuf = GETBUF(SIZE(runqueue));
+
+	for (cpu = 0; cpu < kt->cpus; cpu++, runq += SIZE(runqueue)) {
+		if (per_cpu) {
+			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) {
+                 		runq = symbol_value("per_cpu__runqueues") +
+                        		kt->__per_cpu_offset[cpu];
+                 	} else
+                 		runq = symbol_value("per_cpu__runqueues");
+		}
+
+		fprintf(fp, "RUNQUEUES[%d]: %lx\n", cpu, runq);
+
+                readmem(runq, KVADDR, runqbuf, SIZE(runqueue), 
+			"runqueues array entry", FAULT_ON_ERROR);
+		active = ULONG(runqbuf + OFFSET(runqueue_active));
+		expired = ULONG(runqbuf + OFFSET(runqueue_expired));
+		arrays = runq + OFFSET(runqueue_arrays);
+
+		console("active: %lx\n", active);
+		console("expired: %lx\n", expired);
+		console("arrays: %lx\n", arrays);
+
+		offset = active == arrays ? OFFSET(runqueue_arrays) :
+			OFFSET(runqueue_arrays) + SIZE(prio_array);
+		offset = active - runq;
+		dump_prio_array(RUNQ_ACTIVE, active, &runqbuf[offset]);
+
+		offset = expired == arrays ? OFFSET(runqueue_arrays) :
+			OFFSET(runqueue_arrays) + SIZE(prio_array);
+		offset = expired - runq;
+		dump_prio_array(RUNQ_EXPIRED, expired, &runqbuf[offset]);
+	}
+}
+
+static void
+dump_prio_array(int which, ulong k_prio_array, char *u_prio_array)
+{
+	int i, c, cnt, qheads, nr_active;
+	ulong offset, kvaddr, uvaddr;
+	ulong list_head[2];
+        struct list_data list_data, *ld;
+	struct task_context *tc;
+	ulong *tlist;
+
+        qheads = (i = ARRAY_LENGTH(prio_array_queue)) ?
+                i : get_array_length("prio_array.queue", NULL, SIZE(list_head));
+
+	console("dump_prio_array[%d]: %lx %lx\n",
+		which, k_prio_array, (ulong)u_prio_array);
+
+	nr_active = INT(u_prio_array + OFFSET(prio_array_nr_active));
+	console("nr_active: %d\n", nr_active);
+
+	fprintf(fp, " %s PRIO_ARRAY: %lx\n",  
+		which == RUNQ_ACTIVE ? "ACTIVE" : "EXPIRED", k_prio_array);
+
+	ld = &list_data;
+
+	for (i = 0; i < 140; i++) {
+		offset =  OFFSET(prio_array_queue) + (i * SIZE(list_head));
+		kvaddr = k_prio_array + offset;
+		uvaddr = (ulong)u_prio_array + offset;
+		BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);
+
+		if (CRASHDEBUG(1))
+			fprintf(fp, "prio_array[%d] @ %lx => %lx/%lx\n", 
+				i, kvaddr, list_head[0], list_head[1]);
+
+		if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
+			continue;
+
+		console("[%d] %lx => %lx-%lx ", i, kvaddr, list_head[0],
+			list_head[1]);
+
+		fprintf(fp, "  [%3d] ", i);
+
+		BZERO(ld, sizeof(struct list_data));
+		ld->start = list_head[0];
+		ld->list_head_offset = OFFSET(task_struct_run_list);
+		ld->end = kvaddr;
+		hq_open();
+		cnt = do_list(ld);
+		hq_close();
+		console("%d entries\n", cnt);
+        	tlist = (ulong *)GETBUF((cnt) * sizeof(ulong));
+		cnt = retrieve_list(tlist, cnt);
+		for (c = 0; c < cnt; c++) {
+			if (!(tc = task_to_context(tlist[c])))
+				continue;
+			if (c)
+				INDENT(8);
+			print_task_header(fp, tc, FALSE);
+		}
+		FREEBUF(tlist);
+	}
+}
+
+/*
+ *  The CFS scheduler uses a red-black tree to maintain its run queue.
+ */
+struct rb_node
+{
+        unsigned long  rb_parent_color;
+#define RB_RED          0
+#define RB_BLACK        1
+        struct rb_node *rb_right;
+        struct rb_node *rb_left;
+};
+
+struct rb_root
+{
+        struct rb_node *rb_node;
+};
+
+static struct rb_node *
+rb_first(struct rb_root *root)
+{
+        struct rb_root rloc;
+        struct rb_node *n;
+	struct rb_node nloc;
+
+	readmem((ulong)root, KVADDR, &rloc, sizeof(struct rb_root), 
+		"rb_root", FAULT_ON_ERROR);
+
+        n = rloc.rb_node;
+        if (!n)
+                return NULL;
+        while (rb_left(n, &nloc))
+		n = nloc.rb_left;
+
+        return n;
+}
+
+static struct rb_node *
+rb_parent(struct rb_node *node, struct rb_node *nloc)
+{
+	readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), 
+		"rb_node", FAULT_ON_ERROR);
+
+	return (struct rb_node *)(nloc->rb_parent_color & ~3);
+}
+
+static struct rb_node *
+rb_right(struct rb_node *node, struct rb_node *nloc)
+{
+	readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), 
+		"rb_node", FAULT_ON_ERROR);
+
+	return nloc->rb_right;
+}
 
-		tlist[cnt++] = next;
+static struct rb_node *
+rb_left(struct rb_node *node, struct rb_node *nloc)
+{
+	readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), 
+		"rb_node", FAULT_ON_ERROR);
 
-                readmem(next+offs, KVADDR, &next, sizeof(void *), 
-			"run queue entry", FAULT_ON_ERROR);
+	return nloc->rb_left;
+}
 
-		if (next == runqueue_head)
-			break;
-	} while (next);
+static struct rb_node *
+rb_next(struct rb_node *node)
+{
+	struct rb_node nloc;
+        struct rb_node *parent;
 
-	for (i = 0; i < cnt; i++) {
-		if (tlist[i] == runqueue_head)
-			continue;
+	parent = rb_parent(node, &nloc);
 
-		if (!(tc = task_to_context(VIRTPAGEBASE(tlist[i])))) {
-			fprintf(fp, 
-			    	"PID: ?      TASK: %lx  CPU: ?   COMMAND: ?\n",
-					tlist[i]);
-			continue;
-		}
+	if (parent == node)
+		return NULL;
 
-		if (!is_idle_thread(tc->task))
-			print_task_header(fp, tc, 0);
+        if (nloc.rb_right) {
+		node = nloc.rb_right;
+		while (rb_left(node, &nloc))
+			node = nloc.rb_left;
+		return node;
 	}
-}
 
-#define RUNQ_ACTIVE  (1)
-#define RUNQ_EXPIRED (2)
+        while ((parent = rb_parent(node, &nloc)) && (node == rb_right(parent, &nloc)))
+                node = parent;
+
+        return parent;
+}
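rb_first() and rb_next() above re-implement the kernel's in-order rbtree walk on top of readmem(): start at the leftmost node, and for each node take the leftmost node of its right subtree, otherwise climb until arriving from a left child. For reference, the same successor logic on an ordinary in-memory tree (a sketch independent of the crash readmem() helpers; node layout is local to the sketch):

#include <stddef.h>

struct node {
	struct node *parent, *left, *right;
	int key;
};

/* Leftmost node, i.e. the smallest key: first element of the in-order walk. */
struct node *tree_first(struct node *root)
{
	if (!root)
		return NULL;
	while (root->left)
		root = root->left;
	return root;
}

/* In-order successor: mirrors the patch's rb_next() without the readmem()s. */
struct node *tree_next(struct node *n)
{
	struct node *parent;

	if (n->right) {			/* smallest key in the right subtree */
		n = n->right;
		while (n->left)
			n = n->left;
		return n;
	}
	/* otherwise climb until we arrive from a left child */
	while ((parent = n->parent) && n == parent->right)
		n = parent;
	return parent;
}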
 
 static void
-dump_runqueues(void)
+dump_CFS_runqueues(void)
 {
 	int cpu;
-	ulong runq, offset;
-	char *runqbuf;
-	ulong active, expired, arrays;
-	int per_cpu;
+	ulong runq, cfs_rq;
+	char *runqbuf, *cfs_rq_buf;
+	ulong leftmost, tasks_timeline;
+	struct task_context *tc;
+	long nr_running, cfs_rq_nr_running;
+	struct rb_root *root;
+	struct rb_node *node;
+
+	if (!VALID_STRUCT(cfs_rq)) {
+		STRUCT_SIZE_INIT(cfs_rq, "cfs_rq");
+		MEMBER_OFFSET_INIT(rq_rt, "rq", "rt");
+		MEMBER_OFFSET_INIT(rq_nr_running, "rq", "nr_running");
+		MEMBER_OFFSET_INIT(task_struct_se, "task_struct", "se");
+		MEMBER_OFFSET_INIT(sched_entity_run_node, "sched_entity", 
+			"run_node");
+		MEMBER_OFFSET_INIT(cfs_rq_rb_leftmost, "cfs_rq", "rb_leftmost");
+		MEMBER_OFFSET_INIT(cfs_rq_nr_running, "cfs_rq", "nr_running");
+		MEMBER_OFFSET_INIT(cfs_rq_tasks_timeline, "cfs_rq", 
+			"tasks_timeline");
+		MEMBER_OFFSET_INIT(rt_rq_active, "rt_rq", "active");
+                MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct",
+                        "run_list");
+	}
 
+	if (!symbol_exists("per_cpu__runqueues"))
+		error(FATAL, "per_cpu__runqueues does not exist\n");
 
-        if (symbol_exists("runqueues")) {
-                runq = symbol_value("runqueues");
-                per_cpu = FALSE;
-        } else if (symbol_exists("per_cpu__runqueues")) {
-                runq = symbol_value("per_cpu__runqueues");
-                per_cpu = TRUE;
-        }
+        runq = symbol_value("per_cpu__runqueues");
 
         runqbuf = GETBUF(SIZE(runqueue));
+	cfs_rq_buf = symbol_exists("per_cpu__init_cfs_rq") ?
+		GETBUF(SIZE(cfs_rq)) : NULL;
 
-	for (cpu = 0; cpu < kt->cpus; cpu++, runq += SIZE(runqueue)) {
-		if (per_cpu) {
-			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) {
-                 		runq = symbol_value("per_cpu__runqueues") +
-                        		kt->__per_cpu_offset[cpu];
-                 	} else
-                 		runq = symbol_value("per_cpu__runqueues");
-		}
+        for (cpu = 0; cpu < kt->cpus; cpu++) {
+		if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) {
+			runq = symbol_value("per_cpu__runqueues") +
+				kt->__per_cpu_offset[cpu];
+		} else
+			runq = symbol_value("per_cpu__runqueues");
 
-		fprintf(fp, "RUNQUEUES[%d]: %lx\n", cpu, runq);
+                fprintf(fp, "RUNQUEUES[%d]: %lx\n", cpu, runq);
+                readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
+                        "per-cpu rq", FAULT_ON_ERROR);
+
+		if (cfs_rq_buf) {
+			/*
+			 *  Use the default task group's cfs_rq on each cpu.
+			 */
+			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) {
+				cfs_rq = symbol_value("per_cpu__init_cfs_rq") +
+					kt->__per_cpu_offset[cpu];
+			} else
+				cfs_rq = symbol_value("per_cpu__init_cfs_rq");
 
-                readmem(runq, KVADDR, runqbuf, SIZE(runqueue), 
-			"runqueues array entry", FAULT_ON_ERROR);
-		active = ULONG(runqbuf + OFFSET(runqueue_active));
-		expired = ULONG(runqbuf + OFFSET(runqueue_expired));
-		arrays = runq + OFFSET(runqueue_arrays);
+			readmem(cfs_rq, KVADDR, cfs_rq_buf, SIZE(cfs_rq),
+				"per-cpu cfs_rq", FAULT_ON_ERROR);
+	                leftmost = ULONG(cfs_rq_buf + OFFSET(cfs_rq_rb_leftmost));
+	                tasks_timeline = ULONG(cfs_rq_buf + 
+				OFFSET(cfs_rq_tasks_timeline));
+			nr_running = LONG(cfs_rq_buf + OFFSET(rq_nr_running));
+	                cfs_rq_nr_running = ULONG(cfs_rq_buf + 
+				OFFSET(cfs_rq_nr_running));
+			root = (struct rb_root *)(cfs_rq + 
+				OFFSET(cfs_rq_tasks_timeline));
+		} else {
+	                leftmost = ULONG(runqbuf + OFFSET(rq_cfs) + 
+				OFFSET(cfs_rq_rb_leftmost));
+	                tasks_timeline = ULONG(runqbuf + OFFSET(rq_cfs) + 
+				OFFSET(cfs_rq_tasks_timeline));
+			nr_running = LONG(runqbuf + OFFSET(rq_nr_running));
+	                cfs_rq_nr_running = ULONG(runqbuf + OFFSET(rq_cfs) + 
+				OFFSET(cfs_rq_nr_running));
+			root = (struct rb_root *)(runq + OFFSET(rq_cfs) + 
+				OFFSET(cfs_rq_tasks_timeline));
+		}
+
+		dump_RT_prio_array(nr_running != cfs_rq_nr_running,
+			runq + OFFSET(rq_rt) + OFFSET(rt_rq_active), 
+			&runqbuf[OFFSET(rq_rt) + OFFSET(rt_rq_active)]);
 
-		console("active: %lx\n", active);
-		console("expired: %lx\n", expired);
-		console("arrays: %lx\n", arrays);
+		fprintf(fp, " CFS RB_ROOT: %lx\n", (ulong)root);
 
-		offset = active == arrays ? OFFSET(runqueue_arrays) :
-			OFFSET(runqueue_arrays) + SIZE(prio_array);
-		offset = active - runq;
-		dump_prio_array(RUNQ_ACTIVE, active, &runqbuf[offset]);
+		if (!leftmost)
+			continue;
 
-		offset = expired == arrays ? OFFSET(runqueue_arrays) :
-			OFFSET(runqueue_arrays) + SIZE(prio_array);
-		offset = expired - runq;
-		dump_prio_array(RUNQ_EXPIRED, expired, &runqbuf[offset]);
+		for (node = rb_first(root); node; node = rb_next(node)) {
+			tc = task_to_context((ulong)node - OFFSET(task_struct_se) -
+			     OFFSET(sched_entity_run_node));
+			if (!tc)
+				continue;
+			INDENT(2);
+			print_task_header(fp, tc, FALSE);
+		}
 	}
+
+	FREEBUF(runqbuf);
+	if (cfs_rq_buf)
+		FREEBUF(cfs_rq_buf);
 }
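The task lookup inside the CFS walk above is a container_of() done by hand against dumpfile offsets: each rb_node handed back by the tree is embedded at task_struct.se.run_node, so subtracting OFFSET(task_struct_se) and OFFSET(sched_entity_run_node) from the node address yields the task_struct address, which task_to_context() then resolves. The same arithmetic on ordinary local structs (all names below are local to the sketch):

#include <stddef.h>
#include <stdio.h>

struct run_node  { void *left, *right; };
struct sched_ent { long vruntime; struct run_node run_node; };
struct task      { int pid; struct sched_ent se; };

int main(void)
{
	struct task t = { .pid = 42 };
	struct run_node *node = &t.se.run_node;	/* what the rbtree walk yields */

	/* same arithmetic as:
	 *   (ulong)node - OFFSET(task_struct_se) - OFFSET(sched_entity_run_node)
	 */
	struct task *owner = (struct task *)((char *)node
			- offsetof(struct task, se)
			- offsetof(struct sched_ent, run_node));

	printf("recovered pid %d, owner %s original task\n",
		owner->pid, owner == &t ? "==" : "!=");
	return 0;
}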
 
 static void
-dump_prio_array(int which, ulong k_prio_array, char *u_prio_array)
+dump_RT_prio_array(int active, ulong k_prio_array, char *u_prio_array)
 {
-	int i, c, cnt, qheads, nr_active;
+	int i, c, cnt, qheads;
 	ulong offset, kvaddr, uvaddr;
 	ulong list_head[2];
         struct list_data list_data, *ld;
 	struct task_context *tc;
 	ulong *tlist;
 
-        qheads = (i = ARRAY_LENGTH(prio_array_queue)) ?
-                i : get_array_length("prio_array.queue", NULL, SIZE(list_head));
-
-	console("dump_prio_array[%d]: %lx %lx\n",
-		which, k_prio_array, (ulong)u_prio_array);
+	fprintf(fp, " RT PRIO_ARRAY: %lx\n",  k_prio_array);
 
-	nr_active = INT(u_prio_array + OFFSET(prio_array_nr_active));
-	console("nr_active: %d\n", nr_active);
+	if (!active)
+		return;
 
-	fprintf(fp, " %s PRIO_ARRAY: %lx\n",  
-		which == RUNQ_ACTIVE ? "ACTIVE" : "EXPIRED", k_prio_array);
+        qheads = (i = ARRAY_LENGTH(prio_array_queue)) ?
+                i : get_array_length("prio_array.queue", NULL, SIZE(list_head));
 
 	ld = &list_data;
 
-	for (i = 0; i < 140; i++) {
+	for (i = 0; i < qheads; i++) {
 		offset =  OFFSET(prio_array_queue) + (i * SIZE(list_head));
 		kvaddr = k_prio_array + offset;
 		uvaddr = (ulong)u_prio_array + offset;
@@ -5143,9 +6894,6 @@
 		if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
 			continue;
 
-		console("[%d] %lx => %lx-%lx ", i, kvaddr, list_head[0],
-			list_head[1]);
-
 		fprintf(fp, "  [%3d] ", i);
 
 		BZERO(ld, sizeof(struct list_data));
@@ -5155,8 +6903,7 @@
 		hq_open();
 		cnt = do_list(ld);
 		hq_close();
-		console("%d entries\n", cnt);
-        	tlist = (ulong *)GETBUF((cnt) * sizeof(ulong));
+		tlist = (ulong *)GETBUF((cnt) * sizeof(ulong));
 		cnt = retrieve_list(tlist, cnt);
 		for (c = 0; c < cnt; c++) {
 			if (!(tc = task_to_context(tlist[c])))
@@ -5174,6 +6921,9 @@
 #define _NSIG_BPW       machdep->bits
 #define _NSIG_WORDS     (_NSIG / _NSIG_BPW)
 
+#undef SIGRTMIN
+#define SIGRTMIN	32
+
 static struct signame {
         char *name;
         char *altname;
@@ -5209,23 +6959,56 @@
     /* 28 */  {"SIGWINCH",   NULL},
     /* 29 */  {"SIGIO",      "SIGPOLL"},
     /* 30 */  {"SIGPWR",     NULL},
-    /* 31 */  {"SIGSYS",     NULL},
+    /* 31 */  {"SIGSYS",     "SIGUNUSED"},
               {NULL,         NULL},    /* Real time signals start here. */
 };
 
+static int
+sigrt_minmax(int *min, int *max) 
+{
+	int sigrtmax, j;
+
+	sigrtmax = THIS_KERNEL_VERSION < LINUX(2,5,0) ? 
+		_NSIG - 1  : _NSIG;
+
+	if (min && max) {
+		j = sigrtmax-SIGRTMIN-1;
+		*max = j / 2;
+		*min = j - *max;
+	}
+
+	return sigrtmax;
+}
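sigrt_minmax() splits the real-time signal range so the lower half is labelled SIGRTMIN+n and the upper half SIGRTMAX-n. With the usual 64-signal layout on a 2.6 kernel (SIGRTMIN = 32, sigrtmax = _NSIG = 64): j = 64 - 32 - 1 = 31, so max = 15 and min = 16, meaning signals 33..48 print as SIGRTMIN+1 .. SIGRTMIN+16 and 49..63 print as SIGRTMAX-15 .. SIGRTMAX-1, while 32 and 64 print as SIGRTMIN and SIGRTMAX themselves. A tiny stand-alone check of that arithmetic:

#include <stdio.h>

#define SIGRTMIN 32

int main(void)
{
	int sigrtmax = 64;			/* 2.6 kernels: _NSIG */
	int j = sigrtmax - SIGRTMIN - 1;	/* 31 */
	int max = j / 2;			/* 15 */
	int min = j - max;			/* 16 */
	int i, n = 1;

	for (i = SIGRTMIN; i <= sigrtmax; i++) {
		if (i == SIGRTMIN || i == sigrtmax)
			printf("[%d] %s\n", i,
				i == SIGRTMIN ? "SIGRTMIN" : "SIGRTMAX");
		else if (n <= min)
			printf("[%d] SIGRTMIN+%d\n", i, n++);
		else
			printf("[%d] SIGRTMAX-%d\n", i, max--);
	}
	return 0;
}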
+
 static void
 signame_list(void)
 {
-	int i;
+	int i, sigrtmax, j, min, max;
 
-        for (i = 0; i < _NSIG; i++) {
-                if (!signame[i].name)
-                        continue;
+	sigrtmax = sigrt_minmax(&min, &max);
+	j = 1;
+
+        for (i = 1; i <= sigrtmax; i++) {
+		if ((i == SIGRTMIN) || (i == sigrtmax)) {
+			fprintf(fp, "[%d] %s", i, 
+			    (i== SIGRTMIN) ? "SIGRTMIN" : "SIGRTMAX");
+		} else if (i > SIGRTMIN) {
+			if (j <= min){
+				fprintf(fp, "[%d] %s%d", i , "SIGRTMIN+", j);
+				j++;
+			} else if (max >= 1) {
+				fprintf(fp, "[%d] %s%d", i , "SIGRTMAX-",max);
+				max--;
+			}
+		} else {
+                	if (!signame[i].name)
+                        	continue;
 
-                fprintf(fp, "%s[%d] %s", i < 10 ? " " : "", 
-			i, signame[i].name);
-		if (signame[i].altname)
-			fprintf(fp, "/%s",  signame[i].altname);
+                	fprintf(fp, "%s[%d] %s", i < 10 ? " " : "", 
+				i, signame[i].name);
+			if (signame[i].altname)
+				fprintf(fp, "/%s",  signame[i].altname);
+		}
 		fprintf(fp, "\n");
         }
 }
@@ -5236,8 +7019,7 @@
 static void 
 translate_sigset(ulonglong sigset)
 {
-	int i, c, bit, len;
-	ulonglong mask, sig;
+	int sigrtmax, min, max, i, j, c, len;
 	char buf[BUFSIZE];
 
 	if (!sigset) {
@@ -5246,21 +7028,42 @@
 	}
 
 	len = 0;
+	sigrtmax= sigrt_minmax(&min, &max);
+	j = 1;
+
+        for (i = 1, c = 0; i <= sigrtmax; i++) {
+		if (sigset & (ulonglong)1) {
+			if (i == SIGRTMIN || i == sigrtmax)
+				sprintf(buf, "%s%s", c++ ? " " : "", 
+					(i==SIGRTMIN) ? "SIGRTMIN" : "SIGRTMAX");
+			else if (i > SIGRTMIN) {
+				if (j <= min)
+					sprintf(buf, "%s%s%d", 
+						c++ ? " " : "", "SIGRTMIN+", j);
+				else if (max >= 1)
+					sprintf(buf, "%s%s%d", 
+						c++ ? " " : "", "SIGRTMAX-", max);
+			} else
+				sprintf(buf, "%s%s", c++ ? " " : "", 
+					signame[i].name);
 
-        for (i = c = 0; i < (_NSIG/2); i++) {
-              	mask = (ulong)(1) << i;
-		if ((sig = (sigset & mask))) {
-			bit = ffs((int)sig);
-			sprintf(buf, "%s%s", c++ ? " " : "", 
-				signame[bit].name);
 			if ((len + strlen(buf)) > 80) {
 				shift_string_left(buf, 1);
 				fprintf(fp,  "\n");
 				len = 0;
 			}
+
 			len += strlen(buf);
 			fprintf(fp, buf);
 		}
+
+		sigset >>= 1;
+		if (i > SIGRTMIN) {
+			if (j <= min) 
+				j++;
+			else if (max >= 1)
+				max--;
+		}	
 	}
 	fprintf(fp, "\n");
 }
@@ -5290,13 +7093,14 @@
 	struct task_context *tc;
 	ulong *tasklist;
 	char *siglist;
+	int thread_group = FALSE;
 
 	tasklist = (ulong *)GETBUF((MAXARGS+NR_CPUS)*sizeof(ulong));
 	ref = (struct reference *)GETBUF(sizeof(struct reference));
 	siglist = GETBUF(BUFSIZE);
 	ref->str = siglist;
 
-        while ((c = getopt(argcnt, args, "lR:s:")) != EOF) {
+        while ((c = getopt(argcnt, args, "lR:s:g")) != EOF) {
                 switch(c)
 		{
 		case 's':
@@ -5314,6 +7118,10 @@
 			signame_list();
 			return;
 
+		case 'g':
+			pc->curcmd_flags |= TASK_SPECIFIED;
+			thread_group = TRUE;
+			break;
 		default:
 			argerrs++;
 			break;
@@ -5360,10 +7168,65 @@
 		tasklist[tcnt++] = CURRENT_TASK();
 
 	for (c = 0; c < tcnt; c++) {
-		do_sig(tasklist[c], 0, strlen(ref->str) ? ref : NULL);
-		fprintf(fp, "\n");
+		if (thread_group)
+			do_sig_thread_group(tasklist[c]);
+		else {
+			do_sig(tasklist[c], 0, strlen(ref->str) ? ref : NULL);
+			fprintf(fp, "\n");
+		}
+	}
+
+}
+
+
+/*
+ *  Do the work for the "sig -g" command option, coming from sig or foreach.
+ */
+static void
+do_sig_thread_group(ulong task)
+{
+        int i;
+        int cnt;
+        struct task_context *tc;
+	ulong tgid;
+
+        tc = task_to_context(task);
+	tgid = task_tgid(task);
+
+	if (tc->pid != tgid) {
+		if (pc->curcmd_flags & TASK_SPECIFIED) {
+			if (!(tc = tgid_to_context(tgid))) 
+				return;
+			task = tc->task;
+		} else 
+			return;
 	}
 
+	if ((tc->pid == 0) && (pc->curcmd_flags & IDLE_TASK_SHOWN))
+		return;
+
+       	print_task_header(fp, tc, 0);
+	dump_signal_data(tc, THREAD_GROUP_LEVEL);
+	fprintf(fp, "\n  ");
+	print_task_header(fp, tc, 0);
+	dump_signal_data(tc, TASK_LEVEL|TASK_INDENT);
+
+	tc = FIRST_CONTEXT();
+        for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) {
+		if (tc->task == task)
+			continue;
+
+		if (task_tgid(tc->task)	== tgid) {
+			fprintf(fp, "\n  ");
+                        print_task_header(fp, tc, 0);
+			dump_signal_data(tc, TASK_LEVEL|TASK_INDENT);
+                        cnt++;
+			if (tc->pid == 0)
+				pc->curcmd_flags |= IDLE_TASK_SHOWN;
+                }
+        }
+
+	fprintf(fp, "\n");
 }
 
 /*
@@ -5381,7 +7244,7 @@
         else {
                 if (!(flags & FOREACH_TASK))
                         print_task_header(fp, tc, 0);
-                dump_signal_data(tc);
+                dump_signal_data(tc, TASK_LEVEL|THREAD_GROUP_LEVEL);
         }
 }
 
@@ -5401,40 +7264,34 @@
  *  Dump all signal-handling data for a task.
  */
 static void
-dump_signal_data(struct task_context *tc)
+dump_signal_data(struct task_context *tc, ulong flags)
 {
-	int i, others, use_sighand;
-	int translate, sig, sigpending;
+	int i, sigrtmax, others, use_sighand;
+	int translate, sigpending;
 	uint ti_flags;
 	ulonglong sigset, blocked, mask;
-	ulong signal_struct, kaddr, handler, flags, sigqueue, next;
+	ulong signal_struct, kaddr, handler, sa_flags, sigqueue;
 	ulong sighand_struct;
 	long size;
 	char *signal_buf, *uaddr;
+	ulong shared_pending, signal;
 	char buf1[BUFSIZE];
 	char buf2[BUFSIZE];
 	char buf3[BUFSIZE];
 	char buf4[BUFSIZE];
 
-	sigset = task_signal(tc->task);
+        if (VALID_STRUCT(sigqueue) && !VALID_MEMBER(sigqueue_next)) {
+                MEMBER_OFFSET_INIT(sigqueue_next, "sigqueue", "next");
+                MEMBER_OFFSET_INIT(sigqueue_list, "sigqueue", "list");
+                MEMBER_OFFSET_INIT(sigqueue_info, "sigqueue", "info");
+        } else if (!VALID_MEMBER(signal_queue_next)) {
+                MEMBER_OFFSET_INIT(signal_queue_next, "signal_queue", "next");
+                MEMBER_OFFSET_INIT(signal_queue_info, "signal_queue", "info");
+        }
+
+	sigset = task_signal(tc->task, 0);
 	if (!tt->last_task_read)
 		return;
-	blocked = task_blocked(tc->task);
-
-	if (VALID_MEMBER(task_struct_sigpending))
-		sigpending = INT(tt->task_struct + 
-			OFFSET(task_struct_sigpending));
-	else if (VALID_MEMBER(thread_info_flags)) {
-		fill_thread_info(tc->thread_info);
-		ti_flags = UINT(tt->thread_info + OFFSET(thread_info_flags));
-		sigpending = ti_flags & (1<<TIF_SIGPENDING);
-	}
-	
-	fprintf(fp, "SIGPENDING: %s\n", sigpending ? "yes" : "no");
-		
-	fprintf(fp, "    SIGNAL: %016llx\n", sigset);
-
-	fprintf(fp, "   BLOCKED: %016llx\n", blocked);
 
 	if (VALID_MEMBER(task_struct_sig))
 		signal_struct = ULONG(tt->task_struct + 
@@ -5443,143 +7300,259 @@
 		signal_struct = ULONG(tt->task_struct + 
 			OFFSET(task_struct_signal));
 
-	fprintf(fp, "SIGNAL_STRUCT: %lx  ", signal_struct);
-
 	size = MAX(SIZE(signal_struct), VALID_SIZE(signal_queue) ?  
 		SIZE(signal_queue) : SIZE(sigqueue));
 	if (VALID_SIZE(sighand_struct))
 		size = MAX(size, SIZE(sighand_struct));
 	signal_buf = GETBUF(size);
 
-	readmem(signal_struct, KVADDR, signal_buf,
-		SIZE(signal_struct), "signal_struct buffer",
-		FAULT_ON_ERROR);
-	fprintf(fp, "COUNT: %d\n",
-		INT(signal_buf + OFFSET(signal_struct_count)));
-
-	fprintf(fp, " SIG %s %s %s %s\n",
-		mkstring(buf1, VADDR_PRLEN == 8 ? 9 : VADDR_PRLEN, 
-			CENTER, "SIGACTION"),
+	if (signal_struct)
+		readmem(signal_struct, KVADDR, signal_buf,
+			SIZE(signal_struct), "signal_struct buffer",
+			FAULT_ON_ERROR);
+
+	/*
+	 *  Signal dispositions (thread group level).
+	 */
+	if (flags & THREAD_GROUP_LEVEL) {
+		if (flags & TASK_INDENT)
+			INDENT(2);
+		fprintf(fp, "SIGNAL_STRUCT: %lx  ", signal_struct);
+		if (!signal_struct) {
+			fprintf(fp, "\n");
+			return;
+		}
+		fprintf(fp, "COUNT: %d\n",
+			INT(signal_buf + OFFSET(signal_struct_count)));
+
+		if (flags & TASK_INDENT)
+			INDENT(2);
+		fprintf(fp, " SIG %s %s %s %s\n",
+			mkstring(buf1, VADDR_PRLEN == 8 ? 9 : VADDR_PRLEN, 
+				CENTER, "SIGACTION"),
 		mkstring(buf2, UVADDR_PRLEN, RJUST, "HANDLER"),
 		mkstring(buf3, 16, CENTER, "MASK"),
 		mkstring(buf4, VADDR_PRLEN, LJUST, "FLAGS"));
 
-	if (VALID_MEMBER(task_struct_sighand)) {
-		sighand_struct = ULONG(tt->task_struct +
-                        OFFSET(task_struct_sighand));
-		readmem(sighand_struct, KVADDR, signal_buf,
-			SIZE(sighand_struct), "sighand_struct buffer",
-			FAULT_ON_ERROR);
-		use_sighand = TRUE;
-	} else
-		use_sighand = FALSE;
-
-        for (i = 1; i < _NSIG; i++) {
-                fprintf(fp, "%s[%d] ", i < 10 ? " " : "", i);
-
-		if (use_sighand) {
-			kaddr = sighand_struct + OFFSET(sighand_struct_action) +
-				((i-1) * SIZE(k_sigaction));
-			uaddr = signal_buf + OFFSET(sighand_struct_action) +
-				((i-1) * SIZE(k_sigaction));
-		} else {
-			kaddr = signal_struct + OFFSET(signal_struct_action) +
-				((i-1) * SIZE(k_sigaction));
-			uaddr = signal_buf + OFFSET(signal_struct_action) +
-				((i-1) * SIZE(k_sigaction));
-		}
+		if (VALID_MEMBER(task_struct_sighand)) {
+			sighand_struct = ULONG(tt->task_struct +
+	                        OFFSET(task_struct_sighand));
+			readmem(sighand_struct, KVADDR, signal_buf,
+				SIZE(sighand_struct), "sighand_struct buffer",
+				FAULT_ON_ERROR);
+			use_sighand = TRUE;
+		} else
+			use_sighand = FALSE;
 
-		handler = ULONG(uaddr + OFFSET(sigaction_sa_handler));
-		switch ((long)handler)
-		{
-		case -1:
-			mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_ERR");
-			break;
-		case 0:
-			mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_DFL");
-			break;
-		case 1:
-			mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_IGN");
-			break;
-		default:
-			mkstring(buf1, UVADDR_PRLEN, RJUST|LONG_HEX,
-                                    MKSTR(handler));
-			break;
-		}
+		sigrtmax = sigrt_minmax(NULL, NULL);
 
-		mask = sigaction_mask((ulong)uaddr);
-		flags = ULONG(uaddr + OFFSET(sigaction_sa_flags));
+	        for (i = 1; i <= sigrtmax; i++) {
+			if (flags & TASK_INDENT)
+				INDENT(2);
 
-		fprintf(fp, "%s%s %s %016llx %lx ",
-			space(MINSPACE-1), 
-			mkstring(buf2,UVADDR_PRLEN,LJUST|LONG_HEX,MKSTR(kaddr)),
-			buf1,
-			mask,
-			flags);
-
-		if (flags) {
-			others = 0; translate = 1;
-			if (flags & SA_NOCLDSTOP)
-				fprintf(fp, "%s%sSA_NOCLDSTOP",
-					translate-- > 0 ? "(" : "",
-					others++ ? "|" : "");
+	                fprintf(fp, "%s[%d] ", i < 10 ? " " : "", i);
+	
+			if (use_sighand) {
+				kaddr = sighand_struct + 
+					OFFSET(sighand_struct_action) +
+					((i-1) * SIZE(k_sigaction));
+				uaddr = signal_buf + 
+					OFFSET(sighand_struct_action) +
+					((i-1) * SIZE(k_sigaction));
+			} else {
+				kaddr = signal_struct + 
+					OFFSET(signal_struct_action) +
+					((i-1) * SIZE(k_sigaction));
+				uaddr = signal_buf + 
+					OFFSET(signal_struct_action) +
+					((i-1) * SIZE(k_sigaction));
+			}
+	
+			handler = ULONG(uaddr + OFFSET(sigaction_sa_handler));
+			switch ((long)handler)
+			{
+			case -1:
+				mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_ERR");
+				break;
+			case 0:
+				mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_DFL");
+				break;
+			case 1:
+				mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_IGN");
+				break;
+			default:
+				mkstring(buf1, UVADDR_PRLEN, RJUST|LONG_HEX,
+	                                    MKSTR(handler));
+				break;
+			}
+	
+			mask = sigaction_mask((ulong)uaddr);
+			sa_flags = ULONG(uaddr + OFFSET(sigaction_sa_flags));
+	
+			fprintf(fp, "%s%s %s %016llx %lx ",
+				space(MINSPACE-1), 
+				mkstring(buf2,
+				UVADDR_PRLEN,LJUST|LONG_HEX,MKSTR(kaddr)),
+				buf1,
+				mask,
+				sa_flags);
+	
+			if (sa_flags) {
+				others = 0; translate = 1;
+				if (sa_flags & SA_NOCLDSTOP)
+					fprintf(fp, "%s%sSA_NOCLDSTOP",
+						translate-- > 0 ? "(" : "",
+						others++ ? "|" : "");
 #ifdef SA_RESTORER
-                        if (flags & SA_RESTORER)
-                                fprintf(fp, "%s%sSA_RESTORER",
-                                        translate-- > 0 ? "(" : "",
-                                        others++ ? "|" : "");
+	                        if (sa_flags & SA_RESTORER)
+	                                fprintf(fp, "%s%sSA_RESTORER",
+	                                        translate-- > 0 ? "(" : "",
+	                                        others++ ? "|" : "");
 #endif
 #ifdef SA_NOCLDWAIT
-			if (flags & SA_NOCLDWAIT)
-				fprintf(fp, "%s%sSA_NOCLDWAIT", 
-					translate-- > 0 ? "(" : "",
-					others++ ? "|" : "");
+				if (sa_flags & SA_NOCLDWAIT)
+					fprintf(fp, "%s%sSA_NOCLDWAIT", 
+						translate-- > 0 ? "(" : "",
+						others++ ? "|" : "");
 #endif
-			if (flags & SA_SIGINFO)
-				fprintf(fp, "%s%sSA_SIGINFO", 
-					translate-- > 0 ? "(" : "",
-					others++ ? "|" : "");
-			if (flags & SA_ONSTACK)
-				fprintf(fp, "%s%sSA_ONSTACK", 
-					translate-- > 0 ? "(" : "",
-					others++ ? "|" : "");
-			if (flags & SA_RESTART)
-				fprintf(fp, "%s%sSA_RESTART", 
-					translate-- > 0 ? "(" : "",
-					others++ ? "|" : "");
-			if (flags & SA_NODEFER)
-				fprintf(fp, "%s%sSA_NODEFER", 
-					translate-- > 0 ? "(" : "",
-					others++ ? "|" : "");
-			if (flags & SA_RESETHAND)
-				fprintf(fp, "%s%sSA_RESETHAND", 
-					translate-- > 0 ? "(" : "",
-					others++ ? "|" : "");
-			if (translate < 1)
-                		fprintf(fp, ")");
-		}
-
-                fprintf(fp, "\n");
-        }
-
-	if (VALID_MEMBER(task_struct_sigqueue)) 
-		sigqueue = ULONG(tt->task_struct + 
-			OFFSET(task_struct_sigqueue));
-
-	else if (VALID_MEMBER(task_struct_pending)) 
-		sigqueue = ULONG(tt->task_struct +
-			OFFSET(task_struct_pending) +
-			OFFSET_OPTION(sigpending_head, sigpending_list));
-
-	if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue))
-		sigqueue = 0;
-
-	if (sigqueue)
-                fprintf(fp, "SIGQUEUE:  SIG  %s\n",
-                        mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SIGINFO"));
-	else
-                fprintf(fp, "SIGQUEUE: (empty)\n");
+				if (sa_flags & SA_SIGINFO)
+					fprintf(fp, "%s%sSA_SIGINFO", 
+						translate-- > 0 ? "(" : "",
+						others++ ? "|" : "");
+				if (sa_flags & SA_ONSTACK)
+					fprintf(fp, "%s%sSA_ONSTACK", 
+						translate-- > 0 ? "(" : "",
+						others++ ? "|" : "");
+				if (sa_flags & SA_RESTART)
+					fprintf(fp, "%s%sSA_RESTART", 
+						translate-- > 0 ? "(" : "",
+						others++ ? "|" : "");
+				if (sa_flags & SA_NODEFER)
+					fprintf(fp, "%s%sSA_NODEFER", 
+						translate-- > 0 ? "(" : "",
+						others++ ? "|" : "");
+				if (sa_flags & SA_RESETHAND)
+					fprintf(fp, "%s%sSA_RESETHAND", 
+						translate-- > 0 ? "(" : "",
+						others++ ? "|" : "");
+				if (translate < 1)
+	                		fprintf(fp, ")");
+			}
+	
+	                fprintf(fp, "\n");
+	        }
+	}
+	
+	if (flags & TASK_LEVEL) {
+		/*
+		 *  Pending signals (task level).
+		 */
+		if (VALID_MEMBER(task_struct_sigpending))
+			sigpending = INT(tt->task_struct + 
+				OFFSET(task_struct_sigpending));
+		else if (VALID_MEMBER(thread_info_flags)) {
+			fill_thread_info(tc->thread_info);
+			ti_flags = UINT(tt->thread_info + OFFSET(thread_info_flags));
+			sigpending = ti_flags & (1<<TIF_SIGPENDING);
+		}
+		if (flags & TASK_INDENT)
+			INDENT(2);
+		fprintf(fp, "SIGPENDING: %s\n", sigpending ? "yes" : "no");
+
+		/*
+		 *  Blocked signals (task level).
+		 */
+
+		blocked = task_blocked(tc->task);
+		if (flags & TASK_INDENT)
+			INDENT(2);
+		fprintf(fp, "   BLOCKED: %016llx\n", blocked);
+		
+		/*
+		 *  Pending queue (task level).
+		 */
+	
+		if (flags & TASK_INDENT)
+			INDENT(2);
+		if (VALID_MEMBER(signal_struct_shared_pending)) {
+			fprintf(fp, "PRIVATE_PENDING\n");
+			if (flags & TASK_INDENT)
+				INDENT(2);
+		}
+		fprintf(fp, "    SIGNAL: %016llx\n", sigset);
+
+		if (VALID_MEMBER(task_struct_sigqueue)) 
+			sigqueue = ULONG(tt->task_struct + 
+				OFFSET(task_struct_sigqueue));
+	
+		else if (VALID_MEMBER(task_struct_pending)) 
+			sigqueue = ULONG(tt->task_struct +
+				OFFSET(task_struct_pending) +
+				OFFSET_OPTION(sigpending_head, 
+				sigpending_list));
+	
+		if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue))
+			sigqueue = 0;
+
+		if (flags & TASK_INDENT)
+			INDENT(2);
+		if (sigqueue) {
+                	fprintf(fp, "  SIGQUEUE:  SIG  %s\n",
+                        	mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SIGINFO"));
+		 	sigqueue_list(sigqueue);
+		} else
+                	fprintf(fp, "  SIGQUEUE: (empty)\n");
+	}
+
+	/*
+	 *  Pending queue (thread group level).
+	 */
+	if ((flags & THREAD_GROUP_LEVEL) &&
+	    VALID_MEMBER(signal_struct_shared_pending)) {
+
+		fprintf(fp, "SHARED_PENDING\n");
+		shared_pending = signal_struct + OFFSET(signal_struct_shared_pending);
+		signal = shared_pending + OFFSET(sigpending_signal);
+		readmem(signal, KVADDR, signal_buf,SIZE(sigpending_signal),
+			"signal", FAULT_ON_ERROR);
+		sigset = task_signal(0, (ulong*)signal_buf);
+		if (flags & TASK_INDENT)
+			INDENT(2);
+		fprintf(fp, "    SIGNAL: %016llx\n", sigset);
+                sigqueue = (shared_pending + 
+			OFFSET_OPTION(sigpending_head, sigpending_list) + 
+			OFFSET(list_head_next));
+		readmem(sigqueue,KVADDR, signal_buf,
+			SIZE(sigqueue), "sigqueue", FAULT_ON_ERROR);
+		sigqueue = ULONG(signal_buf);
+
+		if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue))
+			sigqueue = 0;
+		if (flags & TASK_INDENT)
+			INDENT(2);
+		if (sigqueue) {
+               		fprintf(fp, "  SIGQUEUE:  SIG  %s\n",
+                       		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SIGINFO"));
+			 sigqueue_list(sigqueue);
+		} else
+               		fprintf(fp, "  SIGQUEUE: (empty)\n");
+	}
+	FREEBUF(signal_buf);
+}
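At thread-group level the code above prints one line per disposition: the kernel address of the k_sigaction, the handler (SIG_DFL, SIG_IGN, SIG_ERR or a user-space address), the blocked mask, and the sa_flags word decoded into a "(SA_...|SA_...)" string. A reduced sketch of just that flag decoding, using the SA_* constants from <signal.h>; the function name is illustrative.

#include <stdio.h>
#include <signal.h>

/* Render a sigaction sa_flags word the way the sig command does:
 * "(SA_SIGINFO|SA_RESTART|...)", or nothing when no flags are set.	*/
static void print_sa_flags(unsigned long f)
{
	static const struct { unsigned long bit; const char *name; } tbl[] = {
		{ SA_NOCLDSTOP, "SA_NOCLDSTOP" },
		{ SA_SIGINFO,   "SA_SIGINFO"   },
		{ SA_ONSTACK,   "SA_ONSTACK"   },
		{ SA_RESTART,   "SA_RESTART"   },
		{ SA_NODEFER,   "SA_NODEFER"   },
		{ SA_RESETHAND, "SA_RESETHAND" },
	};
	int i, others = 0;

	if (!f)
		return;
	for (i = 0; i < (int)(sizeof(tbl)/sizeof(tbl[0])); i++)
		if (f & tbl[i].bit)
			printf("%s%s", others++ ? "|" : "(", tbl[i].name);
	if (others)
		printf(")");
	printf("\n");
}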
+
+/*
+ *  Dump a pending signal queue (private/shared).
+ */
+
+static void sigqueue_list(ulong sigqueue) {
+        ulong sigqueue_save, next;
+	int sig;
+	char *signal_buf;
+	long size;
+        size = VALID_SIZE(signal_queue) ?  SIZE(signal_queue) : SIZE(sigqueue);
+        signal_buf = GETBUF(size);
 
+        sigqueue_save = sigqueue;
         while (sigqueue) {
         	readmem(sigqueue, KVADDR, signal_buf, 
 			SIZE_OPTION(signal_queue, sigqueue), 
@@ -5597,14 +7570,17 @@
 				OFFSET(siginfo_si_signo));
 		}
 
-                fprintf(fp, "           %3d  %lx\n",
+		if (sigqueue_save == next)
+			break;
+
+                fprintf(fp, "             %3d  %lx\n",
                         sig, sigqueue +
 			OFFSET_OPTION(signal_queue_info, sigqueue_info));
 
                 sigqueue = next;
         }
-
 	FREEBUF(signal_buf);
+
 }
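sigqueue_list() walks the private or shared pending queue as a singly linked chain of sigqueue (or signal_queue) entries and, new in this patch, remembers the pointer it started from so it can bail out if "next" wraps back to it; a shared_pending list whose head is a circular list_head therefore cannot loop the command forever. The guard in miniature (names local to the sketch):

#include <stdio.h>

struct entry {
	struct entry *next;
	int sig;
};

/* Walk a pending-signal chain; 'head' plays the role of sigqueue_save. */
static void walk_queue(struct entry *head)
{
	struct entry *e = head;

	while (e) {
		if (e->next == head)	/* circular list: back at the start */
			break;
		printf("  sig %d\n", e->sig);
		e = e->next;
	}
}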
 
 /*
@@ -5614,12 +7590,13 @@
  */
 
 static ulonglong 
-task_signal(ulong task)
+task_signal(ulong task, ulong *signal)
 {
 	ulonglong sigset;
 	ulong *sigset_ptr;
 
-        fill_task_struct(task);
+	if (task) {
+        	fill_task_struct(task);
 
 	if (!tt->last_task_read) 
 		return 0;
@@ -5633,6 +7610,10 @@
                         OFFSET(task_struct_signal));
         } else
 		return 0;
+	} else if (signal) {
+		sigset_ptr = signal;
+	} else
+		return 0;
 
 	switch (_NSIG_WORDS)
 	{
--- crash/xen_hyper_dump_tables.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/xen_hyper_dump_tables.c	2008-12-02 10:37:11.000000000 -0500
@@ -0,0 +1,950 @@
+/*
+ *  xen_hyper_dump_tables.c
+ *
+ *  Portions Copyright (C) 2006-2007 Fujitsu Limited
+ *  Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K.
+ *
+ *  Authors: Itsuro Oda <oda@valinux.co.jp>
+ *           Fumihiko Kakuma <kakuma@valinux.co.jp>
+ *
+ *  This file is part of Xencrash.
+ *
+ *  Xencrash is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation (version 2 of the License).
+ *
+ *  Xencrash is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with Xencrash; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
+ */
+
+#include "defs.h"
+
+#ifdef XEN_HYPERVISOR_ARCH
+#include "xen_hyper_defs.h"
+
+static void xen_hyper_dump_xen_hyper_table(int verbose);
+static void xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose);
+static void xen_hyper_dump_xen_hyper_domain_table(int verbose);
+static void xen_hyper_dump_xen_hyper_vcpu_table(int verbose);
+static void xen_hyper_dump_xen_hyper_pcpu_table(int verbose);
+static void xen_hyper_dump_xen_hyper_sched_table(int verbose);
+static void xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct);
+static void xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct);
+
+static void xen_hyper_dump_mem(void *mem, ulong len, int dsz);
+
+/*
+ *  Get help for a command, to dump an internal table, or the GNU public
+ *  license copying/warranty information.
+ */
+void
+xen_hyper_cmd_help(void)
+{
+	int c;
+	int oflag;
+
+	oflag = 0;
+
+        while ((c = getopt(argcnt, args, 
+	        "aBbcDgHhM:mnOopszX:")) != EOF) {
+                switch(c)
+                {
+		case 'a':
+			dump_alias_data();
+			return;
+		case 'b':
+			dump_shared_bufs();
+			return;
+		case 'B':
+			dump_build_data();
+			return;
+		case 'c':
+			dump_numargs_cache();
+			return;
+		case 'n':
+		case 'D':
+			dumpfile_memory(DUMPFILE_MEM_DUMP);
+			return;
+		case 'g':
+			dump_gdb_data();
+			return;
+		case 'H':
+			dump_hash_table(VERBOSE);
+			return;
+		case 'h':
+			dump_hash_table(!VERBOSE);
+ 			return;
+		case 'M':
+			dump_machdep_table(stol(optarg, FAULT_ON_ERROR, NULL));
+			return;
+		case 'm':
+			dump_machdep_table(0);
+			return;
+		case 'O':
+			dump_offset_table(NULL, TRUE);
+			return;
+		case 'o':
+			oflag = TRUE;
+			break;
+		case 'p':
+			dump_program_context();
+			return;
+		case 's':
+			dump_symbol_table();
+			return;
+		case 'X':
+			if (strlen(optarg) != 3) {
+				argerrs++;
+				break;
+			}
+			if (!strncmp("Xen", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_table(VERBOSE);
+			else if (!strncmp("xen", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_table(!VERBOSE);
+			else if (!strncmp("Dmp", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_dumpinfo_table(VERBOSE);
+			else if (!strncmp("dmp", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_dumpinfo_table(!VERBOSE);
+			else if (!strncmp("Dom", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_domain_table(VERBOSE);
+			else if (!strncmp("dom", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_domain_table(!VERBOSE);
+			else if (!strncmp("Vcp", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_vcpu_table(VERBOSE);
+			else if (!strncmp("vcp", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_vcpu_table(!VERBOSE);
+			else if (!strncmp("Pcp", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_pcpu_table(VERBOSE);
+			else if (!strncmp("pcp", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_pcpu_table(!VERBOSE);
+			else if (!strncmp("Sch", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_sched_table(VERBOSE);
+			else if (!strncmp("sch", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_sched_table(!VERBOSE);
+			else if (!strncmp("siz", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_size_table(NULL, TRUE);
+			else if (!strncmp("ofs", optarg, strlen(optarg)))
+				xen_hyper_dump_xen_hyper_offset_table(NULL, TRUE);
+			else {
+				argerrs++;
+				break;
+			}
+ 			return;
+		case 'z':
+			fprintf(fp, "help options:\n");
+			fprintf(fp, " -a - alias data\n");
+			fprintf(fp, " -b - shared buffer data\n");
+			fprintf(fp, " -B - build data\n");
+			fprintf(fp, " -c - numargs cache\n");
+			fprintf(fp, " -M <num> machine specific\n");
+			fprintf(fp, " -m - machdep_table\n");
+			fprintf(fp, " -s - symbol table data\n");
+			fprintf(fp, " -o - offset_table and size_table\n");
+			fprintf(fp, " -p - program_context\n");
+			fprintf(fp, " -h - hash_table data\n");
+			fprintf(fp, " -H - hash_table data (verbose)\n");
+			fprintf(fp, " -X Xen - xen table data (verbose)\n");
+			fprintf(fp, " -X xen - xen table data\n");
+			fprintf(fp, " -X Dmp - dumpinfo table data (verbose)\n");
+			fprintf(fp, " -X dmp - dumpinfo table data\n");
+			fprintf(fp, " -X Dom - domain table data (verbose)\n");
+			fprintf(fp, " -X dom - domain table data\n");
+			fprintf(fp, " -X Vcp - vcpu table data (verbose)\n");
+			fprintf(fp, " -X vcp - vcpu table data\n");
+			fprintf(fp, " -X Pcp - pcpu table data (verbose)\n");
+			fprintf(fp, " -X pcp - pcpu table data\n");
+			fprintf(fp, " -X Sch - schedule table data (verbose)\n");
+			fprintf(fp, " -X sch - schedule table data\n");
+			fprintf(fp, " -X siz - size table data\n");
+			fprintf(fp, " -X ofs - offset table data\n");
+			return;
+                default:  
+			argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(pc->curcmd, COMPLETE_HELP);
+
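+	/* no arguments: dump the full offset table (-o) or display the help screen */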
+	if (!args[optind]) {
+		if (oflag) 
+			dump_offset_table(NULL, FALSE);
+		else 
+			display_help_screen("");
+		return;
+	}
+
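+        /* arguments given: treat each one as an offset-table match (-o) or as a command name */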
+        do {
+		if (oflag) 
+			dump_offset_table(args[optind], FALSE);
+		else	
+        		cmd_usage(args[optind], COMPLETE_HELP);
+		optind++;
+        } while (args[optind]);
+}
+
+/*
+ * "help -x xen" output
+ */
+static void
+xen_hyper_dump_xen_hyper_table(int verbose)
+{
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	uint cpuid;
+	int len, flag, i;
+
+	len = 14;
+	flag = XEN_HYPER_PRI_R;
+
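+	/* print the global hypervisor table; verbose also dumps the cpumask and the cpu index map */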
+	XEN_HYPER_PRI(fp, len, "cpu_data_address: ", buf, flag,
+		(buf, "%lu\n", xht->cpu_data_address));
+	XEN_HYPER_PRI(fp, len, "cpu_curr: ", buf, flag,
+		(buf, "%u\n", xht->cpu_curr));
+	XEN_HYPER_PRI(fp, len, "max_cpus: ", buf, flag,
+		(buf, "%u\n", xht->max_cpus));
+	XEN_HYPER_PRI(fp, len, "cores: ", buf, flag,
+		(buf, "%d\n", xht->cores));
+	XEN_HYPER_PRI(fp, len, "pcpus: ", buf, flag,
+		(buf, "%d\n", xht->pcpus));
+	XEN_HYPER_PRI(fp, len, "vcpus: ", buf, flag,
+		(buf, "%d\n", xht->vcpus));
+	XEN_HYPER_PRI(fp, len, "domains: ", buf, flag,
+		(buf, "%d\n", xht->domains));
+	XEN_HYPER_PRI(fp, len, "sys_pages: ", buf, flag,
+		(buf, "%lu\n", xht->sys_pages));
+	XEN_HYPER_PRI(fp, len, "crashing_cpu: ", buf, flag,
+		(buf, "%d\n", xht->crashing_cpu));
+	XEN_HYPER_PRI(fp, len, "crashing_vcc: ", buf, flag,
+		(buf, "%p\n", xht->crashing_vcc));
+	XEN_HYPER_PRI(fp, len, "max_page: ", buf, flag,
+		(buf, "%lu\n", xht->max_page));
+	XEN_HYPER_PRI(fp, len, "total_pages: ", buf, flag,
+		(buf, "%lu\n", xht->total_pages));
+	XEN_HYPER_PRI(fp, len, "cpumask: ", buf, flag,
+		(buf, "%p\n", xht->cpumask));
+	if (verbose && xht->cpumask) {
+		xen_hyper_dump_mem(xht->cpumask,
+				XEN_HYPER_SIZE(cpumask_t), sizeof(long));
+	}
+	XEN_HYPER_PRI(fp, len, "cpu_idxs: ", buf, flag,
+		(buf, "%p\n", xht->cpu_idxs));
+	if (verbose) {
+		for_cpu_indexes(i, cpuid)
+			fprintf(fp, "%03d : %d\n", i, cpuid);
+	}
+}
+
+/*
+ * "help -x dmp" output
+ */
+static void
+xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose)
+{
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	int len, flag;
+
+	len = 25;
+	flag = XEN_HYPER_PRI_R;
+
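+	/* print the dumpinfo table; verbose also hexdumps the per-cpu context and crash note arrays */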
+	XEN_HYPER_PRI(fp, len, "note_ver: ", buf, flag,
+		(buf, "%u\n", xhdit->note_ver));
+	XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag,
+		(buf, "%p\n", xhdit->context_array));
+	if (verbose && xhdit->context_array) {
+		xen_hyper_dump_mem((long *)xhdit->context_array,
+				sizeof(struct xen_hyper_dumpinfo_context) *
+				XEN_HYPER_MAX_CPUS(), sizeof(long));
+	}
+	XEN_HYPER_PRI(fp, len, "context_xen_core_array: ", buf, flag,
+		(buf, "%p\n", xhdit->context_xen_core_array));
+	if (verbose && xhdit->context_xen_core_array) {
+		xen_hyper_dump_mem((long *)xhdit->context_xen_core_array,
+				sizeof(struct xen_hyper_dumpinfo_context_xen_core) *
+				XEN_HYPER_MAX_CPUS(), sizeof(long));
+	}
+	XEN_HYPER_PRI_CONST(fp, len, "context_xen_info: ", flag|XEN_HYPER_PRI_LF);
+	XEN_HYPER_PRI(fp, len, "note: ", buf, flag,
+		(buf, "%lx\n", xhdit->context_xen_info.note));
+	XEN_HYPER_PRI(fp, len, "pcpu_id: ", buf, flag,
+		(buf, "%u\n", xhdit->context_xen_info.pcpu_id));
+	XEN_HYPER_PRI(fp, len, "crash_xen_info_ptr: ", buf, flag,
+		(buf, "%p\n", xhdit->context_xen_info.crash_xen_info_ptr));
+	XEN_HYPER_PRI(fp, len, "crash_note_core_array: ", buf, flag,
+		(buf, "%p\n", xhdit->crash_note_core_array));
+	if (verbose && xhdit->crash_note_core_array) {
+		xen_hyper_dump_mem((long *)xhdit->crash_note_core_array,
+				xhdit->core_size * XEN_HYPER_NR_PCPUS(),
+				sizeof(long));
+	}
+	XEN_HYPER_PRI(fp, len, "crash_note_xen_core_array: ", buf, flag,
+		(buf, "%p\n", xhdit->crash_note_xen_core_array));
+	if (verbose && xhdit->crash_note_xen_core_array) {
+		xen_hyper_dump_mem(
+				xhdit->crash_note_xen_core_array,
+				xhdit->xen_core_size * XEN_HYPER_NR_PCPUS(),
+				sizeof(long));
+	}
+	XEN_HYPER_PRI(fp, len, "crash_note_xen_info_ptr: ", buf, flag,
+		(buf, "%p\n", xhdit->crash_note_xen_info_ptr));
+	if (verbose && xhdit->crash_note_xen_info_ptr) {
+		xen_hyper_dump_mem(
+				xhdit->crash_note_xen_info_ptr,
+				xhdit->xen_info_size, sizeof(long));
+	}
+	XEN_HYPER_PRI(fp, len, "xen_info_cpu: ", buf, flag,
+		(buf, "%u\n", xhdit->xen_info_cpu));
+	XEN_HYPER_PRI(fp, len, "note_size: ", buf, flag,
+		(buf, "%u\n", xhdit->note_size));
+	XEN_HYPER_PRI(fp, len, "core_offset: ", buf, flag,
+		(buf, "%u\n", xhdit->core_offset));
+	XEN_HYPER_PRI(fp, len, "core_size: ", buf, flag,
+		(buf, "%u\n", xhdit->core_size));
+	XEN_HYPER_PRI(fp, len, "xen_core_offset: ", buf, flag,
+		(buf, "%u\n", xhdit->xen_core_offset));
+	XEN_HYPER_PRI(fp, len, "xen_core_size: ", buf, flag,
+		(buf, "%u\n", xhdit->xen_core_size));
+	XEN_HYPER_PRI(fp, len, "xen_info_offset: ", buf, flag,
+		(buf, "%u\n", xhdit->xen_info_offset));
+	XEN_HYPER_PRI(fp, len, "xen_info_size: ", buf, flag,
+		(buf, "%u\n", xhdit->xen_info_size));
+}
+
+/*
+ * "help -x dom" output
+ */
+static void
+xen_hyper_dump_xen_hyper_domain_table(int verbose)
+{
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	struct xen_hyper_domain_context *dcca;
+	int len, flag, i;
+
+	len = 22;
+	flag = XEN_HYPER_PRI_R;
+
+	XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag,
+		(buf, "%p\n", xhdt->context_array));
+	if (verbose) {
+		char buf1[XEN_HYPER_CMD_BUFSIZE];
+		int j;
+		for (i = 0, dcca = xhdt->context_array;
+		i < xhdt->context_array_cnt; i++, dcca++) {
+			snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", i);
+			XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF);
+			XEN_HYPER_PRI(fp, len, "domain: ", buf, flag,
+				(buf, "%lx\n", dcca->domain));
+			XEN_HYPER_PRI(fp, len, "domain_id: ", buf, flag,
+				(buf, "%d\n", dcca->domain_id));
+			XEN_HYPER_PRI(fp, len, "tot_pages: ", buf, flag,
+				(buf, "%x\n", dcca->tot_pages));
+			XEN_HYPER_PRI(fp, len, "max_pages: ", buf, flag,
+				(buf, "%x\n", dcca->max_pages));
+			XEN_HYPER_PRI(fp, len, "xenheap_pages: ", buf, flag,
+				(buf, "%x\n", dcca->xenheap_pages));
+			XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag,
+				(buf, "%lx\n", dcca->shared_info));
+			XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag,
+				(buf, "%lx\n", dcca->sched_priv));
+			XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag,
+				(buf, "%lx\n", dcca->next_in_list));
+			XEN_HYPER_PRI(fp, len, "domain_flags: ", buf, flag,
+				(buf, "%lx\n", dcca->domain_flags));
+			XEN_HYPER_PRI(fp, len, "evtchn: ", buf, flag,
+				(buf, "%lx\n", dcca->evtchn));
+			XEN_HYPER_PRI(fp, len, "vcpu_cnt: ", buf, flag,
+				(buf, "%d\n", dcca->vcpu_cnt));
+			for (j = 0; j < XEN_HYPER_MAX_VIRT_CPUS; j++) {
+				snprintf(buf1, XEN_HYPER_CMD_BUFSIZE, "vcpu[%d]: ", j);
+				XEN_HYPER_PRI(fp, len, buf1, buf, flag,
+					(buf, "%lx\n", dcca->vcpu[j]));
+			}
+			XEN_HYPER_PRI(fp, len, "vcpu_context_array: ", buf, flag,
+				(buf, "%p\n", dcca->vcpu_context_array));
+		}
+	}
+	XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag,
+		(buf, "%d\n", xhdt->context_array_cnt));
+	XEN_HYPER_PRI(fp, len, "running_domains: ", buf, flag,
+		(buf, "%lu\n", xhdt->running_domains));
+	XEN_HYPER_PRI(fp, len, "dom_io: ", buf, flag,
+		(buf, "%p\n", xhdt->dom_io));
+	XEN_HYPER_PRI(fp, len, "dom_xen: ", buf, flag,
+		(buf, "%p\n", xhdt->dom_xen));
+	XEN_HYPER_PRI(fp, len, "dom0: ", buf, flag,
+		(buf, "%p\n", xhdt->dom0));
+	XEN_HYPER_PRI(fp, len, "idle_domain: ", buf, flag,
+		(buf, "%p\n", xhdt->idle_domain));
+	XEN_HYPER_PRI(fp, len, "curr_domain: ", buf, flag,
+		(buf, "%p\n", xhdt->curr_domain));
+	XEN_HYPER_PRI(fp, len, "last: ", buf, flag,
+		(buf, "%p\n", xhdt->last));
+	XEN_HYPER_PRI(fp, len, "domain_struct: ", buf, flag,
+		(buf, "%p\n", xhdt->domain_struct));
+	XEN_HYPER_PRI(fp, len, "domain_struct_verify: ", buf, flag,
+		(buf, "%p\n", xhdt->domain_struct_verify));
+}
+
+/*
+ * "help -x vcp" output
+ */
+static void
+xen_hyper_dump_xen_hyper_vcpu_table(int verbose)
+{
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	int len, flag;
+
+	len = 25;
+	flag = XEN_HYPER_PRI_R;
+
+	XEN_HYPER_PRI(fp, len, "vcpu_context_arrays: ", buf, flag,
+		(buf, "%p\n", xhvct->vcpu_context_arrays));
+	XEN_HYPER_PRI(fp, len, "vcpu_context_arrays_cnt: ", buf, flag,
+		(buf, "%d\n", xhvct->vcpu_context_arrays_cnt));
+	if (verbose) {
+		struct xen_hyper_vcpu_context_array *vcca;
+		struct xen_hyper_vcpu_context *vca;
+		int i, j;
+
+		for (i = 0, vcca = xhvct->vcpu_context_arrays;
+		i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) {
+			snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "vcpu_context_arrays[%d]: ", i);
+			XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF);
+			if (vcca->context_array) {
+				XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag,
+					(buf, "%p\n", vcca->context_array));
+			} else {
+				XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag,
+					(buf, "NULL\n"));
+			}
+			XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag,
+				(buf, "%d\n", vcca->context_array_cnt));
+			XEN_HYPER_PRI(fp, len, "context_array_valid: ", buf, flag,
+				(buf, "%d\n", vcca->context_array_valid));
+			for (j = 0, vca = vcca->context_array;
+			j < vcca->context_array_cnt; j++, vca++) {
+				snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", j);
+				XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF);
+				XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag,
+					(buf, "%lx\n", vca->vcpu));
+				XEN_HYPER_PRI(fp, len, "vcpu_id: ", buf, flag,
+					(buf, "%d\n", vca->vcpu_id));
+				XEN_HYPER_PRI(fp, len, "processor: ", buf, flag,
+					(buf, "%d\n", vca->processor));
+				XEN_HYPER_PRI(fp, len, "vcpu_info: ", buf, flag,
+					(buf, "%lx\n", vca->vcpu_info));
+				XEN_HYPER_PRI(fp, len, "domain: ", buf, flag,
+					(buf, "%lx\n", vca->domain));
+				XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag,
+					(buf, "%lx\n", vca->next_in_list));
+				XEN_HYPER_PRI(fp, len, "sleep_tick: ", buf, flag,
+					(buf, "%lx\n", vca->sleep_tick));
+				XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag,
+					(buf, "%lx\n", vca->sched_priv));
+				XEN_HYPER_PRI(fp, len, "state: ", buf, flag,
+					(buf, "%d\n", vca->state));
+				XEN_HYPER_PRI(fp, len, "state_entry_time: ", buf, flag,
+					(buf, "%llx\n", (unsigned long long)(vca->state_entry_time)));
+				XEN_HYPER_PRI(fp, len, "runstate_guest: ", buf, flag,
+					(buf, "%lx\n", vca->runstate_guest));
+				XEN_HYPER_PRI(fp, len, "vcpu_flags: ", buf, flag,
+					(buf, "%lx\n", vca->vcpu_flags));
+			}
+		}
+	}
+	XEN_HYPER_PRI(fp, len, "idle_vcpu: ", buf, flag,
+		(buf, "%lx\n", xhvct->idle_vcpu));
+	XEN_HYPER_PRI(fp, len, "idle_vcpu_context_array: ", buf, flag,
+		(buf, "%p\n", xhvct->idle_vcpu_context_array));
+	XEN_HYPER_PRI(fp, len, "last: ", buf, flag,
+		(buf, "%p\n", xhvct->last));
+	XEN_HYPER_PRI(fp, len, "vcpu_struct: ", buf, flag,
+		(buf, "%p\n", xhvct->vcpu_struct));
+	XEN_HYPER_PRI(fp, len, "vcpu_struct_verify: ", buf, flag,
+		(buf, "%p\n", xhvct->vcpu_struct_verify));
+}
+
+/*
+ * "help -x pcp" output
+ */
+static void
+xen_hyper_dump_xen_hyper_pcpu_table(int verbose)
+{
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	struct xen_hyper_pcpu_context *pcca;
+	int len, flag, i;
+#ifdef X86_64
+	uint64_t *ist_p;
+	int j;
+#endif
+
+	len = 21;
+	flag = XEN_HYPER_PRI_R;
+
+	XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag,
+		(buf, "%p\n", xhpct->context_array));
+	if (verbose) {
+		for (i = 0, pcca = xhpct->context_array;
+		i < XEN_HYPER_MAX_CPUS(); i++, pcca++) {
+			snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array %d: ", i);
+			XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF);
+			XEN_HYPER_PRI(fp, len, "pcpu: ", buf, flag,
+				(buf, "%lx\n", pcca->pcpu));
+			XEN_HYPER_PRI(fp, len, "processor_id: ", buf, flag,
+				(buf, "%u\n", pcca->processor_id));
+			XEN_HYPER_PRI(fp, len, "guest_cpu_user_regs: ", buf, flag,
+				(buf, "%lx\n", pcca->guest_cpu_user_regs));
+			XEN_HYPER_PRI(fp, len, "current_vcpu: ", buf, flag,
+				(buf, "%lx\n", pcca->current_vcpu));
+			XEN_HYPER_PRI(fp, len, "init_tss: ", buf, flag,
+				(buf, "%lx\n", pcca->init_tss));
+#ifdef X86
+			XEN_HYPER_PRI(fp, len, "sp.esp0: ", buf, flag,
+				(buf, "%x\n", pcca->sp.esp0));
+#endif
+#ifdef X86_64
+			XEN_HYPER_PRI(fp, len, "sp.rsp0: ", buf, flag,
+				(buf, "%lx\n", pcca->sp.rsp0));
+			for (j = 0, ist_p = pcca->ist;
+			j < XEN_HYPER_TSS_IST_MAX; j++, ist_p++) {
+				XEN_HYPER_PRI(fp, len, "ist: ", buf, flag,
+					(buf, "%lx\n", *ist_p));
+			}
+#endif
+		}
+	}
+	XEN_HYPER_PRI(fp, len, "last: ", buf, flag,
+		(buf, "%p\n", xhpct->last));
+	XEN_HYPER_PRI(fp, len, "pcpu_struct: ", buf, flag,
+		(buf, "%p\n", xhpct->pcpu_struct));
+}
+
+/*
+ * "help -x sch" output
+ */
+static void
+xen_hyper_dump_xen_hyper_sched_table(int verbose)
+{
+	struct xen_hyper_sched_context *schc;
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	int len, flag, i;
+
+	len = 21;
+	flag = XEN_HYPER_PRI_R;
+
+	XEN_HYPER_PRI(fp, len, "name: ", buf, flag,
+		(buf, "%s\n", xhscht->name));
+	XEN_HYPER_PRI(fp, len, "opt_sched: ", buf, flag,
+		(buf, "%s\n", xhscht->opt_sched));
+	XEN_HYPER_PRI(fp, len, "sched_id: ", buf, flag,
+		(buf, "%d\n", xhscht->sched_id));
+	XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag,
+		(buf, "%lx\n", xhscht->scheduler));
+	XEN_HYPER_PRI(fp, len, "scheduler_struct: ", buf, flag,
+		(buf, "%p\n", xhscht->scheduler_struct));
+	XEN_HYPER_PRI(fp, len, "sched_context_array: ", buf, flag,
+		(buf, "%p\n", xhscht->sched_context_array));
+	if (verbose) {
+		for (i = 0, schc = xhscht->sched_context_array;
+		i < xht->pcpus; i++, schc++) {
+			XEN_HYPER_PRI(fp, len, "sched_context_array[", buf,
+				flag, (buf, "%d]\n", i));
+			XEN_HYPER_PRI(fp, len, "schedule_data: ", buf, flag,
+				(buf, "%lx\n", schc->schedule_data));
+			XEN_HYPER_PRI(fp, len, "curr: ", buf, flag,
+				(buf, "%lx\n", schc->curr));
+			XEN_HYPER_PRI(fp, len, "idle: ", buf, flag,
+				(buf, "%lx\n", schc->idle));
+			XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag,
+				(buf, "%lx\n", schc->sched_priv));
+			XEN_HYPER_PRI(fp, len, "tick: ", buf, flag,
+				(buf, "%lx\n", schc->tick));
+		}
+	}
+}
+
+/*
+ * "help -x siz" output
+ */
+static void
+xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct)
+{
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	int len, flag;
+
+	len = 23;
+	flag = XEN_HYPER_PRI_R;
+
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.ELF_Prstatus));
+	XEN_HYPER_PRI(fp, len, "ELF_Signifo: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.ELF_Signifo));
+	XEN_HYPER_PRI(fp, len, "ELF_Gregset: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.ELF_Gregset));
+	XEN_HYPER_PRI(fp, len, "ELF_Timeval: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.ELF_Timeval));
+	XEN_HYPER_PRI(fp, len, "arch_domain: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.arch_domain));
+	XEN_HYPER_PRI(fp, len, "arch_shared_info: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.arch_shared_info));
+	XEN_HYPER_PRI(fp, len, "cpu_info: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.cpu_info));
+	XEN_HYPER_PRI(fp, len, "cpu_time: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.cpu_time));
+	XEN_HYPER_PRI(fp, len, "cpu_user_regs: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.cpu_user_regs));
+	XEN_HYPER_PRI(fp, len, "cpumask_t: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.cpumask_t));
+	XEN_HYPER_PRI(fp, len, "cpuinfo_ia64: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.cpuinfo_ia64));
+	XEN_HYPER_PRI(fp, len, "cpuinfo_x86: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.cpuinfo_x86));
+	XEN_HYPER_PRI(fp, len, "crash_note_t: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.crash_note_t));
+	XEN_HYPER_PRI(fp, len, "crash_note_core_t: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.crash_note_core_t));
+	XEN_HYPER_PRI(fp, len, "crash_note_xen_t: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.crash_note_xen_t));
+	XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.crash_note_xen_core_t));
+	XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.crash_note_xen_info_t));
+	XEN_HYPER_PRI(fp, len, "crash_xen_core_t: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.crash_xen_core_t));
+	XEN_HYPER_PRI(fp, len, "crash_xen_info_t: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.crash_xen_info_t));
+	XEN_HYPER_PRI(fp, len, "domain: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.domain));
+#ifdef IA64
+	XEN_HYPER_PRI(fp, len, "mm_struct: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.mm_struct));
+#endif
+	XEN_HYPER_PRI(fp, len, "note_buf_t: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.note_buf_t));
+	XEN_HYPER_PRI(fp, len, "schedule_data: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.schedule_data));
+	XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.scheduler));
+	XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.shared_info));
+	XEN_HYPER_PRI(fp, len, "timer: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.timer));
+	XEN_HYPER_PRI(fp, len, "tss_struct: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.tss_struct));
+	XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.vcpu));
+	XEN_HYPER_PRI(fp, len, "vcpu_runstate_info: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.vcpu_runstate_info));
+	XEN_HYPER_PRI(fp, len, "xen_crash_xen_regs_t: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_size_table.xen_crash_xen_regs_t));
+}
+
+/*
+ * "help -x ofs" output
+ */
+static void
+xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct)
+{
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	int len, flag;
+
+	len = 45;
+	flag = XEN_HYPER_PRI_R;
+
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_info: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_info));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cursig: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cursig));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sigpend: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sigpend));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sighold: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sighold));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pid: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pid));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_ppid: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_ppid));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pgrp: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pgrp));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sid: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sid));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_stime: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_stime));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cutime: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cutime));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cstime: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cstime));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_reg: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_reg));
+	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_fpvalid: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_fpvalid));
+	XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_sec: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_sec));
+	XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_usec: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_usec));
+
+#ifdef IA64
+	XEN_HYPER_PRI(fp, len, "arch_domain_mm: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.arch_domain_mm));
+#endif
+
+	XEN_HYPER_PRI(fp, len, "arch_shared_info_max_pfn: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_max_pfn));
+	XEN_HYPER_PRI(fp, len, "arch_shared_info_pfn_to_mfn_frame_list_list: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_pfn_to_mfn_frame_list_list));
+	XEN_HYPER_PRI(fp, len, "arch_shared_info_nmi_reason: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_nmi_reason));
+
+	XEN_HYPER_PRI(fp, len, "cpu_info_guest_cpu_user_regs: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.cpu_info_guest_cpu_user_regs));
+	XEN_HYPER_PRI(fp, len, "cpu_info_processor_id: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.cpu_info_processor_id));
+	XEN_HYPER_PRI(fp, len, "cpu_info_current_vcpu: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.cpu_info_current_vcpu));
+
+	XEN_HYPER_PRI(fp, len, "cpu_time_local_tsc_stamp: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.cpu_time_local_tsc_stamp));
+	XEN_HYPER_PRI(fp, len, "cpu_time_stime_local_stamp: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_local_stamp));
+	XEN_HYPER_PRI(fp, len, "cpu_time_stime_master_stamp: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_master_stamp));
+	XEN_HYPER_PRI(fp, len, "cpu_time_tsc_scale: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.cpu_time_tsc_scale));
+	XEN_HYPER_PRI(fp, len, "cpu_time_calibration_timer: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.cpu_time_calibration_timer));
+
+	XEN_HYPER_PRI(fp, len, "crash_note_t_core: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_t_core));
+	XEN_HYPER_PRI(fp, len, "crash_note_t_xen: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen));
+	XEN_HYPER_PRI(fp, len, "crash_note_t_xen_regs: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_regs));
+	XEN_HYPER_PRI(fp, len, "crash_note_t_xen_info: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_info));
+
+	XEN_HYPER_PRI(fp, len, "crash_note_core_t_note: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_core_t_note));
+	XEN_HYPER_PRI(fp, len, "crash_note_core_t_desc: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_core_t_desc));
+
+	XEN_HYPER_PRI(fp, len, "crash_note_xen_t_note: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_note));
+	XEN_HYPER_PRI(fp, len, "crash_note_xen_t_desc: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_desc));
+
+	XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_note: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_note));
+	XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_desc: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_desc));
+
+	XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_note: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_note));
+	XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_desc: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_desc));
+
+	XEN_HYPER_PRI(fp, len, "domain_page_list: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_page_list));
+	XEN_HYPER_PRI(fp, len, "domain_xenpage_list: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_xenpage_list));
+	XEN_HYPER_PRI(fp, len, "domain_domain_id: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_domain_id));
+	XEN_HYPER_PRI(fp, len, "domain_tot_pages: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_tot_pages));
+	XEN_HYPER_PRI(fp, len, "domain_max_pages: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_max_pages));
+	XEN_HYPER_PRI(fp, len, "domain_xenheap_pages: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_xenheap_pages));
+	XEN_HYPER_PRI(fp, len, "domain_shared_info: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_shared_info));
+	XEN_HYPER_PRI(fp, len, "domain_sched_priv: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_sched_priv));
+	XEN_HYPER_PRI(fp, len, "domain_next_in_list: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_next_in_list));
+	XEN_HYPER_PRI(fp, len, "domain_domain_flags: ", buf, flag,
+		(buf, "%lx\n", xen_hyper_offset_table.domain_domain_flags));
+	XEN_HYPER_PRI(fp, len, "domain_evtchn: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_evtchn));
+	XEN_HYPER_PRI(fp, len, "domain_is_hvm: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_is_hvm));
+	XEN_HYPER_PRI(fp, len, "domain_is_privileged: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_is_privileged));
+	XEN_HYPER_PRI(fp, len, "domain_debugger_attached: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_debugger_attached));
+	if (XEN_HYPER_VALID_MEMBER(domain_is_polling)) {
+		XEN_HYPER_PRI(fp, len, "domain_is_polling: ", buf, flag,
+			(buf, "%ld\n", xen_hyper_offset_table.domain_is_polling));
+	}
+	XEN_HYPER_PRI(fp, len, "domain_is_dying: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_is_dying));
+	XEN_HYPER_PRI(fp, len, "domain_is_paused_by_controller: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_is_paused_by_controller));
+	XEN_HYPER_PRI(fp, len, "domain_is_shutting_down: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_is_shutting_down));
+	XEN_HYPER_PRI(fp, len, "domain_is_shut_down: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_is_shut_down));
+	XEN_HYPER_PRI(fp, len, "domain_vcpu: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_vcpu));
+	XEN_HYPER_PRI(fp, len, "domain_arch: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.domain_arch));
+
+#ifdef IA64
+	XEN_HYPER_PRI(fp, len, "mm_struct_pgd: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.mm_struct_pgd));
+#endif
+
+	XEN_HYPER_PRI(fp, len, "schedule_data_schedule_lock: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.schedule_data_schedule_lock));
+	XEN_HYPER_PRI(fp, len, "schedule_data_curr: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.schedule_data_curr));
+	XEN_HYPER_PRI(fp, len, "schedule_data_idle: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.schedule_data_idle));
+	XEN_HYPER_PRI(fp, len, "schedule_data_sched_priv: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.schedule_data_sched_priv));
+	XEN_HYPER_PRI(fp, len, "schedule_data_s_timer: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.schedule_data_s_timer));
+	XEN_HYPER_PRI(fp, len, "schedule_data_tick: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.schedule_data_tick));
+
+	XEN_HYPER_PRI(fp, len, "scheduler_name: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_name));
+	XEN_HYPER_PRI(fp, len, "scheduler_opt_name: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_opt_name));
+	XEN_HYPER_PRI(fp, len, "scheduler_sched_id: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_sched_id));
+	XEN_HYPER_PRI(fp, len, "scheduler_init: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_init));
+	XEN_HYPER_PRI(fp, len, "scheduler_tick: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_tick));
+	XEN_HYPER_PRI(fp, len, "scheduler_init_vcpu: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_init_vcpu));
+	XEN_HYPER_PRI(fp, len, "scheduler_destroy_domain: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_destroy_domain));
+	XEN_HYPER_PRI(fp, len, "scheduler_sleep: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_sleep));
+	XEN_HYPER_PRI(fp, len, "scheduler_wake: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_wake));
+	XEN_HYPER_PRI(fp, len, "scheduler_set_affinity: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_set_affinity));
+	XEN_HYPER_PRI(fp, len, "scheduler_do_schedule: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_do_schedule));
+	XEN_HYPER_PRI(fp, len, "scheduler_adjust: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_adjust));
+	XEN_HYPER_PRI(fp, len, "scheduler_dump_settings: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_settings));
+	XEN_HYPER_PRI(fp, len, "scheduler_dump_cpu_state: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_cpu_state));
+
+	XEN_HYPER_PRI(fp, len, "shared_info_vcpu_info: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.shared_info_vcpu_info));
+	XEN_HYPER_PRI(fp, len, "shared_info_evtchn_pending: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_pending));
+	XEN_HYPER_PRI(fp, len, "shared_info_evtchn_mask: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_mask));
+	XEN_HYPER_PRI(fp, len, "shared_info_arch: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.shared_info_arch));
+
+	XEN_HYPER_PRI(fp, len, "timer_expires: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.timer_expires));
+	XEN_HYPER_PRI(fp, len, "timer_cpu: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.timer_cpu));
+	XEN_HYPER_PRI(fp, len, "timer_function: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.timer_function));
+	XEN_HYPER_PRI(fp, len, "timer_data: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.timer_data));
+	XEN_HYPER_PRI(fp, len, "timer_heap_offset: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.timer_heap_offset));
+	XEN_HYPER_PRI(fp, len, "timer_killed: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.timer_killed));
+
+	XEN_HYPER_PRI(fp, len, "tss_struct_rsp0: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.tss_struct_rsp0));
+	XEN_HYPER_PRI(fp, len, "tss_struct_esp0: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.tss_struct_esp0));
+
+	XEN_HYPER_PRI(fp, len, "vcpu_vcpu_id: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_id));
+	XEN_HYPER_PRI(fp, len, "vcpu_processor: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_processor));
+	XEN_HYPER_PRI(fp, len, "vcpu_vcpu_info: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_info));
+	XEN_HYPER_PRI(fp, len, "vcpu_domain: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_domain));
+	XEN_HYPER_PRI(fp, len, "vcpu_next_in_list: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_next_in_list));
+	XEN_HYPER_PRI(fp, len, "vcpu_timer: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_timer));
+	XEN_HYPER_PRI(fp, len, "vcpu_sleep_tick: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_sleep_tick));
+	XEN_HYPER_PRI(fp, len, "vcpu_poll_timer: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_poll_timer));
+	XEN_HYPER_PRI(fp, len, "vcpu_sched_priv: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_sched_priv));
+	XEN_HYPER_PRI(fp, len, "vcpu_runstate: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate));
+	XEN_HYPER_PRI(fp, len, "vcpu_runstate_guest: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_guest));
+	XEN_HYPER_PRI(fp, len, "vcpu_vcpu_flags: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_flags));
+	XEN_HYPER_PRI(fp, len, "vcpu_pause_count: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_pause_count));
+	XEN_HYPER_PRI(fp, len, "vcpu_virq_to_evtchn: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_virq_to_evtchn));
+	XEN_HYPER_PRI(fp, len, "vcpu_cpu_affinity: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_cpu_affinity));
+	XEN_HYPER_PRI(fp, len, "vcpu_nmi_addr: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_nmi_addr));
+	XEN_HYPER_PRI(fp, len, "vcpu_vcpu_dirty_cpumask: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_dirty_cpumask));
+	XEN_HYPER_PRI(fp, len, "vcpu_arch: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_arch));
+	XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_state));
+	XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state_entry_time: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_state_entry_time));
+	XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_time: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_time));
+#ifdef IA64
+	XEN_HYPER_PRI(fp, len, "vcpu_thread_ksp: ", buf, flag,
+		(buf, "%ld\n", xen_hyper_offset_table.vcpu_thread_ksp));
+#endif
+}
+
+/*
+ * Dump the specified memory region in units of the specified size.
+ */
+#define DSP_BYTE_SIZE 16
+
+static void
+xen_hyper_dump_mem(void *mem, ulong len, int dsz)
+{
+	long i, max;
+	void *mem_w = mem;
+
+	if (!len || 
+	(dsz != SIZEOF_8BIT && dsz != SIZEOF_16BIT &&
+	 dsz != SIZEOF_32BIT && dsz != SIZEOF_64BIT))
+		return;
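+	/* round up so that a trailing partial unit is still displayed */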
+	max = len / dsz + (len % dsz ? 1 : 0);
+	for (i = 0; i <  max; i++) {
+		if (i != 0 && !(i % (DSP_BYTE_SIZE / dsz)))
+			fprintf(fp, "\n");
+		if (i == 0 || !(i % (DSP_BYTE_SIZE / dsz)))
+			fprintf(fp, "%p : ", mem_w);
+		if (dsz == SIZEOF_8BIT)
+			fprintf(fp, "%02x ", *(uint8_t *)mem_w);
+		else if (dsz == SIZEOF_16BIT)
+			fprintf(fp, "%04x ", *(uint16_t *)mem_w);
+		else if (dsz == SIZEOF_32BIT)
+			fprintf(fp, "%08x ", *(uint32_t *)mem_w);
+		else if (dsz == SIZEOF_64BIT)
+			fprintf(fp, "%016llx ", *(unsigned long long *)mem_w);
+		mem_w = (char *)mem_w + dsz;
+	}
+	fprintf(fp, "\n");
+}
+#endif
--- crash/lkcd_dump_v8.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_dump_v8.h	2007-10-30 10:51:55.000000000 -0400
@@ -235,4 +235,304 @@
 	int             stack_offset;
 } lkcdinfo_t;
 
+/*
+ *
+ * machine specific dump headers
+ *
+ */
+
+/*
+ * IA64 ---------------------------------------------------------
+ */
+
+#if defined(IA64)
+
+#define DUMP_ASM_MAGIC_NUMBER     0xdeaddeadULL  /* magic number */
+#define DUMP_ASM_VERSION_NUMBER   0x5            /* version number          */
+
+
+struct pt_regs {
+	/* The following registers are saved by SAVE_MIN: */
+	unsigned long b6;		/* scratch */
+	unsigned long b7;		/* scratch */
+
+	unsigned long ar_csd;           /* used by cmp8xchg16 (scratch) */
+	unsigned long ar_ssd;           /* reserved for future use (scratch) */
+
+	unsigned long r8;		/* scratch (return value register 0) */
+	unsigned long r9;		/* scratch (return value register 1) */
+	unsigned long r10;		/* scratch (return value register 2) */
+	unsigned long r11;		/* scratch (return value register 3) */
+
+	unsigned long cr_ipsr;		/* interrupted task's psr */
+	unsigned long cr_iip;		/* interrupted task's instruction pointer */
+	unsigned long cr_ifs;		/* interrupted task's function state */
+
+	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
+	unsigned long ar_pfs;		/* prev function state  */
+	unsigned long ar_rsc;		/* RSE configuration */
+	/* The following two are valid only if cr_ipsr.cpl > 0: */
+	unsigned long ar_rnat;		/* RSE NaT */
+	unsigned long ar_bspstore;	/* RSE bspstore */
+
+	unsigned long pr;		/* 64 predicate registers (1 bit each) */
+	unsigned long b0;		/* return pointer (bp) */
+	unsigned long loadrs;		/* size of dirty partition << 16 */
+
+	unsigned long r1;		/* the gp pointer */
+	unsigned long r12;		/* interrupted task's memory stack pointer */
+	unsigned long r13;		/* thread pointer */
+
+	unsigned long ar_fpsr;		/* floating point status (preserved) */
+	unsigned long r15;		/* scratch */
+
+	/* The remaining registers are NOT saved for system calls.  */
+
+	unsigned long r14;		/* scratch */
+	unsigned long r2;		/* scratch */
+	unsigned long r3;		/* scratch */
+
+	/* The following registers are saved by SAVE_REST: */
+	unsigned long r16;		/* scratch */
+	unsigned long r17;		/* scratch */
+	unsigned long r18;		/* scratch */
+	unsigned long r19;		/* scratch */
+	unsigned long r20;		/* scratch */
+	unsigned long r21;		/* scratch */
+	unsigned long r22;		/* scratch */
+	unsigned long r23;		/* scratch */
+	unsigned long r24;		/* scratch */
+	unsigned long r25;		/* scratch */
+	unsigned long r26;		/* scratch */
+	unsigned long r27;		/* scratch */
+	unsigned long r28;		/* scratch */
+	unsigned long r29;		/* scratch */
+	unsigned long r30;		/* scratch */
+	unsigned long r31;		/* scratch */
+
+	unsigned long ar_ccv;		/* compare/exchange value (scratch) */
+
+	/*
+	 * Floating point registers that the kernel considers scratch:
+	 */
+	struct ia64_fpreg f6;		/* scratch */
+	struct ia64_fpreg f7;		/* scratch */
+	struct ia64_fpreg f8;		/* scratch */
+	struct ia64_fpreg f9;		/* scratch */
+	struct ia64_fpreg f10;		/* scratch */
+	struct ia64_fpreg f11;		/* scratch */
+};
+
+
+
+/*
+ * Structure: dump_header_asm_t
+ *  Function: This is the header for architecture-specific stuff.  It
+ *            follows right after the dump header.
+ *
+ */
+typedef struct _dump_header_asm_s {
+
+        /* the dump magic number -- unique to verify dump is valid */
+        uint64_t             dha_magic_number;
+
+        /* the version number of this dump */
+        uint32_t             dha_version;
+
+        /* the size of this header (in case we can't read it) */
+        uint32_t             dha_header_size;
+
+        /* pointer to pt_regs (OLD: struct pt_regs *, NEW: uint64_t) */
+	uint64_t             dha_pt_regs;
+
+	/* the dump registers */
+	struct pt_regs       dha_regs;
+
+        /* the rnat register saved after flushrs */
+        uint64_t             dha_rnat;
+
+	/* the pfs register saved after flushrs */
+	uint64_t             dha_pfs;
+
+	/* the bspstore register saved after flushrs */
+	uint64_t             dha_bspstore;
+
+	/* smp specific */
+	uint32_t	     dha_smp_num_cpus;
+	uint32_t	     dha_dumping_cpu;
+	struct pt_regs	     dha_smp_regs[NR_CPUS];
+	uint64_t	     dha_smp_current_task[NR_CPUS];
+	uint64_t	     dha_stack[NR_CPUS];
+	uint64_t	     dha_stack_ptr[NR_CPUS];
+
+	/* load address of kernel */
+        uint64_t             dha_kernel_addr;
+
+} __attribute__((packed)) dump_header_asm_t;
+
+struct dump_CPU_info_ia64 {
+	struct pt_regs	     dha_smp_regs;
+	uint64_t	     dha_smp_current_task;
+	uint64_t	     dha_stack;
+	uint64_t	     dha_stack_ptr;
+} __attribute__((packed)) dump_CPU_info_ia64_t;
+
+typedef struct dump_CPU_info_ia64 dump_CPU_info_t;
+
+/*
+ * i386 ---------------------------------------------------------
+ */
+
+#elif defined(X86)
+
+#define DUMP_ASM_MAGIC_NUMBER	0xdeaddeadULL	/* magic number            */
+#define DUMP_ASM_VERSION_NUMBER	0x5	/* version number          */
+
+
+struct pt_regs {
+	long ebx;
+	long ecx;
+	long edx;
+	long esi;
+	long edi;
+	long ebp;
+	long eax;
+	int  xds;
+	int  xes;
+	long orig_eax;
+	long eip;
+	int  xcs;
+	long eflags;
+	long esp;
+	int  xss;
+};
+
+/*
+ * Structure: __dump_header_asm
+ *  Function: This is the header for architecture-specific stuff.  It
+ *            follows right after the dump header.
+ */
+typedef struct _dump_header_asm_s {
+	/* the dump magic number -- unique to verify dump is valid */
+	uint64_t	dha_magic_number;
+
+	/* the version number of this dump */
+	uint32_t	dha_version;
+
+	/* the size of this header (in case we can't read it) */
+	uint32_t	dha_header_size;
+
+	/* the esp for i386 systems */
+	uint32_t	dha_esp;
+
+	/* the eip for i386 systems */
+	uint32_t	dha_eip;
+
+	/* the dump registers */
+	struct pt_regs	dha_regs;
+
+	/* smp specific */
+	uint32_t	dha_smp_num_cpus;
+	uint32_t	dha_dumping_cpu;
+	struct pt_regs	dha_smp_regs[NR_CPUS];
+	uint32_t	dha_smp_current_task[NR_CPUS];
+	uint32_t	dha_stack[NR_CPUS];
+	uint32_t	dha_stack_ptr[NR_CPUS];
+} __attribute__((packed)) dump_header_asm_t;
+
+/*
+ * CPU specific part of dump_header_asm_t
+ */
+typedef struct dump_CPU_info_s {
+	struct pt_regs	dha_smp_regs;
+	uint64_t	dha_smp_current_task;
+	uint64_t	dha_stack;
+	uint64_t	dha_stack_ptr;
+} __attribute__ ((packed)) dump_CPU_info_t;
+
+
+/*
+ * x86-64 ---------------------------------------------------------
+ */
+
+#elif defined(X86_64)
+
+/* definitions */
+#define DUMP_ASM_MAGIC_NUMBER     0xdeaddeadULL  /* magic number            */
+#define DUMP_ASM_VERSION_NUMBER   0x2            /* version number          */
+
+
+struct pt_regs {
+	unsigned long r15;
+	unsigned long r14;
+	unsigned long r13;
+	unsigned long r12;
+	unsigned long rbp;
+	unsigned long rbx;
+/* arguments: non interrupts/non tracing syscalls only save upto here*/
+ 	unsigned long r11;
+	unsigned long r10;
+	unsigned long r9;
+	unsigned long r8;
+	unsigned long rax;
+	unsigned long rcx;
+	unsigned long rdx;
+	unsigned long rsi;
+	unsigned long rdi;
+	unsigned long orig_rax;
+/* end of arguments */
+/* cpu exception frame or undefined */
+	unsigned long rip;
+	unsigned long cs;
+	unsigned long eflags;
+	unsigned long rsp;
+	unsigned long ss;
+/* top of stack page */
+};
+
+/*
+ * Structure: dump_header_asm_t
+ *  Function: This is the header for architecture-specific stuff.  It
+ *            follows right after the dump header.
+ */
+typedef struct _dump_header_asm_s {
+
+        /* the dump magic number -- unique to verify dump is valid */
+        uint64_t             dha_magic_number;
+
+        /* the version number of this dump */
+        uint32_t             dha_version;
+
+        /* the size of this header (in case we can't read it) */
+        uint32_t             dha_header_size;
+
+	/* the dump registers */
+	struct pt_regs       dha_regs;
+
+	/* smp specific */
+	uint32_t	     dha_smp_num_cpus;
+	int		     dha_dumping_cpu;
+	struct pt_regs	     dha_smp_regs[NR_CPUS];
+	uint64_t	     dha_smp_current_task[NR_CPUS];
+	uint64_t	     dha_stack[NR_CPUS];
+	uint64_t	     dha_stack_ptr[NR_CPUS];
+} __attribute__((packed)) dump_header_asm_t;
+
+
+/*
+ * CPU specific part of dump_header_asm_t
+ */
+typedef struct dump_CPU_info_s {
+	struct pt_regs	     dha_smp_regs;
+	uint64_t	     dha_smp_current_task;
+	uint64_t	     dha_stack;
+	uint64_t	     dha_stack_ptr;
+} __attribute__ ((packed)) dump_CPU_info_t;
+
+#else
+
+#define HAVE_NO_DUMP_HEADER_ASM 1
+
+#endif
+
 #endif /* _DUMP_H */
--- crash/unwind.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/unwind.c	2009-01-23 10:30:52.000000000 -0500
@@ -6,8 +6,8 @@
 /*
  *  unwind.c
  *
- *  Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- *  Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ *  Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson
+ *  Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved.
  *
  *  Adapted from:  
  *
@@ -36,6 +36,7 @@
 /* #include <asm/ptrace.h>  can't include this -- it's changing over time! */
 
 #include "defs.h"
+#include "xen_hyper_defs.h"
 
 typedef unsigned char u8;
 typedef unsigned long long u64;
@@ -64,6 +65,8 @@
 	struct bt_info *);
 static int unw_switch_from_osinit_v2(struct unw_frame_info *,
 	struct bt_info *);
+static int unw_switch_from_osinit_v3(struct unw_frame_info *,
+	struct bt_info *, char *);
 static unsigned long get_init_stack_ulong(unsigned long addr);
 static void unw_init_frame_info(struct unw_frame_info *, 
 	struct bt_info *, ulong);
@@ -1392,14 +1395,62 @@
 unwind_init_v3(void)
 #endif
 {
+	int len;
 	struct gnu_request request, *req;
 
 	req = &request;
 
+	if (LKCD_KERNTYPES()) {
+		if ((len = STRUCT_SIZE("unw")) == 0) {
+			error(WARNING,
+			"cannot determine unw.tables offset; no struct unw\n");
+			machdep->flags |= UNW_OUT_OF_SYNC;
+			return;
+		}
+		machdep->machspec->unw_tables_offset =
+			MEMBER_OFFSET("unw", "tables");
+		if (MEMBER_EXISTS("unw", "r0"))
+			machdep->flags |= UNW_R0;
+		/*
+		 * no verification of save_order, sw_off, preg_index as
+		 * we're purely depending on the structure definition.
+		 */
+		if (MEMBER_EXISTS("unw", "pt_regs_offsets")) {
+			machdep->machspec->unw_pt_regs_offsets =
+				MEMBER_OFFSET("unw", "pt_regs_offsets") -
+				machdep->machspec->unw_tables_offset;
+			machdep->machspec->unw_kernel_table_offset =
+				MEMBER_OFFSET("unw", "kernel_table") -
+				machdep->machspec->unw_tables_offset;
+			machdep->flags |= UNW_PTREGS;
+		}
+		if (!load_unw_table(CLEAR_SCRIPT_CACHE)) {
+			error(WARNING,
+				"unwind_init: cannot read kernel unw table\n");
+			machdep->flags |= UNW_OUT_OF_SYNC;
+		}
+		machdep->machspec->unw = (void *)&unw;
+		/* fall to common structure size verifications */
+		goto verify;
+	}
+
         if (get_symbol_type("unw", "tables", req) == TYPE_CODE_UNDEF) {
-		error(WARNING, "cannot determine unw.tables offset\n");
-		machdep->flags |= UNW_OUT_OF_SYNC;
-	} else { 
+		/*
+		 *  KLUDGE ALERT:
+		 *  If unw.tables cannot be ascertained by gdb, try unw.save_order,
+		 *  given that it is the field just after unw.tables.
+		 */
+		if (get_symbol_type("unw", "save_order", req) == TYPE_CODE_UNDEF) {
+			error(WARNING, "cannot determine unw.tables offset\n");
+			machdep->flags |= UNW_OUT_OF_SYNC;
+		} else
+	        	req->member_offset -= BITS_PER_BYTE * sizeof(void *);
+
+		if (CRASHDEBUG(1))
+			error(WARNING, "using unw.save_order to determine unw.tables\n");
+	}
+
+	if (!(machdep->flags & UNW_OUT_OF_SYNC)) {
 		machdep->machspec->unw_tables_offset =
 			 req->member_offset/BITS_PER_BYTE;
 
@@ -1433,7 +1484,7 @@
 
 		machdep->machspec->unw = (void *)&unw;
 	}
-
+verify:	
 	verify_common_struct("unw_frame_info", sizeof(struct unw_frame_info));
 	verify_common_struct("unw_table", sizeof(struct unw_table));
 	verify_common_struct("unw_table_entry", sizeof(struct unw_table_entry));
@@ -1658,8 +1709,13 @@
                 unw_get_sp(info, &sp);
                 unw_get_bsp(info, &bsp);
 
-                if (ip < GATE_ADDR + PAGE_SIZE)
-                        break;
+		if (XEN_HYPER_MODE()) {
+			if (!IS_KVADDR(ip))
+				break;
+		} else {
+                	if (ip < GATE_ADDR + PAGE_SIZE)
+                       		break;
+		}
 
                 if ((sm = value_search(ip, NULL)))
                         name = sm->name;
@@ -1720,11 +1776,29 @@
 		 * ia64_init_handler.  
 		 */
 		if (STREQ(name, "ia64_init_handler")) {
-			unw_switch_from_osinit_v2(info, bt);
-			frame++;
-			goto restart;
+			if (symbol_exists("ia64_mca_modify_original_stack")) {
+				/*
+				 * 2.6.14 or later kernels no longer keep
+				 * minstate info in pt_regs/switch_stack.
+				 * unw_switch_from_osinit_v3() will try
+				 * to find the interrupted task and restart
+				 * backtrace itself.
+				 */
+				if (unw_switch_from_osinit_v3(info, bt, "INIT") == FALSE)
+					break;
+			} else {
+				if (unw_switch_from_osinit_v2(info, bt) == FALSE)
+					break;
+				frame++;
+				goto restart;
+			}
 		}
 
+		if (STREQ(name, "ia64_mca_handler") &&
+		    symbol_exists("ia64_mca_modify_original_stack"))
+			if (unw_switch_from_osinit_v3(info, bt, "MCA") == FALSE)
+				break;
+
                 frame++;
 
         } while (unw_unwind(info) >= 0);
@@ -1844,8 +1918,13 @@
 	ulong sw;
 
 	sw = SWITCH_STACK_ADDR(bt->task);
-	if (!INSTACK(sw, bt) && !ia64_in_init_stack(sw))
-		return FALSE;
+	if (XEN_HYPER_MODE()) {
+		if (!INSTACK(sw, bt) && !ia64_in_mca_stack_hyper(sw, bt))
+			return FALSE;
+	} else {
+		if (!INSTACK(sw, bt) && !ia64_in_init_stack(sw))
+			return FALSE;
+	}
 
         unw_init_frame_info(info, bt, sw);
 	return TRUE;
@@ -1967,6 +2046,124 @@
 	return TRUE;
 }
 
+/* CPL (current privilege level) is 2-bit field */
+#define IA64_PSR_CPL0_BIT	32
+#define IA64_PSR_CPL_MASK	(3UL << IA64_PSR_CPL0_BIT)
+
+static int
+user_mode(struct bt_info *bt, unsigned long pt)
+{
+	unsigned long cr_ipsr;
+
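+	/* a nonzero privilege level in the saved psr means the task was interrupted in user space */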
+	cr_ipsr = IA64_GET_STACK_ULONG(pt + offsetof(struct pt_regs, cr_ipsr));
+	if (cr_ipsr & IA64_PSR_CPL_MASK)
+		return 1;
+	return 0;
+}
+
+/*
+ * Cope with the INIT/MCA stack for kernel 2.6.14 or later
+ *
+ * Returns FALSE if no more unwinding is needed.
+ */
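+/* round an address down to a 16-byte boundary */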
+#define ALIGN16(x) ((x)&~15)
+static int
+unw_switch_from_osinit_v3(struct unw_frame_info *info, struct bt_info *bt,
+			  char *type)
+{
+	unsigned long pt, sw, pid;
+	int processor;
+	char *p, *q;
+	struct task_context *tc = NULL;
+	struct bt_info clone_bt;
+
+	/*
+	 *    The structure of INIT/MCA stack
+	 *
+	 *    +---------------------------+ <-------- IA64_STK_OFFSET
+	 *    |          pt_regs          |
+	 *    +---------------------------+
+	 *    |        switch_stack       |
+	 *    +---------------------------+
+	 *    |        SAL/OS state       |
+	 *    +---------------------------+
+	 *    |    16 byte scratch area   |
+	 *    +---------------------------+ <-------- SP at start of C handler
+	 *    |           .....           |
+	 *    +---------------------------+
+	 *    | RBS for MCA/INIT handler  |
+	 *    +---------------------------+
+	 *    | struct task for MCA/INIT  |
+	 *    +---------------------------+ <-------- bt->task
+	 */
+	pt = ALIGN16(bt->task + IA64_STK_OFFSET - STRUCT_SIZE("pt_regs"));
+	sw = ALIGN16(pt - STRUCT_SIZE("switch_stack"));
+
+	/*
+	 * 1. Try to find interrupted task from comm
+	 *
+	 *    comm format of INIT/MCA task:
+	 *       - "<type> <pid>"
+	 *       - "<type> <comm> <processor>"
+	 *    where "<type>" is either "INIT" or "MCA".
+	 *    The latter form is chosen if PID is 0.
+	 * 
+	 *    See ia64_mca_modify_comm() in arch/ia64/kernel/mca.c
+	 */
+	if (!bt->tc || !bt->tc->comm)
+		goto find_exframe;
+
+	if ((p = strstr(bt->tc->comm, type))) {
+		p += strlen(type);
+		if (*p != ' ')
+			goto find_exframe;
+		if ((q = strchr(++p, ' '))) {
+			/* "<type> <comm> <processor>" */
+			if (sscanf(++q, "%d", &processor) > 0) {
+				tc = pid_to_context(0);
+				while (tc) {
+					if (tc != bt->tc &&
+					    tc->processor == processor)
+						break;
+					tc = tc->tc_next;
+				}
+			}
+		} else if (sscanf(p, "%lu", &pid) > 0)
+			/* "<type> <pid>" */
+			tc = pid_to_context(pid);
+	}
+
+	if (tc) {
+		/* Clone bt_info and do backtrace */
+		clone_bt_info(bt, &clone_bt, tc);
+		if (!BT_REFERENCE_CHECK(&clone_bt)) {
+			fprintf(fp, "(%s) INTERRUPTED TASK\n", type);
+			print_task_header(fp, tc, 0);
+		}
+		if (!user_mode(bt, pt))
+			back_trace(&clone_bt);
+		else if (!BT_REFERENCE_CHECK(bt)) {
+			fprintf(fp, " #0 [interrupted in user space]\n");
+			/* at least show the incomplete exception frame */
+			bt->flags |= BT_INCOMPLETE_USER_EFRAME;
+			ia64_exception_frame(pt, bt);
+		}
+		return FALSE;
+	}
+
+	/* task matching with INIT/MCA task's comm is not found */
+
+find_exframe:
+	/*
+	 * 2. If step 1 doesn't work, do our best to find the exception frame
+	 */
+	unw_init_from_interruption(info, bt, pt, sw);
+	if (!BT_REFERENCE_CHECK(bt))
+		ia64_exception_frame(pt, bt);
+
+	return TRUE;
+}
+
 static void
 unw_init_frame_info (struct unw_frame_info *info, struct bt_info *bt, ulong sw)
 {
--- crash/gdb-6.1/gdb/dwarf2read.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/gdb-6.1/gdb/dwarf2read.c	2009-01-22 16:09:48.000000000 -0500
@@ -7391,8 +7391,7 @@
    When the result is a register number, the global isreg flag is set,
    otherwise it is cleared.
 
-   Note that stack[0] is unused except as a default error return.
-   Note that stack overflow is not yet handled.  */
+   Note that stack[0] is unused except as a default error return. */
 
 static CORE_ADDR
 decode_locdesc (struct dwarf_block *blk, struct dwarf2_cu *cu)
@@ -7409,7 +7408,7 @@
 
   i = 0;
   stacki = 0;
-  stack[stacki] = 0;
+  stack[++stacki] = 0;
   isreg = 0;
 
   while (i < size)
@@ -7591,6 +7590,16 @@
 		     dwarf_stack_op_name (op));
 	  return (stack[stacki]);
 	}
+      /* Enforce maximum stack depth of size-1 to avoid ++stacki writing
+         outside of the allocated space. Also enforce minimum > 0.
+         -- wad@google.com 14 Aug 2006 */
+      if (stacki >= sizeof (stack) / sizeof (*stack) - 1)
+	internal_error (__FILE__, __LINE__,
+	                _("location description stack too deep: %d"),
+	                stacki);
+      if (stacki <= 0)
+	internal_error (__FILE__, __LINE__,
+	                _("location description stack too shallow"));
     }
   return (stack[stacki]);
 }
--- crash/gdb-6.1/gdb/symfile.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/gdb-6.1/gdb/symfile.c	2007-01-30 10:43:08.000000000 -0500
@@ -3,7 +3,7 @@
    Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
    1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
    Portions Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
-   Copyright (c) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+   Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved.
 
    Contributed by Cygnus Support, using pieces from other GDB modules.
 
@@ -1678,7 +1678,11 @@
                to load the program. */
 	    sect_opts[section_index].name = ".text";
 	    sect_opts[section_index].value = arg;
+#ifdef CRASH_MERGE
+	    if (++section_index >= num_sect_opts) 
+#else
 	    if (++section_index > num_sect_opts) 
+#endif
 	      {
 		num_sect_opts *= 2;
 		sect_opts = ((struct sect_opt *) 
@@ -1714,7 +1718,11 @@
 		    {
 		      sect_opts[section_index].value = arg;
 		      expecting_sec_addr = 0;
+#ifdef CRASH_MERGE
+		      if (++section_index >= num_sect_opts) 
+#else
 		      if (++section_index > num_sect_opts) 
+#endif
 			{
 			  num_sect_opts *= 2;
 			  sect_opts = ((struct sect_opt *) 
@@ -3510,6 +3518,13 @@
 bfd_byte *
 symfile_relocate_debug_section (bfd *abfd, asection *sectp, bfd_byte *buf)
 {
+#ifdef CRASH_MERGE
+  /* Executable files have all the relocations already resolved.
+   * Handle files linked with --emit-relocs.
+   * http://sources.redhat.com/ml/gdb/2006-08/msg00137.html  */
+  if ((abfd->flags & EXEC_P) != 0)
+    return NULL;
+#endif
   /* We're only interested in debugging sections with relocation
      information.  */
   if ((sectp->flags & SEC_RELOC) == 0)
--- crash/gdb-6.1/gdb/main.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/gdb-6.1/gdb/main.c	2009-01-22 15:33:12.000000000 -0500
@@ -628,7 +628,7 @@
 
       if (!inhibit_gdbinit)
 	{
-	  catch_command_errors (source_command, homeinit, 0, RETURN_MASK_ALL);
+	  catch_command_errors (source_command, homeinit, -1, RETURN_MASK_ALL);
 	}
 
       /* Do stats; no need to do them elsewhere since we'll only
@@ -714,7 +714,7 @@
       || memcmp ((char *) &homebuf, (char *) &cwdbuf, sizeof (struct stat)))
     if (!inhibit_gdbinit)
       {
-	catch_command_errors (source_command, gdbinit, 0, RETURN_MASK_ALL);
+	catch_command_errors (source_command, gdbinit, -1, RETURN_MASK_ALL);
       }
 
   for (i = 0; i < ncmd; i++)
--- crash/gdb-6.1/gdb/symtab.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/gdb-6.1/gdb/symtab.c	2007-01-30 10:43:08.000000000 -0500
@@ -4,7 +4,7 @@
    1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004
    Free Software Foundation, Inc.
    Portions Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
-   Copyright (c) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+   Copyright (c) 2002, 2003, 2004, 2005, 2007 Red Hat, Inc. All rights reserved.
 
    This file is part of GDB.
 
@@ -4523,14 +4523,54 @@
 	struct symbol *sym;
 	struct expression *expr;
 	struct cleanup *old_chain;
-
+	int i;
+        int allsect = 0;
+        char *secname;
+        char buf[80];
+    
 	gdb_current_load_module = lm = (struct load_module *)req->addr;
 
 	req->name = lm->mod_namelist;
 	gdb_delete_symbol_file(req);
 
-        sprintf(req->buf, "add-symbol-file %s 0x%lx", lm->mod_namelist, 
-		lm->mod_text_start);
+        for (i = 0 ; i < lm->mod_sections; i++) {
+            if (STREQ(lm->mod_section_data[i].name, ".text") &&
+                (lm->mod_section_data[i].flags & SEC_FOUND))
+                    allsect = 1;
+        }
+
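+        /* no .text section found: fall back to the text/data/bss/rodata start addresses;
+           otherwise pass every found section explicitly */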
+        if (!allsect) {
+            sprintf(req->buf, "add-symbol-file %s 0x%lx", lm->mod_namelist,
+                    lm->mod_text_start ? lm->mod_text_start : lm->mod_base);
+	    if (lm->mod_data_start) {
+                    sprintf(buf, " -s .data 0x%lx", lm->mod_data_start);
+                    strcat(req->buf, buf);
+	    }
+	    if (lm->mod_bss_start) {
+                    sprintf(buf, " -s .bss 0x%lx", lm->mod_bss_start);
+                    strcat(req->buf, buf);
+	    }
+	    if (lm->mod_rodata_start) {
+                    sprintf(buf, " -s .rodata 0x%lx", lm->mod_rodata_start);
+                    strcat(req->buf, buf);
+	    }
+        } else {
+            sprintf(req->buf, "add-symbol-file %s 0x%lx", lm->mod_namelist,
+                    lm->mod_text_start);
+            for (i = 0; i < lm->mod_sections; i++) {
+                    secname = lm->mod_section_data[i].name;
+                    if ((lm->mod_section_data[i].flags & SEC_FOUND) &&
+                        !STREQ(secname, ".text")) {
+                            sprintf(buf, " -s %s 0x%lx", secname,
+                                lm->mod_section_data[i].offset + lm->mod_base);
+                            strcat(req->buf, buf);
+                    }
+            }
+        }
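+
+        /*
+         * Illustrative only (module name and addresses below are made
+         * up, not taken from a real session): either branch above
+         * leaves req->buf holding a gdb command of the form
+         *
+         *   add-symbol-file /lib/modules/.../ext3.ko 0xd0a46000
+         *       -s .data 0xd0a5e000 -s .bss 0xd0a5f400
+         *
+         * i.e. the .text load address followed by one "-s <section>
+         * <address>" pair per additional section that was found.
+         */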
+
+	if (gdb_CRASHDEBUG(1)) {
+            fprintf_filtered(gdb_stdout, "gdb_add_symbol_file: %s\n", req->buf);
+	}
 
        	execute_command(req->buf, FALSE);
 
--- crash/gdb-6.1/gdb/Makefile.in.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/gdb-6.1/gdb/Makefile.in	2009-01-22 15:33:12.000000000 -0500
@@ -2513,7 +2513,7 @@
 	$(expression_h) $(frame_h) $(value_h) $(language_h) $(filenames_h) \
 	$(objfiles_h) $(source_h) $(disasm_h) $(ui_out_h) $(top_h) \
 	$(cli_decode_h) $(cli_script_h) $(cli_setshow_h) $(cli_cmds_h) \
-	$(tui_h)
+	$(tui_h) $(gdb_stat_h)
 	$(CC) -c $(INTERNAL_CFLAGS) $(srcdir)/cli/cli-cmds.c
 cli-decode.o: $(srcdir)/cli/cli-decode.c $(defs_h) $(symtab_h) \
 	$(gdb_regex_h) $(gdb_string_h) $(ui_out_h) $(cli_cmds_h) \
--- crash/gdb-6.1/gdb/ppc-linux-tdep.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/gdb-6.1/gdb/ppc-linux-tdep.c	2005-07-14 11:08:17.000000000 -0400
@@ -0,0 +1,1116 @@
+/* Target-dependent code for GDB, the GNU debugger.
+
+   Copyright 1986, 1987, 1989, 1991, 1992, 1993, 1994, 1995, 1996,
+   1997, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+   Copyright (c) 2004, 2005 Red Hat, Inc. All rights reserved.
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+#include "defs.h"
+#include "frame.h"
+#include "inferior.h"
+#include "symtab.h"
+#include "target.h"
+#include "gdbcore.h"
+#include "gdbcmd.h"
+#include "symfile.h"
+#include "objfiles.h"
+#include "regcache.h"
+#include "value.h"
+#include "osabi.h"
+
+#include "solib-svr4.h"
+#include "ppc-tdep.h"
+
+/* The following instructions are used in the signal trampoline code
+   on GNU/Linux PPC. The kernel used to use magic syscalls 0x6666 and
+   0x7777 but now uses the sigreturn syscalls.  We check for both.  */
+#define INSTR_LI_R0_0x6666		0x38006666
+#define INSTR_LI_R0_0x7777		0x38007777
+#define INSTR_LI_R0_NR_sigreturn	0x38000077
+#define INSTR_LI_R0_NR_rt_sigreturn	0x380000AC
+
+#define INSTR_SC			0x44000002
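+
+/* Decoding sketch (added for illustration, not in the original file):
+   0x38000077 is "addi r0,0,0x77", i.e. "li r0,119", and 119 is
+   __NR_sigreturn on 32-bit PPC Linux; 0x380000AC likewise loads 172,
+   __NR_rt_sigreturn.  The top six bits (0x38000000 >> 26 == 14) are
+   the addi opcode, the next two five-bit fields are rt = 0 and
+   ra = 0, and the low 16 bits are the immediate.  0x44000002 is "sc",
+   the system call instruction itself.  */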
+
+/* Since the *-tdep.c files are platform independent (i.e., they may be
+   used to build cross platform debuggers), we can't include system
+   headers.  Therefore, details concerning the sigcontext structure
+   must be painstakingly rerecorded.  What's worse, if these details
+   ever change in the header files, they'll have to be changed here
+   as well. */
+
+/* __SIGNAL_FRAMESIZE from <asm/ptrace.h> */
+#define PPC_LINUX_SIGNAL_FRAMESIZE 64
+
+/* From <asm/sigcontext.h>, offsetof(struct sigcontext_struct, regs) == 0x1c */
+#define PPC_LINUX_REGS_PTR_OFFSET (PPC_LINUX_SIGNAL_FRAMESIZE + 0x1c)
+
+/* From <asm/sigcontext.h>, 
+   offsetof(struct sigcontext_struct, handler) == 0x14 */
+#define PPC_LINUX_HANDLER_PTR_OFFSET (PPC_LINUX_SIGNAL_FRAMESIZE + 0x14)
+
+/* From <asm/ptrace.h>, values for PT_NIP, PT_R1, and PT_LNK */
+#define PPC_LINUX_PT_R0		0
+#define PPC_LINUX_PT_R1		1
+#define PPC_LINUX_PT_R2		2
+#define PPC_LINUX_PT_R3		3
+#define PPC_LINUX_PT_R4		4
+#define PPC_LINUX_PT_R5		5
+#define PPC_LINUX_PT_R6		6
+#define PPC_LINUX_PT_R7		7
+#define PPC_LINUX_PT_R8		8
+#define PPC_LINUX_PT_R9		9
+#define PPC_LINUX_PT_R10	10
+#define PPC_LINUX_PT_R11	11
+#define PPC_LINUX_PT_R12	12
+#define PPC_LINUX_PT_R13	13
+#define PPC_LINUX_PT_R14	14
+#define PPC_LINUX_PT_R15	15
+#define PPC_LINUX_PT_R16	16
+#define PPC_LINUX_PT_R17	17
+#define PPC_LINUX_PT_R18	18
+#define PPC_LINUX_PT_R19	19
+#define PPC_LINUX_PT_R20	20
+#define PPC_LINUX_PT_R21	21
+#define PPC_LINUX_PT_R22	22
+#define PPC_LINUX_PT_R23	23
+#define PPC_LINUX_PT_R24	24
+#define PPC_LINUX_PT_R25	25
+#define PPC_LINUX_PT_R26	26
+#define PPC_LINUX_PT_R27	27
+#define PPC_LINUX_PT_R28	28
+#define PPC_LINUX_PT_R29	29
+#define PPC_LINUX_PT_R30	30
+#define PPC_LINUX_PT_R31	31
+#define PPC_LINUX_PT_NIP	32
+#define PPC_LINUX_PT_MSR	33
+#define PPC_LINUX_PT_CTR	35
+#define PPC_LINUX_PT_LNK	36
+#define PPC_LINUX_PT_XER	37
+#define PPC_LINUX_PT_CCR	38
+#define PPC_LINUX_PT_MQ		39
+#define PPC_LINUX_PT_FPR0	48	/* each FP reg occupies 2 slots in this space */
+#define PPC_LINUX_PT_FPR31 (PPC_LINUX_PT_FPR0 + 2*31)
+#define PPC_LINUX_PT_FPSCR (PPC_LINUX_PT_FPR0 + 2*32 + 1)
+
+static int ppc_linux_at_sigtramp_return_path (CORE_ADDR pc);
+
+/* Determine if pc is in a signal trampoline...
+
+   Ha!  That's not what this does at all.  wait_for_inferior in
+   infrun.c calls PC_IN_SIGTRAMP in order to detect entry into a
+   signal trampoline just after delivery of a signal.  But on
+   GNU/Linux, signal trampolines are used for the return path only.
+   The kernel sets things up so that the signal handler is called
+   directly.
+
+   If we use in_sigtramp2() in place of in_sigtramp() (see below)
+   we'll (often) end up with stop_pc in the trampoline and prev_pc in
+   the (now exited) handler.  The code there will cause a temporary
+   breakpoint to be set on prev_pc which is not very likely to get hit
+   again.
+
+   If this is confusing, think of it this way...  the code in
+   wait_for_inferior() needs to be able to detect entry into a signal
+   trampoline just after a signal is delivered, not after the handler
+   has been run.
+
+   So, we define in_sigtramp() below to return 1 if the following is
+   true:
+
+   1) The previous frame is a real signal trampoline.
+
+   - and -
+
+   2) pc is at the first or second instruction of the corresponding
+   handler.
+
+   Why the second instruction?  It seems that wait_for_inferior()
+   never sees the first instruction when single stepping.  When a
+   signal is delivered while stepping, the next instruction that
+   would've been stepped over isn't; instead, the first instruction of
+   the handler is stepped over.  That
+   puts us on the second instruction.  (I added the test for the
+   first instruction long after the fact, just in case the observed
+   behavior is ever fixed.)
+
+   PC_IN_SIGTRAMP is called from blockframe.c as well in order to set
+   the frame's type (if a SIGTRAMP_FRAME).  Because of our strange
+   definition of in_sigtramp below, we can't rely on the frame's type
+   getting set correctly from within blockframe.c.  This is why we
+   take pains to set it in init_extra_frame_info().
+
+   NOTE: cagney/2002-11-10: I suspect the real problem here is that
+   the get_prev_frame() only initializes the frame's type after the
+   call to INIT_FRAME_INFO.  get_prev_frame() should be fixed, this
+   code shouldn't be working its way around a bug :-(.  */
+
+int
+ppc_linux_in_sigtramp (CORE_ADDR pc, char *func_name)
+{
+  CORE_ADDR lr;
+  CORE_ADDR sp;
+  CORE_ADDR tramp_sp;
+  char buf[4];
+  CORE_ADDR handler;
+
+  lr = read_register (gdbarch_tdep (current_gdbarch)->ppc_lr_regnum);
+  if (!ppc_linux_at_sigtramp_return_path (lr))
+    return 0;
+
+  sp = read_register (SP_REGNUM);
+
+  if (target_read_memory (sp, buf, sizeof (buf)) != 0)
+    return 0;
+
+  tramp_sp = extract_unsigned_integer (buf, 4);
+
+  if (target_read_memory (tramp_sp + PPC_LINUX_HANDLER_PTR_OFFSET, buf,
+			  sizeof (buf)) != 0)
+    return 0;
+
+  handler = extract_unsigned_integer (buf, 4);
+
+  return (pc == handler || pc == handler + 4);
+}
+
+static int
+insn_is_sigreturn (unsigned long pcinsn)
+{
+  switch(pcinsn)
+    {
+    case INSTR_LI_R0_0x6666:
+    case INSTR_LI_R0_0x7777:
+    case INSTR_LI_R0_NR_sigreturn:
+    case INSTR_LI_R0_NR_rt_sigreturn:
+      return 1;
+    default:
+      return 0;
+    }
+}
+
+/*
+ * The signal handler trampoline is on the stack and consists of exactly
+ * two instructions.  The easiest and most accurate way of determining
+ * whether the pc is in one of these trampolines is by inspecting the
+ * instructions.  It'd be faster though if we could find a way to do this
+ * via some simple address comparisons.
+ */
+static int
+ppc_linux_at_sigtramp_return_path (CORE_ADDR pc)
+{
+  char buf[12];
+  unsigned long pcinsn;
+  if (target_read_memory (pc - 4, buf, sizeof (buf)) != 0)
+    return 0;
+
+  /* extract the instruction at the pc */
+  pcinsn = extract_unsigned_integer (buf + 4, 4);
+
+  return (
+	   (insn_is_sigreturn (pcinsn)
+	    && extract_unsigned_integer (buf + 8, 4) == INSTR_SC)
+	   ||
+	   (pcinsn == INSTR_SC
+	    && insn_is_sigreturn (extract_unsigned_integer (buf, 4))));
+}
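+
+/* For reference (reconstructed from the #defines above, not copied
+   from a dump): an rt_sigreturn trampoline is just the two words
+   0x380000AC 0x44000002, i.e. "li r0,172; sc".  The 12-byte window
+   read above (three instructions starting at pc - 4) lets the check
+   succeed whether pc points at the li or at the sc.  */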
+
+static CORE_ADDR
+ppc_linux_skip_trampoline_code (CORE_ADDR pc)
+{
+  char buf[4];
+  struct obj_section *sect;
+  struct objfile *objfile;
+  unsigned long insn;
+  CORE_ADDR plt_start = 0;
+  CORE_ADDR symtab = 0;
+  CORE_ADDR strtab = 0;
+  int num_slots = -1;
+  int reloc_index = -1;
+  CORE_ADDR plt_table;
+  CORE_ADDR reloc;
+  CORE_ADDR sym;
+  long symidx;
+  char symname[1024];
+  struct minimal_symbol *msymbol;
+
+  /* Find the section pc is in; return if not in .plt */
+  sect = find_pc_section (pc);
+  if (!sect || strcmp (sect->the_bfd_section->name, ".plt") != 0)
+    return 0;
+
+  objfile = sect->objfile;
+
+  /* Pick up the instruction at pc.  It had better be of the
+     form
+     li r11, IDX
+
+     where IDX is an index into the plt_table.  */
+
+  if (target_read_memory (pc, buf, 4) != 0)
+    return 0;
+  insn = extract_unsigned_integer (buf, 4);
+
+  if ((insn & 0xffff0000) != 0x39600000 /* li r11, VAL */ )
+    return 0;
+
+  reloc_index = (insn << 16) >> 16;
+
+  /* Find the objfile that pc is in and obtain the information
+     necessary for finding the symbol name. */
+  for (sect = objfile->sections; sect < objfile->sections_end; ++sect)
+    {
+      const char *secname = sect->the_bfd_section->name;
+      if (strcmp (secname, ".plt") == 0)
+	plt_start = sect->addr;
+      else if (strcmp (secname, ".rela.plt") == 0)
+	num_slots = ((int) sect->endaddr - (int) sect->addr) / 12;
+      else if (strcmp (secname, ".dynsym") == 0)
+	symtab = sect->addr;
+      else if (strcmp (secname, ".dynstr") == 0)
+	strtab = sect->addr;
+    }
+
+  /* Make sure we have all the information we need. */
+  if (plt_start == 0 || num_slots == -1 || symtab == 0 || strtab == 0)
+    return 0;
+
+  /* Compute the value of the plt table */
+  plt_table = plt_start + 72 + 8 * num_slots;
+
+  /* Get address of the relocation entry (Elf32_Rela) */
+  if (target_read_memory (plt_table + reloc_index, buf, 4) != 0)
+    return 0;
+  reloc = extract_unsigned_integer (buf, 4);
+
+  sect = find_pc_section (reloc);
+  if (!sect)
+    return 0;
+
+  if (strcmp (sect->the_bfd_section->name, ".text") == 0)
+    return reloc;
+
+  /* Now get the r_info field which is the relocation type and symbol
+     index. */
+  if (target_read_memory (reloc + 4, buf, 4) != 0)
+    return 0;
+  symidx = extract_unsigned_integer (buf, 4);
+
+  /* Shift out the relocation type leaving just the symbol index */
+  /* symidx = ELF32_R_SYM(symidx); */
+  symidx = symidx >> 8;
+
+  /* compute the address of the symbol */
+  sym = symtab + symidx * 4;
+
+  /* Fetch the string table index */
+  if (target_read_memory (sym, buf, 4) != 0)
+    return 0;
+  symidx = extract_unsigned_integer (buf, 4);
+
+  /* Fetch the string; we don't know how long it is.  Is it possible
+     that the following will fail because we're trying to fetch too
+     much? */
+  if (target_read_memory (strtab + symidx, symname, sizeof (symname)) != 0)
+    return 0;
+
+  /* This might not work right if we have multiple symbols with the
+     same name; the only way to really get it right is to perform
+     the same sort of lookup as the dynamic linker. */
+  msymbol = lookup_minimal_symbol_text (symname, NULL);
+  if (!msymbol)
+    return 0;
+
+  return SYMBOL_VALUE_ADDRESS (msymbol);
+}
+
+/* The rs6000 version of FRAME_SAVED_PC will almost work for us.  The
+   signal handler details are different, so we'll handle those here
+   and call the rs6000 version to do the rest. */
+CORE_ADDR
+ppc_linux_frame_saved_pc (struct frame_info *fi)
+{
+  if ((get_frame_type (fi) == SIGTRAMP_FRAME))
+    {
+      CORE_ADDR regs_addr =
+	read_memory_integer (get_frame_base (fi)
+			     + PPC_LINUX_REGS_PTR_OFFSET, 4);
+      /* return the NIP in the regs array */
+      return read_memory_integer (regs_addr + 4 * PPC_LINUX_PT_NIP, 4);
+    }
+  else if (get_next_frame (fi)
+	   && (get_frame_type (get_next_frame (fi)) == SIGTRAMP_FRAME))
+    {
+      CORE_ADDR regs_addr =
+	read_memory_integer (get_frame_base (get_next_frame (fi))
+			     + PPC_LINUX_REGS_PTR_OFFSET, 4);
+      /* return LNK in the regs array */
+      return read_memory_integer (regs_addr + 4 * PPC_LINUX_PT_LNK, 4);
+    }
+  else
+    return rs6000_frame_saved_pc (fi);
+}
+
+void
+ppc_linux_init_extra_frame_info (int fromleaf, struct frame_info *fi)
+{
+  rs6000_init_extra_frame_info (fromleaf, fi);
+
+  if (get_next_frame (fi) != 0)
+    {
+      /* We're called from get_prev_frame_info; check to see if
+         this is a signal frame by looking to see if the pc points
+         at trampoline code */
+      if (ppc_linux_at_sigtramp_return_path (get_frame_pc (fi)))
+	deprecated_set_frame_type (fi, SIGTRAMP_FRAME);
+      else
+	/* FIXME: cagney/2002-11-10: Is this double bogus?  What
+           happens if the frame has previously been marked as a dummy?  */
+	deprecated_set_frame_type (fi, NORMAL_FRAME);
+    }
+}
+
+int
+ppc_linux_frameless_function_invocation (struct frame_info *fi)
+{
+  /* We'll find the wrong thing if we let 
+     rs6000_frameless_function_invocation () search for a signal trampoline */
+  if (ppc_linux_at_sigtramp_return_path (get_frame_pc (fi)))
+    return 0;
+  else
+    return rs6000_frameless_function_invocation (fi);
+}
+
+void
+ppc_linux_frame_init_saved_regs (struct frame_info *fi)
+{
+  if ((get_frame_type (fi) == SIGTRAMP_FRAME))
+    {
+      CORE_ADDR regs_addr;
+      int i;
+      if (deprecated_get_frame_saved_regs (fi))
+	return;
+
+      frame_saved_regs_zalloc (fi);
+
+      regs_addr =
+	read_memory_integer (get_frame_base (fi)
+			     + PPC_LINUX_REGS_PTR_OFFSET, 4);
+      deprecated_get_frame_saved_regs (fi)[PC_REGNUM] = regs_addr + 4 * PPC_LINUX_PT_NIP;
+      deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_ps_regnum] =
+        regs_addr + 4 * PPC_LINUX_PT_MSR;
+      deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_cr_regnum] =
+        regs_addr + 4 * PPC_LINUX_PT_CCR;
+      deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_lr_regnum] =
+        regs_addr + 4 * PPC_LINUX_PT_LNK;
+      deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_ctr_regnum] =
+        regs_addr + 4 * PPC_LINUX_PT_CTR;
+      deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_xer_regnum] =
+        regs_addr + 4 * PPC_LINUX_PT_XER;
+      deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_mq_regnum] =
+	regs_addr + 4 * PPC_LINUX_PT_MQ;
+      for (i = 0; i < 32; i++)
+	deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_gp0_regnum + i] =
+	  regs_addr + 4 * PPC_LINUX_PT_R0 + 4 * i;
+      for (i = 0; i < 32; i++)
+	deprecated_get_frame_saved_regs (fi)[FP0_REGNUM + i] = regs_addr + 4 * PPC_LINUX_PT_FPR0 + 8 * i;
+    }
+  else
+    rs6000_frame_init_saved_regs (fi);
+}
+
+CORE_ADDR
+ppc_linux_frame_chain (struct frame_info *thisframe)
+{
+  /* Kernel properly constructs the frame chain for the handler */
+  if ((get_frame_type (thisframe) == SIGTRAMP_FRAME))
+    return read_memory_integer (get_frame_base (thisframe), 4);
+  else
+    return rs6000_frame_chain (thisframe);
+}
+
+/* ppc_linux_memory_remove_breakpoints attempts to remove a breakpoint
+   in much the same fashion as memory_remove_breakpoint in mem-break.c,
+   but is careful not to write back the previous contents if the code
+   in question has changed in between inserting the breakpoint and
+   removing it.
+
+   Here is the problem that we're trying to solve...
+
+   Once upon a time, before introducing this function to remove
+   breakpoints from the inferior, setting a breakpoint on a shared
+   library function prior to running the program would not work
+   properly.  In order to understand the problem, it is first
+   necessary to understand a little bit about dynamic linking on
+   this platform.
+
+   A call to a shared library function is accomplished via a bl
+   (branch-and-link) instruction whose branch target is an entry
+   in the procedure linkage table (PLT).  The PLT in the object
+   file is uninitialized.  To gdb, prior to running the program, the
+   entries in the PLT are all zeros.
+
+   Once the program starts running, the shared libraries are loaded
+   and the procedure linkage table is initialized, but the entries in
+   the table are not (necessarily) resolved.  Once a function is
+   actually called, the code in the PLT is hit and the function is
+   resolved.  In order to better illustrate this, an example is in
+   order; the following example is from the gdb testsuite.
+	    
+	We start the program shmain.
+
+	    [kev@arroyo testsuite]$ ../gdb gdb.base/shmain
+	    [...]
+
+	We place two breakpoints, one on shr1 and the other on main.
+
+	    (gdb) b shr1
+	    Breakpoint 1 at 0x100409d4
+	    (gdb) b main
+	    Breakpoint 2 at 0x100006a0: file gdb.base/shmain.c, line 44.
+
+	Examine the instruction (and the immediately following instruction)
+	upon which the breakpoint was placed.  Note that the PLT entry
+	for shr1 contains zeros.
+
+	    (gdb) x/2i 0x100409d4
+	    0x100409d4 <shr1>:      .long 0x0
+	    0x100409d8 <shr1+4>:    .long 0x0
+
+	Now run 'til main.
+
+	    (gdb) r
+	    Starting program: gdb.base/shmain 
+	    Breakpoint 1 at 0xffaf790: file gdb.base/shr1.c, line 19.
+
+	    Breakpoint 2, main ()
+		at gdb.base/shmain.c:44
+	    44        g = 1;
+
+	Examine the PLT again.  Note that the loading of the shared
+	library has initialized the PLT to code which loads a constant
+	(which I think is an index into the GOT) into r11 and then
+	branches a short distance to the code which actually does the
+	resolving.
+
+	    (gdb) x/2i 0x100409d4
+	    0x100409d4 <shr1>:      li      r11,4
+	    0x100409d8 <shr1+4>:    b       0x10040984 <sg+4>
+	    (gdb) c
+	    Continuing.
+
+	    Breakpoint 1, shr1 (x=1)
+		at gdb.base/shr1.c:19
+	    19        l = 1;
+
+	Now we've hit the breakpoint at shr1.  (The breakpoint was
+	reset from the PLT entry to the actual shr1 function after the
+	shared library was loaded.) Note that the PLT entry has been
+	resolved to contain a branch that takes us directly to shr1. 
+	(The real one, not the PLT entry.)
+
+	    (gdb) x/2i 0x100409d4
+	    0x100409d4 <shr1>:      b       0xffaf76c <shr1>
+	    0x100409d8 <shr1+4>:    b       0x10040984 <sg+4>
+
+   The thing to note here is that the PLT entry for shr1 has been
+   changed twice.
+
+   Now the problem should be obvious.  GDB places a breakpoint (a
+   trap instruction) on the zero value of the PLT entry for shr1. 
+   Later on, after the shared library had been loaded and the PLT
+   initialized, GDB gets a signal indicating this fact and attempts
+   (as it always does when it stops) to remove all the breakpoints.
+
+   The breakpoint removal was causing the former contents (a zero
+   word) to be written back to the now-initialized PLT entry, thus
+   destroying a portion of the initialization that had occurred only a
+   short time ago.  When execution continued, the zero word would be
+   executed as an instruction and an illegal instruction trap was
+   generated instead.  (0 is not a legal instruction.)
+
+   The fix for this problem was fairly straightforward.  The function
+   memory_remove_breakpoint from mem-break.c was copied to this file,
+   modified slightly, and renamed to ppc_linux_memory_remove_breakpoint.
+   In tm-linux.h, MEMORY_REMOVE_BREAKPOINT is defined to call this new
+   function.
+
+   The differences between ppc_linux_memory_remove_breakpoint () and
+   memory_remove_breakpoint () are minor.  All that the former does
+   that the latter does not is check to make sure that the breakpoint
+   location actually contains a breakpoint (trap instruction) prior
+   to attempting to write back the old contents.  If it does contain
+   a trap instruction, we allow the old contents to be written back. 
+   Otherwise, we silently do nothing.
+
+   The big question is whether memory_remove_breakpoint () should be
+   changed to have the same functionality.  The downside is that more
+   traffic is generated for remote targets since we'll have an extra
+   fetch of a memory word each time a breakpoint is removed.
+
+   For the time being, we'll leave this self-modifying-code-friendly
+   version in ppc-linux-tdep.c, but it ought to be migrated somewhere
+   else in the event that some other platform has similar needs with
+   regard to removing breakpoints in some potentially self modifying
+   code.  */
+int
+ppc_linux_memory_remove_breakpoint (CORE_ADDR addr, char *contents_cache)
+{
+  const unsigned char *bp;
+  int val;
+  int bplen;
+  char old_contents[BREAKPOINT_MAX];
+
+  /* Determine appropriate breakpoint contents and size for this address.  */
+  bp = BREAKPOINT_FROM_PC (&addr, &bplen);
+  if (bp == NULL)
+    error ("Software breakpoints not implemented for this target.");
+
+  val = target_read_memory (addr, old_contents, bplen);
+
+  /* If our breakpoint is no longer at the address, this means that the
+     program modified the code on us, so it is wrong to put back the
+     old value */
+  if (val == 0 && memcmp (bp, old_contents, bplen) == 0)
+    val = target_write_memory (addr, contents_cache, bplen);
+
+  return val;
+}
+
+/* For historic reasons, PPC 32 GNU/Linux follows PowerOpen rather
+   than the 32 bit SYSV R4 ABI structure return convention - all
+   structures, no matter their size, are put in memory.  Vectors,
+   which were added later, do get returned in a register though.  */
+
+static enum return_value_convention
+ppc_linux_return_value (struct gdbarch *gdbarch, struct type *valtype,
+			struct regcache *regcache, void *readbuf,
+			const void *writebuf)
+{  
+  if ((TYPE_CODE (valtype) == TYPE_CODE_STRUCT
+       || TYPE_CODE (valtype) == TYPE_CODE_UNION)
+      && !((TYPE_LENGTH (valtype) == 16 || TYPE_LENGTH (valtype) == 8)
+	   && TYPE_VECTOR (valtype)))
+    return RETURN_VALUE_STRUCT_CONVENTION;
+  else
+    return ppc_sysv_abi_return_value (gdbarch, valtype, regcache, readbuf,
+				      writebuf);
+}
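+
+/* Example of the effect (hypothetical types, for illustration only):
+   an 8-byte "struct { int x, y; }" return value goes through memory
+   under the PowerOpen convention selected above, even though the
+   32-bit SysV ABI would permit r3/r4; a 16-byte AltiVec "vector int"
+   is exempted and falls through to ppc_sysv_abi_return_value, which
+   hands it back in a vector register.  */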
+
+/* Fetch (and possibly build) an appropriate link_map_offsets
+   structure for GNU/Linux PPC targets using the struct offsets
+   defined in link.h (but without actual reference to that file).
+
+   This makes it possible to access GNU/Linux PPC shared libraries
+   from a GDB that was not built on a GNU/Linux PPC host (for cross
+   debugging).  */
+
+struct link_map_offsets *
+ppc_linux_svr4_fetch_link_map_offsets (void)
+{
+  static struct link_map_offsets lmo;
+  static struct link_map_offsets *lmp = NULL;
+
+  if (lmp == NULL)
+    {
+      lmp = &lmo;
+
+      lmo.r_debug_size = 8;	/* The actual size is 20 bytes, but
+				   this is all we need.  */
+      lmo.r_map_offset = 4;
+      lmo.r_map_size   = 4;
+
+      lmo.link_map_size = 20;	/* The actual size is 560 bytes, but
+				   this is all we need.  */
+      lmo.l_addr_offset = 0;
+      lmo.l_addr_size   = 4;
+
+      lmo.l_name_offset = 4;
+      lmo.l_name_size   = 4;
+
+      lmo.l_next_offset = 12;
+      lmo.l_next_size   = 4;
+
+      lmo.l_prev_offset = 16;
+      lmo.l_prev_size   = 4;
+    }
+
+  return lmp;
+}
+
+
+/* Macros for matching instructions.  Note that, since all the
+   operands are masked off before they're or-ed into the instruction,
+   you can use -1 to make masks.  */
+
+#define insn_d(opcd, rts, ra, d)                \
+  ((((opcd) & 0x3f) << 26)                      \
+   | (((rts) & 0x1f) << 21)                     \
+   | (((ra) & 0x1f) << 16)                      \
+   | ((d) & 0xffff))
+
+#define insn_ds(opcd, rts, ra, d, xo)           \
+  ((((opcd) & 0x3f) << 26)                      \
+   | (((rts) & 0x1f) << 21)                     \
+   | (((ra) & 0x1f) << 16)                      \
+   | ((d) & 0xfffc)                             \
+   | ((xo) & 0x3))
+
+#define insn_xfx(opcd, rts, spr, xo)            \
+  ((((opcd) & 0x3f) << 26)                      \
+   | (((rts) & 0x1f) << 21)                     \
+   | (((spr) & 0x1f) << 16)                     \
+   | (((spr) & 0x3e0) << 6)                     \
+   | (((xo) & 0x3ff) << 1))
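+
+/* Worked example (illustrative, not part of the original file):
+   insn_d (15, 12, 2, 0) builds (15 << 26) | (12 << 21) | (2 << 16),
+   i.e. 0x3d820000, the encoding of "addis r12, r2, 0" shown in the
+   linkage-function walkthrough further below.  Passing -1 for an
+   operand sets every bit of that operand's field, so
+   insn_d (-1, -1, -1, 0) == 0xffff0000 serves as a mask that compares
+   the opcode and register fields while ignoring the displacement.  */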
+
+/* Read a PPC instruction from memory.  PPC instructions are always
+   big-endian, no matter what endianness the program is running in, so
+   we can't use read_memory_integer or one of its friends here.  */
+static unsigned int
+read_insn (CORE_ADDR pc)
+{
+  unsigned char buf[4];
+
+  read_memory (pc, buf, 4);
+  return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+}
+
+
+/* An instruction to match.  */
+struct insn_pattern
+{
+  unsigned int mask;            /* mask the insn with this... */
+  unsigned int data;            /* ...and see if it matches this. */
+  int optional;                 /* If non-zero, this insn may be absent.  */
+};
+
+/* Return non-zero if the instructions at PC match the series
+   described in PATTERN, or zero otherwise.  PATTERN is an array of
+   'struct insn_pattern' objects, terminated by an entry whose mask is
+   zero.
+
+   When the match is successful, fill INSN[i] with what PATTERN[i]
+   matched.  If PATTERN[i] is optional, and the instruction wasn't
+   present, set INSN[i] to 0 (which is not a valid PPC instruction).
+   INSN should have as many elements as PATTERN.  Note that, if
+   PATTERN contains optional instructions which aren't present in
+   memory, then INSN will have holes, so INSN[i] isn't necessarily the
+   i'th instruction in memory.  */
+static int
+insns_match_pattern (CORE_ADDR pc,
+                     struct insn_pattern *pattern,
+                     unsigned int *insn)
+{
+  int i;
+
+  for (i = 0; pattern[i].mask; i++)
+    {
+      insn[i] = read_insn (pc);
+      if ((insn[i] & pattern[i].mask) == pattern[i].data)
+        pc += 4;
+      else if (pattern[i].optional)
+        insn[i] = 0;
+      else
+        return 0;
+    }
+
+  return 1;
+}
+
+
+/* Return the 'd' field of the d-form instruction INSN, properly
+   sign-extended.  */
+static CORE_ADDR
+insn_d_field (unsigned int insn)
+{
+  return ((((CORE_ADDR) insn & 0xffff) ^ 0x8000) - 0x8000);
+}
+
+
+/* Return the 'ds' field of the ds-form instruction INSN, with the two
+   zero bits concatenated at the right, and properly
+   sign-extended.  */
+static CORE_ADDR
+insn_ds_field (unsigned int insn)
+{
+  return ((((CORE_ADDR) insn & 0xfffc) ^ 0x8000) - 0x8000);
+}
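+
+/* Sign-extension sketch (illustrative, not in the original source):
+   for the "ld r11,-32616(r12)" instruction in the walkthrough below,
+   the ds field is 0x8098, and
+   (0x8098 ^ 0x8000) - 0x8000 == 0x0098 - 0x8000 == -32616,
+   so the XOR/subtract pair propagates bit 15 across the full
+   CORE_ADDR width without any conditional test.  */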
+
+
+/* If DESC is the address of a 64-bit PowerPC GNU/Linux function
+   descriptor, return the descriptor's entry point.  */
+static CORE_ADDR
+ppc64_desc_entry_point (CORE_ADDR desc)
+{
+  /* The first word of the descriptor is the entry point.  */
+  return (CORE_ADDR) read_memory_unsigned_integer (desc, 8);
+}
+
+
+/* Pattern for the standard linkage function.  These are built by
+   build_plt_stub in elf64-ppc.c, whose GLINK argument is always
+   zero.  */
+static struct insn_pattern ppc64_standard_linkage[] =
+  {
+    /* addis r12, r2, <any> */
+    { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 },
+
+    /* std r2, 40(r1) */
+    { -1, insn_ds (62, 2, 1, 40, 0), 0 },
+
+    /* ld r11, <any>(r12) */
+    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 },
+
+    /* addis r12, r12, 1 <optional> */
+    { insn_d (-1, -1, -1, -1), insn_d (15, 12, 2, 1), 1 },
+
+    /* ld r2, <any>(r12) */
+    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 12, 0, 0), 0 },
+
+    /* addis r12, r12, 1 <optional> */
+    { insn_d (-1, -1, -1, -1), insn_d (15, 12, 2, 1), 1 },
+
+    /* mtctr r11 */
+    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467),
+      0 },
+
+    /* ld r11, <any>(r12) */
+    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 },
+      
+    /* bctr */
+    { -1, 0x4e800420, 0 },
+
+    { 0, 0, 0 }
+  };
+#define PPC64_STANDARD_LINKAGE_LEN \
+  (sizeof (ppc64_standard_linkage) / sizeof (ppc64_standard_linkage[0]))
+
+
+/* Recognize a 64-bit PowerPC GNU/Linux linkage function --- what GDB
+   calls a "solib trampoline".  */
+static int
+ppc64_in_solib_call_trampoline (CORE_ADDR pc, char *name)
+{
+  /* Detecting solib call trampolines on PPC64 GNU/Linux is a pain.
+
+     It's not specifically solib call trampolines that are the issue.
+     Any call from one function to another function that uses a
+     different TOC requires a trampoline, to save the caller's TOC
+     pointer and then load the callee's TOC.  An executable or shared
+     library may have more than one TOC, so even intra-object calls
+     may require a trampoline.  Since executable and shared libraries
+     will all have their own distinct TOCs, every inter-object call is
+     also an inter-TOC call, and requires a trampoline --- so "solib
+     call trampolines" are just a special case.
+
+     The 64-bit PowerPC GNU/Linux ABI calls these call trampolines
+     "linkage functions".  Since they need to be near the functions
+     that call them, they all appear in .text, not in any special
+     section.  The .plt section just contains an array of function
+     descriptors, from which the linkage functions load the callee's
+     entry point, TOC value, and environment pointer.  So
+     in_plt_section is useless.  The linkage functions don't have any
+     special linker symbols to name them, either.
+
+     The only way I can see to recognize them is to actually look at
+     their code.  They're generated by ppc_build_one_stub and some
+     other functions in bfd/elf64-ppc.c, so that should show us all
+     the instruction sequences we need to recognize.  */
+  unsigned int insn[PPC64_STANDARD_LINKAGE_LEN];
+
+  return insns_match_pattern (pc, ppc64_standard_linkage, insn);
+}
+
+
+/* When the dynamic linker is doing lazy symbol resolution, the first
+   call to a function in another object will go like this:
+
+   - The user's function calls the linkage function:
+
+     100007c4:	4b ff fc d5 	bl	10000498
+     100007c8:	e8 41 00 28 	ld	r2,40(r1)
+
+   - The linkage function loads the entry point (and other stuff) from
+     the function descriptor in the PLT, and jumps to it:
+
+     10000498:	3d 82 00 00 	addis	r12,r2,0
+     1000049c:	f8 41 00 28 	std	r2,40(r1)
+     100004a0:	e9 6c 80 98 	ld	r11,-32616(r12)
+     100004a4:	e8 4c 80 a0 	ld	r2,-32608(r12)
+     100004a8:	7d 69 03 a6 	mtctr	r11
+     100004ac:	e9 6c 80 a8 	ld	r11,-32600(r12)
+     100004b0:	4e 80 04 20 	bctr
+
+   - But since this is the first time that PLT entry has been used, it
+     sends control to its glink entry.  That loads the number of the
+     PLT entry and jumps to the common glink0 code:
+
+     10000c98:	38 00 00 00 	li	r0,0
+     10000c9c:	4b ff ff dc 	b	10000c78
+
+   - The common glink0 code then transfers control to the dynamic
+     linker's fixup code:
+
+     10000c78:	e8 41 00 28 	ld	r2,40(r1)
+     10000c7c:	3d 82 00 00 	addis	r12,r2,0
+     10000c80:	e9 6c 80 80 	ld	r11,-32640(r12)
+     10000c84:	e8 4c 80 88 	ld	r2,-32632(r12)
+     10000c88:	7d 69 03 a6 	mtctr	r11
+     10000c8c:	e9 6c 80 90 	ld	r11,-32624(r12)
+     10000c90:	4e 80 04 20 	bctr
+
+   Eventually, this code will figure out how to skip all of this,
+   including the dynamic linker.  At the moment, we just get through
+   the linkage function.  */
+
+/* If the current thread is about to execute a series of instructions
+   at PC matching the ppc64_standard_linkage pattern, and INSN is the result
+   from that pattern match, return the code address to which the
+   standard linkage function will send them.  (This doesn't deal with
+   dynamic linker lazy symbol resolution stubs.)  */
+static CORE_ADDR
+ppc64_standard_linkage_target (CORE_ADDR pc, unsigned int *insn)
+{
+  struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch);
+
+  /* The address of the function descriptor this linkage function
+     references.  */
+  CORE_ADDR desc
+    = ((CORE_ADDR) read_register (tdep->ppc_gp0_regnum + 2)
+       + (insn_d_field (insn[0]) << 16)
+       + insn_ds_field (insn[2]));
+
+  /* The first word of the descriptor is the entry point.  Return that.  */
+  return ppc64_desc_entry_point (desc);
+}
+
+
+/* Given that we've begun executing a call trampoline at PC, return
+   the entry point of the function the trampoline will go to.  */
+static CORE_ADDR
+ppc64_skip_trampoline_code (CORE_ADDR pc)
+{
+  unsigned int ppc64_standard_linkage_insn[PPC64_STANDARD_LINKAGE_LEN];
+
+  if (insns_match_pattern (pc, ppc64_standard_linkage,
+                           ppc64_standard_linkage_insn))
+    return ppc64_standard_linkage_target (pc, ppc64_standard_linkage_insn);
+  else
+    return 0;
+}
+
+
+/* Support for CONVERT_FROM_FUNC_PTR_ADDR (ARCH, ADDR, TARG) on PPC64
+   GNU/Linux.
+
+   Usually a function pointer's representation is simply the address
+   of the function. On GNU/Linux on the 64-bit PowerPC however, a
+   function pointer is represented by a pointer to a TOC entry. This
+   TOC entry contains three words, the first word is the address of
+   the function, the second word is the TOC pointer (r2), and the
+   third word is the static chain value.  Throughout GDB it is
+   currently assumed that a function pointer contains the address of
+   the function, which is not easy to fix.  In addition, the
+   conversion of a function address to a function pointer would
+   require allocation of a TOC entry in the inferior's memory space,
+   with all its drawbacks.  To be able to call C++ virtual methods in
+   the inferior (which are called via function pointers),
+   find_function_addr uses this function to get the function address
+   from a function pointer.  */
+
+/* If ADDR points at what is clearly a function descriptor, transform
+   it into the address of the corresponding function.  Be
+   conservative; otherwise GDB will do the transformation on any
+   random address, such as occurs when there is no symbol table.  */
+
+static CORE_ADDR
+ppc64_linux_convert_from_func_ptr_addr (struct gdbarch *gdbarch,
+					CORE_ADDR addr,
+					struct target_ops *targ)
+{
+  struct section_table *s = target_section_by_addr (targ, addr);
+
+  /* Check if ADDR points to a function descriptor.  */
+  if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
+    return get_target_memory_unsigned (targ, addr, 8);
+
+  return addr;
+}
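+
+/* Layout sketch of the ".opd" entry read above (inferred from the
+   descriptor description earlier in this file; the struct is purely
+   illustrative and is not defined anywhere in GDB):
+
+       struct ppc64_func_desc {
+           uint64_t entry;          - code address, returned above
+           uint64_t toc;            - new r2 value for the callee
+           uint64_t static_chain;   - environment pointer
+       };
+
+   get_target_memory_unsigned (targ, addr, 8) therefore reads only the
+   first doubleword, the entry point.  */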
+
+#ifdef CRASH_MERGE
+enum {
+  PPC_ELF_NGREG = 48,
+  PPC_ELF_NFPREG = 33,
+  PPC_ELF_NVRREG = 33
+};
+
+enum {
+  ELF_GREGSET_SIZE = (PPC_ELF_NGREG * 4),
+  ELF_FPREGSET_SIZE = (PPC_ELF_NFPREG * 8)
+};
+#else
+enum {
+  ELF_NGREG = 48,
+  ELF_NFPREG = 33,
+  ELF_NVRREG = 33
+};
+
+enum {
+  ELF_GREGSET_SIZE = (ELF_NGREG * 4),
+  ELF_FPREGSET_SIZE = (ELF_NFPREG * 8)
+};
+#endif
+
+void
+ppc_linux_supply_gregset (char *buf)
+{
+  int regi;
+  struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch); 
+
+  for (regi = 0; regi < 32; regi++)
+    supply_register (regi, buf + 4 * regi);
+
+  supply_register (PC_REGNUM, buf + 4 * PPC_LINUX_PT_NIP);
+  supply_register (tdep->ppc_lr_regnum, buf + 4 * PPC_LINUX_PT_LNK);
+  supply_register (tdep->ppc_cr_regnum, buf + 4 * PPC_LINUX_PT_CCR);
+  supply_register (tdep->ppc_xer_regnum, buf + 4 * PPC_LINUX_PT_XER);
+  supply_register (tdep->ppc_ctr_regnum, buf + 4 * PPC_LINUX_PT_CTR);
+  if (tdep->ppc_mq_regnum != -1)
+    supply_register (tdep->ppc_mq_regnum, buf + 4 * PPC_LINUX_PT_MQ);
+  supply_register (tdep->ppc_ps_regnum, buf + 4 * PPC_LINUX_PT_MSR);
+}
+
+void
+ppc_linux_supply_fpregset (char *buf)
+{
+  int regi;
+  struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch); 
+
+  for (regi = 0; regi < 32; regi++)
+    supply_register (FP0_REGNUM + regi, buf + 8 * regi);
+
+  /* The FPSCR is stored in the low order word of the last doubleword in the
+     fpregset.  */
+  supply_register (tdep->ppc_fpscr_regnum, buf + 8 * 32 + 4);
+}
+
+/*
+  Use a local version of this function to get the correct types for regsets.
+*/
+
+static void
+fetch_core_registers (char *core_reg_sect,
+		      unsigned core_reg_size,
+		      int which,
+		      CORE_ADDR reg_addr)
+{
+  if (which == 0)
+    {
+      if (core_reg_size == ELF_GREGSET_SIZE)
+	ppc_linux_supply_gregset (core_reg_sect);
+      else
+	warning ("wrong size gregset struct in core file");
+    }
+  else if (which == 2)
+    {
+      if (core_reg_size == ELF_FPREGSET_SIZE)
+	ppc_linux_supply_fpregset (core_reg_sect);
+      else
+	warning ("wrong size fpregset struct in core file");
+    }
+}
+
+/* Register that we are able to handle ELF file formats using standard
+   procfs "regset" structures.  */
+
+static struct core_fns ppc_linux_regset_core_fns =
+{
+  bfd_target_elf_flavour,	/* core_flavour */
+  default_check_format,		/* check_format */
+  default_core_sniffer,		/* core_sniffer */
+  fetch_core_registers,		/* core_read_registers */
+  NULL				/* next */
+};
+
+static void
+ppc_linux_init_abi (struct gdbarch_info info,
+                    struct gdbarch *gdbarch)
+{
+  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
+  if (tdep->wordsize == 4)
+    {
+      /* Until November 2001, gcc did not comply with the 32 bit SysV
+	 R4 ABI requirement that structures less than or equal to 8
+	 bytes should be returned in registers.  Instead GCC was using
+	 the AIX/PowerOpen ABI - everything returned in memory
+	 (well, ignoring vectors, that is).  When this was corrected, it
+	 wasn't fixed for the GNU/Linux native platform.  Use the
+	 PowerOpen struct convention.  */
+      set_gdbarch_return_value (gdbarch, ppc_linux_return_value);
+
+      /* Note: kevinb/2002-04-12: See note in rs6000_gdbarch_init regarding
+	 *_push_arguments().  The same remarks hold for the methods below.  */
+      set_gdbarch_deprecated_frameless_function_invocation (gdbarch, ppc_linux_frameless_function_invocation);
+      set_gdbarch_deprecated_frame_chain (gdbarch, ppc_linux_frame_chain);
+      set_gdbarch_deprecated_frame_saved_pc (gdbarch, ppc_linux_frame_saved_pc);
+
+      set_gdbarch_deprecated_frame_init_saved_regs (gdbarch,
+                                         ppc_linux_frame_init_saved_regs);
+      set_gdbarch_deprecated_init_extra_frame_info (gdbarch,
+                                         ppc_linux_init_extra_frame_info);
+
+      set_gdbarch_memory_remove_breakpoint (gdbarch,
+                                            ppc_linux_memory_remove_breakpoint);
+      /* Shared library handling.  */
+      set_gdbarch_in_solib_call_trampoline (gdbarch, in_plt_section);
+      set_gdbarch_skip_trampoline_code (gdbarch,
+                                        ppc_linux_skip_trampoline_code);
+      set_solib_svr4_fetch_link_map_offsets
+        (gdbarch, ppc_linux_svr4_fetch_link_map_offsets);
+    }
+  
+  if (tdep->wordsize == 8)
+    {
+      /* Handle PPC64 GNU/Linux function pointers (which are really
+         function descriptors).  */
+      set_gdbarch_convert_from_func_ptr_addr
+        (gdbarch, ppc64_linux_convert_from_func_ptr_addr);
+
+      set_gdbarch_in_solib_call_trampoline
+        (gdbarch, ppc64_in_solib_call_trampoline);
+      set_gdbarch_skip_trampoline_code (gdbarch, ppc64_skip_trampoline_code);
+
+      /* PPC64 malloc's entry-point is called ".malloc".  */
+      set_gdbarch_name_of_malloc (gdbarch, ".malloc");
+    }
+}
+
+void
+_initialize_ppc_linux_tdep (void)
+{
+  /* Register for all sub-families of the POWER/PowerPC: 32-bit and
+     64-bit PowerPC, and the older rs6k.  */
+  gdbarch_register_osabi (bfd_arch_powerpc, bfd_mach_ppc, GDB_OSABI_LINUX,
+                         ppc_linux_init_abi);
+  gdbarch_register_osabi (bfd_arch_powerpc, bfd_mach_ppc64, GDB_OSABI_LINUX,
+                         ppc_linux_init_abi);
+  gdbarch_register_osabi (bfd_arch_rs6000, bfd_mach_rs6k, GDB_OSABI_LINUX,
+                         ppc_linux_init_abi);
+  add_core_fns (&ppc_linux_regset_core_fns);
+}
--- crash/s390_dump.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/s390_dump.c	2006-10-11 09:14:35.000000000 -0400
@@ -1,8 +1,8 @@
 /* s390_dump.c - core analysis suite
  *
  * Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
  * Copyright (C) 2005 Michael Holzheu, IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,7 +16,7 @@
  * GNU General Public License for more details.
  */
 #include "defs.h"
-#include <asm/page.h>
+//#include <asm/page.h>
 #include "ibm_common.h"
 
 static FILE * s390_file;
@@ -69,10 +69,13 @@
 	return WRITE_ERROR;
 }
 
+#define S390_PAGE_SHIFT   12
+#define S390_PAGE_SIZE    (1UL << S390_PAGE_SHIFT)
+
 uint
 s390_page_size(void)
 {
-	return PAGE_SIZE;
+	return S390_PAGE_SIZE;
 }
 
 int 
--- crash/gdb-6.1.patch.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/gdb-6.1.patch	2009-01-22 16:26:30.000000000 -0500
@@ -0,0 +1,319 @@
+--- gdb-6.1.orig/bfd/coff-alpha.c
++++ gdb-6.1/bfd/coff-alpha.c
+@@ -1455,7 +1455,7 @@ alpha_relocate_section (output_bfd, info
+ 	  amt = sizeof (struct ecoff_section_tdata);
+ 	  lita_sec_data = ((struct ecoff_section_tdata *)
+ 			   bfd_zalloc (input_bfd, amt));
+-	  ecoff_section_data (input_bfd, lita_sec) = lita_sec_data;
++	  lita_sec->used_by_bfd = lita_sec_data;
+ 	}
+ 
+       if (lita_sec_data->gp != 0)
+--- gdb-6.1.orig/sim/ppc/debug.c
++++ gdb-6.1/sim/ppc/debug.c
+@@ -28,6 +28,7 @@
+ #ifdef HAVE_STDLIB_H
+ #include <stdlib.h>
+ #endif
++#include <string.h>
+ 
+ int ppc_trace[nr_trace_options];
+ 
+--- gdb-6.1.orig/gdb/remote.c
++++ gdb-6.1/gdb/remote.c
+@@ -3445,7 +3445,7 @@ remote_store_registers (int regnum)
+   {
+     int i;
+     regs = alloca (rs->sizeof_g_packet);
+-    memset (regs, rs->sizeof_g_packet, 0);
++    memset (regs, 0, rs->sizeof_g_packet);
+     for (i = 0; i < NUM_REGS + NUM_PSEUDO_REGS; i++)
+       {
+ 	struct packet_reg *r = &rs->regs[i];
+--- gdb-6.1.orig/gdb/std-regs.c
++++ gdb-6.1/gdb/std-regs.c
+@@ -61,7 +61,7 @@ value_of_builtin_frame_reg (struct frame
+   val = allocate_value (builtin_type_frame_reg);
+   VALUE_LVAL (val) = not_lval;
+   buf = VALUE_CONTENTS_RAW (val);
+-  memset (buf, TYPE_LENGTH (VALUE_TYPE (val)), 0);
++  memset (buf, 0, TYPE_LENGTH (VALUE_TYPE (val)));
+   /* frame.base.  */
+   if (frame != NULL)
+     ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf,
+@@ -87,7 +87,7 @@ value_of_builtin_frame_fp_reg (struct fr
+       struct value *val = allocate_value (builtin_type_void_data_ptr);
+       char *buf = VALUE_CONTENTS_RAW (val);
+       if (frame == NULL)
+-	memset (buf, TYPE_LENGTH (VALUE_TYPE (val)), 0);
++	memset (buf, 0, TYPE_LENGTH (VALUE_TYPE (val)));
+       else
+ 	ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf,
+ 			    get_frame_base_address (frame));
+@@ -105,7 +105,7 @@ value_of_builtin_frame_pc_reg (struct fr
+       struct value *val = allocate_value (builtin_type_void_data_ptr);
+       char *buf = VALUE_CONTENTS_RAW (val);
+       if (frame == NULL)
+-	memset (buf, TYPE_LENGTH (VALUE_TYPE (val)), 0);
++	memset (buf, 0, TYPE_LENGTH (VALUE_TYPE (val)));
+       else
+ 	ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf,
+ 			    get_frame_pc (frame));
+--- gdb-6.1.orig/gdb/dwarf2-frame.c
++++ gdb-6.1/gdb/dwarf2-frame.c
+@@ -1353,7 +1353,9 @@ decode_frame_entry_1 (struct comp_unit *
+ 	  else if (*augmentation == 'P')
+ 	    {
+ 	      /* Skip.  */
+-	      buf += size_of_encoded_value (*buf++);
++//	      buf += size_of_encoded_value (*buf++);
++              buf += size_of_encoded_value(*buf);
++              buf++;
+ 	      augmentation++;
+ 	    }
+ 
+--- gdb-6.1/opcodes/i386-dis.c.orig
++++ gdb-6.1/opcodes/i386-dis.c
+@@ -2092,6 +2092,10 @@ print_insn (bfd_vma pc, disassemble_info
+       dp = &dis386_twobyte[*++codep];
+       need_modrm = twobyte_has_modrm[*codep];
+       uses_SSE_prefix = twobyte_uses_SSE_prefix[*codep];
++      if (dp->name && strcmp(dp->name, "ud2a") == 0) {
++	extern int kernel_BUG_encoding_bytes(void);
++	codep += kernel_BUG_encoding_bytes();
++      }
+     }
+   else
+     {
+--- gdb-6.1/gdb/amd64-linux-nat.c.orig	2008-02-19 08:59:33.000000000 -0500
++++ gdb-6.1/gdb/amd64-linux-nat.c	2008-02-19 09:27:23.000000000 -0500
+@@ -28,6 +28,7 @@
+ 
+ #include "gdb_assert.h"
+ #include "gdb_string.h"
++typedef unsigned int u32;
+ #include <sys/ptrace.h>
+ #include <sys/debugreg.h>
+ #include <sys/syscall.h>
+--- gdb-6.1/gdb/cli/cli-cmds.c.orig
++++ gdb-6.1/gdb/cli/cli-cmds.c
+@@ -37,6 +37,7 @@
+ #include "objfiles.h"
+ #include "source.h"
+ #include "disasm.h"
++#include "gdb_stat.h"
+ 
+ #include "ui-out.h"
+ 
+@@ -54,6 +55,8 @@
+ #define GDBINIT_FILENAME        ".gdbinit"
+ #endif
+ 
++#include <fcntl.h>
++
+ /* Prototypes for local command functions */
+ 
+ static void complete_command (char *, int);
+@@ -441,12 +444,30 @@ source_command (char *args, int from_tty
+   stream = fopen (file, FOPEN_RT);
+   if (!stream)
+     {
+-      if (from_tty)
++      if (from_tty > 0)
+ 	perror_with_name (file);
+       else
+ 	return;
+     }
+ 
++  if (from_tty == -1)
++    {
++      struct stat statbuf;
++      int fd = fileno (stream);
++      if (fstat (fd, &statbuf) < 0)
++	{
++	  perror_with_name (file);
++	  fclose (stream);
++	  return;
++	}
++      if (statbuf.st_uid != getuid () || (statbuf.st_mode & S_IWOTH))
++	{
++	  extern void untrusted_file(FILE *, char *); untrusted_file(NULL, file);
++	  fclose (stream);
++	  return;
++	}
++    }
++
+   script_from_file (stream, file);
+ 
+   do_cleanups (old_cleanups);
+--- gdb-6.1/gdb/dwarfread.c.orig
++++ gdb-6.1/gdb/dwarfread.c
+@@ -2138,9 +2138,7 @@ decode_line_numbers (char *linetable)
+ 
+    NOTES
+ 
+-   Note that stack[0] is unused except as a default error return.
+-   Note that stack overflow is not yet handled.
+- */
++   Note that stack[0] is unused except as a default error return. */
+ 
+ static int
+ locval (struct dieinfo *dip)
+@@ -2160,7 +2158,7 @@ locval (struct dieinfo *dip)
+   loc += nbytes;
+   end = loc + locsize;
+   stacki = 0;
+-  stack[stacki] = 0;
++  stack[++stacki] = 0;
+   dip->isreg = 0;
+   dip->offreg = 0;
+   dip->optimized_out = 1;
+@@ -2224,6 +2222,16 @@ locval (struct dieinfo *dip)
+ 	  stacki--;
+ 	  break;
+ 	}
++      /* Enforce maximum stack depth of size-1 to avoid ++stacki writing
++         outside of the allocated space. Also enforce minimum > 0.
++         -- wad@google.com 14 Aug 2006 */
++      if (stacki >= sizeof (stack) / sizeof (*stack) - 1)
++	internal_error (__FILE__, __LINE__,
++	                _("location description stack too deep: %d"),
++	                stacki);
++      if (stacki <= 0)
++	internal_error (__FILE__, __LINE__,
++	                _("location description stack too shallow"));
+     }
+   return (stack[stacki]);
+ }
+--- gdb-6.1/bfd/elfcode.h.orig
++++ gdb-6.1/bfd/elfcode.h
+@@ -33,7 +33,7 @@ Foundation, Inc., 59 Temple Place - Suit
+ /* Problems and other issues to resolve.
+ 
+    (1)	BFD expects there to be some fixed number of "sections" in
+-        the object file.  I.E. there is a "section_count" variable in the
++	the object file.  I.E. there is a "section_count" variable in the
+ 	bfd structure which contains the number of sections.  However, ELF
+ 	supports multiple "views" of a file.  In particular, with current
+ 	implementations, executable files typically have two tables, a
+@@ -629,8 +629,13 @@ elf_object_p (bfd *abfd)
+ 
+   if (i_ehdrp->e_shoff != 0)
+     {
++      bfd_signed_vma where = i_ehdrp->e_shoff;
++
++      if (where != (file_ptr) where)
++	goto got_wrong_format_error;
++
+       /* Seek to the section header table in the file.  */
+-      if (bfd_seek (abfd, (file_ptr) i_ehdrp->e_shoff, SEEK_SET) != 0)
++      if (bfd_seek (abfd, (file_ptr) where, SEEK_SET) != 0)
+ 	goto got_no_match;
+ 
+       /* Read the first section header at index 0, and convert to internal
+@@ -642,11 +647,46 @@ elf_object_p (bfd *abfd)
+       /* If the section count is zero, the actual count is in the first
+ 	 section header.  */
+       if (i_ehdrp->e_shnum == SHN_UNDEF)
+-	i_ehdrp->e_shnum = i_shdr.sh_size;
++	{
++	  i_ehdrp->e_shnum = i_shdr.sh_size;
++	  if (i_ehdrp->e_shnum != i_shdr.sh_size
++	      || i_ehdrp->e_shnum == 0)
++	    goto got_wrong_format_error;
++	}
+ 
+       /* And similarly for the string table index.  */
+       if (i_ehdrp->e_shstrndx == SHN_XINDEX)
+-	i_ehdrp->e_shstrndx = i_shdr.sh_link;
++	{
++	  i_ehdrp->e_shstrndx = i_shdr.sh_link;
++	  if (i_ehdrp->e_shstrndx != i_shdr.sh_link)
++	    goto got_wrong_format_error;
++	}
++
++      /* Sanity check that we can read all of the section headers.
++	 It ought to be good enough to just read the last one.  */
++      if (i_ehdrp->e_shnum != 1)
++	{
++	  /* Check that we don't have a totally silly number of sections.  */
++	  if (i_ehdrp->e_shnum > (unsigned int) -1 / sizeof (x_shdr)
++	      || i_ehdrp->e_shnum > (unsigned int) -1 / sizeof (i_shdr))
++	    goto got_wrong_format_error;
++
++	  where += (i_ehdrp->e_shnum - 1) * sizeof (x_shdr);
++	  if (where != (file_ptr) where)
++	    goto got_wrong_format_error;
++	  if ((bfd_size_type) where <= i_ehdrp->e_shoff)
++	    goto got_wrong_format_error;
++
++	  if (bfd_seek (abfd, (file_ptr) where, SEEK_SET) != 0)
++	    goto got_no_match;
++	  if (bfd_bread (&x_shdr, sizeof x_shdr, abfd) != sizeof (x_shdr))
++	    goto got_no_match;
++
++	  /* Back to where we were.  */
++	  where = i_ehdrp->e_shoff + sizeof (x_shdr);
++	  if (bfd_seek (abfd, (file_ptr) where, SEEK_SET) != 0)
++	    goto got_no_match;
++	}
+     }
+ 
+   /* Allocate space for a copy of the section header table in
+@@ -690,6 +730,20 @@ elf_object_p (bfd *abfd)
+ 	    goto got_no_match;
+ 	  elf_swap_shdr_in (abfd, &x_shdr, i_shdrp + shindex);
+ 
++	  /* Sanity check sh_link and sh_info.  */
++	  if (i_shdrp[shindex].sh_link >= num_sec
++	      || (i_shdrp[shindex].sh_link >= SHN_LORESERVE
++		  && i_shdrp[shindex].sh_link <= SHN_HIRESERVE))
++	    goto got_wrong_format_error;
++
++	  if (((i_shdrp[shindex].sh_flags & SHF_INFO_LINK)
++	       || i_shdrp[shindex].sh_type == SHT_RELA
++	       || i_shdrp[shindex].sh_type == SHT_REL)
++	      && (i_shdrp[shindex].sh_info >= num_sec
++		  || (i_shdrp[shindex].sh_info >= SHN_LORESERVE
++		      && i_shdrp[shindex].sh_info <= SHN_HIRESERVE)))
++	    goto got_wrong_format_error;
++
+ 	  /* If the section is loaded, but not page aligned, clear
+ 	     D_PAGED.  */
+ 	  if (i_shdrp[shindex].sh_size != 0
+@@ -708,6 +762,17 @@ elf_object_p (bfd *abfd)
+ 	goto got_no_match;
+     }
+ 
++  /* A further sanity check.  */
++  if (i_ehdrp->e_shnum != 0)
++    {
++      if (i_ehdrp->e_shstrndx >= elf_numsections (abfd)
++	  || (i_ehdrp->e_shstrndx >= SHN_LORESERVE
++	      && i_ehdrp->e_shstrndx <= SHN_HIRESERVE))
++	goto got_wrong_format_error;
++    }
++  else if (i_ehdrp->e_shstrndx != 0)
++    goto got_wrong_format_error;
++
+   /* Read in the program headers.  */
+   if (i_ehdrp->e_phnum == 0)
+     elf_tdata (abfd)->phdr = NULL;
+@@ -1071,7 +1136,7 @@ elf_slurp_symbol_table (bfd *abfd, asymb
+ 	     symcount);
+ 
+ 	  /* Slurp in the symbols without the version information,
+-             since that is more helpful than just quitting.  */
++	     since that is more helpful than just quitting.  */
+ 	  verhdr = NULL;
+ 	}
+ 
+@@ -1138,7 +1203,7 @@ elf_slurp_symbol_table (bfd *abfd, asymb
+ 	    sym->symbol.section = bfd_abs_section_ptr;
+ 
+ 	  /* If this is a relocatable file, then the symbol value is
+-             already section relative.  */
++	     already section relative.  */
+ 	  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
+ 	    sym->symbol.value -= sym->symbol.section->vma;
+ 
--- crash/kernel.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/kernel.c	2009-02-04 16:53:05.000000000 -0500
@@ -1,8 +1,8 @@
 /* kernel.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,11 +16,12 @@
  */
 
 #include "defs.h"
+#include "xen_hyper_defs.h"
 #include <elf.h>
 
 static void do_module_cmd(ulong, char *, ulong, char *, char *);
 static char *find_module_objfile(char *, char *, char *);
-static char *get_uptime(char *);
+static char *module_objfile_search(char *, char *, char *);
 static char *get_loadavg(char *);
 static void get_lkcd_regs(struct bt_info *, ulong *, ulong *);
 static void dump_sys_call_table(char *, int);
@@ -42,330 +43,589 @@
 static void verify_namelist(void);
 static char *debug_kernel_version(char *);
 static int restore_stack(struct bt_info *);
+static ulong __xen_m2p(ulonglong, ulong);
+static int search_mapping_page(ulong, ulong *, ulong *, ulong *);
+static void read_in_kernel_config_err(int, char *);
+static void BUG_bytes_init(void);
+static int BUG_x86(void);
+static int BUG_x86_64(void);
+static void cpu_maps_init(void);
 
 
 /*
  *  Gather a few kernel basics.
  */
 void
-kernel_init(int when)
+kernel_init()
 {
-	int i;
-	char *p1, *p2, buf[BUFSIZE];;
+	int i, c;
+	char *p1, *p2, buf[BUFSIZE];
 	struct syment *sp1, *sp2;
+	char *rqstruct;
+	char *irq_desc_type_name;	
 
 	if (pc->flags & KERNEL_DEBUG_QUERY)
 		return;
 
-	switch (when)
-	{
-	case PRE_GDB:
-		kt->stext = symbol_value("_stext");
-		kt->etext = symbol_value("_etext");
-		get_text_init_space(); 
-		if (symbol_exists("__init_begin")) {
-			kt->init_begin = symbol_value("__init_begin");
-			kt->init_end = symbol_value("__init_end");
-		}
-		kt->end = symbol_value("_end");
+        if (!(kt->cpu_flags = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
+                error(FATAL, "cannot malloc cpu_flags array");
+
+	cpu_maps_init();
+
+	kt->stext = symbol_value("_stext");
+	kt->etext = symbol_value("_etext");
+	get_text_init_space(); 
+	if (symbol_exists("__init_begin")) {
+		kt->init_begin = symbol_value("__init_begin");
+		kt->init_end = symbol_value("__init_end");
+	}
+	kt->end = symbol_value("_end");
 	
-		if (symbol_exists("smp_num_cpus")) {
-			kt->flags |= SMP;
-			get_symbol_data("smp_num_cpus", sizeof(int), &kt->cpus);
-			if (kt->cpus < 1 || kt->cpus > NR_CPUS)
-				error(WARNING, 
-				    "invalid value: smp_num_cpus: %d\n",
-					kt->cpus);
-		} else if (symbol_exists("__per_cpu_offset")) {
-			kt->flags |= SMP;
-			kt->cpus = 1;
-		} else 
-			kt->cpus = 1;
-
-		if ((sp1 = symbol_search("__per_cpu_start")) &&
-	 	    (sp2 =  symbol_search("__per_cpu_end")) &&
-		    (sp1->type == 'A') && (sp2->type == 'A') &&
-		    (sp2->value > sp1->value))
-			kt->flags |= SMP|PER_CPU_OFF;
+	/*
+	 *  For the traditional (non-pv_ops) Xen architecture, default to writable 
+         *  page tables unless:
+	 *  
+	 *  (1) it's an "xm save" CANONICAL_PAGE_TABLES dumpfile,  or
+	 *  (2) the --shadow_page_tables option was explicitly entered.  
+	 *
+	 *  But if the "phys_to_maching_mapping" array does not exist, and 
+         *  it's not an "xm save" canonical dumpfile, then we have no choice 
+         *  but to presume shadow page tables.
+	 */ 
+	if (!PVOPS() && symbol_exists("xen_start_info")) {
+		kt->flags |= ARCH_XEN;
+		if (!(kt->xen_flags & (SHADOW_PAGE_TABLES|CANONICAL_PAGE_TABLES)))
+			kt->xen_flags |= WRITABLE_PAGE_TABLES;
+		if (symbol_exists("phys_to_machine_mapping"))
+         		get_symbol_data("phys_to_machine_mapping", sizeof(ulong),
+                       		&kt->phys_to_machine_mapping);
+		else if (!(kt->xen_flags & CANONICAL_PAGE_TABLES)) {
+			kt->xen_flags &= ~WRITABLE_PAGE_TABLES;
+			kt->xen_flags |= SHADOW_PAGE_TABLES;
+		}
+		if (machine_type("X86"))
+                	get_symbol_data("max_pfn", sizeof(ulong), &kt->p2m_table_size);
+		if (machine_type("X86_64")) {
+			/*
+			 * kernel version <  2.6.27 => end_pfn
+			 * kernel version >= 2.6.27 => max_pfn
+			 */
+			if (!try_get_symbol_data("end_pfn", sizeof(ulong), &kt->p2m_table_size))
+				get_symbol_data("max_pfn", sizeof(ulong), &kt->p2m_table_size);
+		}
+                if ((kt->m2p_page = (char *)malloc(PAGESIZE())) == NULL)
+                       	error(FATAL, "cannot malloc m2p page.");
+	}
+
+	if (symbol_exists("smp_num_cpus")) {
+		kt->flags |= SMP;
+		get_symbol_data("smp_num_cpus", sizeof(int), &kt->cpus);
+		if (kt->cpus < 1 || kt->cpus > NR_CPUS)
+			error(WARNING, 
+			    "invalid value: smp_num_cpus: %d\n",
+				kt->cpus);
+	} else if (symbol_exists("__per_cpu_offset")) {
+		kt->flags |= SMP;
+		kt->cpus = 1;
+	} else 
+		kt->cpus = 1;
+
+	if ((sp1 = symbol_search("__per_cpu_start")) &&
+ 	    (sp2 = symbol_search("__per_cpu_end")) &&
+	    (sp1->type == 'A' || sp1->type == 'D') && 
+	    (sp2->type == 'A' || sp2->type == 'D') &&
+	    (sp2->value > sp1->value))
+		kt->flags |= SMP|PER_CPU_OFF;
 	
-		get_symbol_data("xtime", sizeof(struct timespec), &kt->date);
+	get_symbol_data("xtime", sizeof(struct timespec), &kt->date);
 	
-		if (pc->flags & GET_TIMESTAMP) {
-	        	fprintf(fp, "%s\n\n", 
-				strip_linefeeds(ctime(&kt->date.tv_sec)));
-			clean_exit(0);
-		}
+	if (pc->flags & GET_TIMESTAMP) {
+        	fprintf(fp, "%s\n\n", 
+			strip_linefeeds(ctime(&kt->date.tv_sec)));
+		clean_exit(0);
+	}
 	
-	        readmem(symbol_value("system_utsname"), KVADDR, &kt->utsname,
-	                sizeof(struct new_utsname), "system_utsname", 
-			FAULT_ON_ERROR);
-		strncpy(buf, kt->utsname.release, MIN(strlen(kt->utsname.release), 65));
-		if (ascii_string(kt->utsname.release)) {
-			p1 = p2 = buf;
-			while (*p2 != '.')
-				p2++;
-			*p2 = NULLCHAR;
-			kt->kernel_version[0] = atoi(p1);
-			p1 = ++p2;
-			while (*p2 != '.')
-				p2++;
-			*p2 = NULLCHAR;
-			kt->kernel_version[1] = atoi(p1);
-			p1 = ++p2;
-			while ((*p2 >= '0') && (*p2 <= '9'))
-				p2++;
-			*p2 = NULLCHAR;
-			kt->kernel_version[2] = atoi(p1);
-		}
-		break;
+	if (symbol_exists("system_utsname"))
+        	readmem(symbol_value("system_utsname"), KVADDR, &kt->utsname,
+                	sizeof(struct new_utsname), "system_utsname", 
+			RETURN_ON_ERROR);
+	else if (symbol_exists("init_uts_ns"))
+		readmem(symbol_value("init_uts_ns") + sizeof(int),
+			KVADDR,  &kt->utsname, sizeof(struct new_utsname), 
+			"init_uts_ns", RETURN_ON_ERROR);
+	else
+		error(INFO, "cannot access utsname information\n\n");
 
-	case POST_GDB:
-		if (symbol_exists("__per_cpu_offset")) {
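+	/*
+	 *  Parse the "major.minor.patch" prefix of the release string into
+	 *  kt->kernel_version[].  For example, a (hypothetical) release
+	 *  string of "2.6.18-128.el5" yields {2, 6, 18}; the scan stops at
+	 *  the first non-digit after the patch level, so the "-128.el5"
+	 *  style suffix is ignored.
+	 */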
+	strncpy(buf, kt->utsname.release, MIN(strlen(kt->utsname.release), 65));
+	if (ascii_string(kt->utsname.release)) {
+		p1 = p2 = buf;
+		while (*p2 != '.')
+			p2++;
+		*p2 = NULLCHAR;
+		kt->kernel_version[0] = atoi(p1);
+		p1 = ++p2;
+		while (*p2 != '.')
+			p2++;
+		*p2 = NULLCHAR;
+		kt->kernel_version[1] = atoi(p1);
+		p1 = ++p2;
+		while ((*p2 >= '0') && (*p2 <= '9'))
+			p2++;
+		*p2 = NULLCHAR;
+		kt->kernel_version[2] = atoi(p1);
+
+		if (CRASHDEBUG(1))
+			fprintf(fp, "base kernel version: %d.%d.%d\n",
+				kt->kernel_version[0],
+				kt->kernel_version[1],
+				kt->kernel_version[2]);
+	} else
+		error(INFO, "cannot determine base kernel version\n");
+
+
+	verify_version();
+
+	if (symbol_exists("__per_cpu_offset")) {
+		if (LKCD_KERNTYPES())
+			i = get_cpus_possible();
+		else
 			i = get_array_length("__per_cpu_offset", NULL, 0);
-			get_symbol_data("__per_cpu_offset", 
-				sizeof(long)*(i <= NR_CPUS ? i : NR_CPUS),
-				&kt->__per_cpu_offset[0]); 
-			kt->flags |= PER_CPU_OFF;
-		}
-		MEMBER_OFFSET_INIT(runqueue_cpu, "runqueue", "cpu");
-		if (VALID_MEMBER(runqueue_cpu)) {
-			MEMBER_OFFSET_INIT(cpu_s_curr, "cpu_s", "curr");
-			MEMBER_OFFSET_INIT(cpu_s_idle, "cpu_s", "idle");
-		 	STRUCT_SIZE_INIT(cpu_s, "cpu_s"); 
-			kt->runq_siblings = get_array_length("runqueue.cpu", 
-				NULL, 0);
-			if (symbol_exists("__cpu_idx") &&
-			    symbol_exists("__rq_idx")) {
-				if (!readmem(symbol_value("__cpu_idx"), KVADDR, 
-			            &kt->__cpu_idx[0], sizeof(long) * NR_CPUS,
-	                            "__cpu_idx[NR_CPUS]", RETURN_ON_ERROR))
-					error(INFO, 
-				      "cannot read __cpu_idx[NR_CPUS] array\n");
-				if (!readmem(symbol_value("__rq_idx"), KVADDR, 
-			            &kt->__rq_idx[0], sizeof(long) * NR_CPUS,
-	                            "__rq_idx[NR_CPUS]", RETURN_ON_ERROR))
-					error(INFO, 
-				       "cannot read __rq_idx[NR_CPUS] array\n");
-			} else if (kt->runq_siblings > 1) 
-				error(INFO, 
-	     "runq_siblings: %d: __cpu_idx and __rq_idx arrays don't exist?\n",
-					kt->runq_siblings);
-		} else {
-			MEMBER_OFFSET_INIT(runqueue_idle, "runqueue", "idle");
-			MEMBER_OFFSET_INIT(runqueue_curr, "runqueue", "curr");
-		}
-		MEMBER_OFFSET_INIT(runqueue_active, "runqueue", "active");
-		MEMBER_OFFSET_INIT(runqueue_expired, "runqueue", "expired");
-		MEMBER_OFFSET_INIT(runqueue_arrays, "runqueue", "arrays");
-		MEMBER_OFFSET_INIT(prio_array_queue, "prio_array", "queue");
-                MEMBER_OFFSET_INIT(prio_array_nr_active, "prio_array",
-                        "nr_active");
-		STRUCT_SIZE_INIT(runqueue, "runqueue"); 
-		STRUCT_SIZE_INIT(prio_array, "prio_array"); 
-
-               /*
-                *  In 2.4, smp_send_stop() sets smp_num_cpus back to 1
-                *  in some, but not all, architectures.  So if a count
-                *  of 1 is found, be suspicious, and check the
-                *  init_tasks[NR_CPUS] array (also intro'd in 2.4),
-                *  for idle thread addresses.  For 2.2, prepare for the
-	        *  eventuality by verifying the cpu count with the machine
-		*  dependent count.
-                */
-                if ((kt->flags & SMP) && DUMPFILE() && (kt->cpus == 1)) {
-                        if (symbol_exists("init_tasks")) {
-                                ulong init_tasks[NR_CPUS];
-				int nr_cpus;
-
-				BZERO(&init_tasks[0], sizeof(ulong) * NR_CPUS);
-
-				nr_cpus = get_array_length("init_tasks", 
-					NULL, 0);
-				if ((nr_cpus < 1) || (nr_cpus > NR_CPUS))
-                                        nr_cpus = NR_CPUS;
-
-				get_idle_threads(&init_tasks[0], nr_cpus);
-
-                                for (i = kt->cpus = 0; i < nr_cpus; i++)
-                                        if (init_tasks[i])
-                                                kt->cpus++;
-                	} else 
-				kt->cpus = machdep->get_smp_cpus();
-		}
+		get_symbol_data("__per_cpu_offset",
+			sizeof(long)*((i && (i <= NR_CPUS)) ? i : NR_CPUS),
+			&kt->__per_cpu_offset[0]);
+                kt->flags |= PER_CPU_OFF;
+	}
+	if (STRUCT_EXISTS("runqueue"))
+		rqstruct = "runqueue";
+	else if (STRUCT_EXISTS("rq"))
+		rqstruct = "rq";
 
-		if ((kt->flags & SMP) && ACTIVE() && (kt->cpus == 1) &&
-		    (kt->flags & PER_CPU_OFF))
+	MEMBER_OFFSET_INIT(runqueue_cpu, rqstruct, "cpu");
+	/*
+	 * 'cpu' does not exist in 'struct rq'.
+	 */
+	if (VALID_MEMBER(runqueue_cpu) &&
+	    (get_array_length("runqueue.cpu", NULL, 0) > 0)) {
+		MEMBER_OFFSET_INIT(cpu_s_curr, "cpu_s", "curr");
+		MEMBER_OFFSET_INIT(cpu_s_idle, "cpu_s", "idle");
+	 	STRUCT_SIZE_INIT(cpu_s, "cpu_s"); 
+		kt->runq_siblings = get_array_length("runqueue.cpu", 
+			NULL, 0);
+		if (symbol_exists("__cpu_idx") &&
+		    symbol_exists("__rq_idx")) {
+			if (!(kt->__cpu_idx = (long *)
+			    calloc(NR_CPUS, sizeof(long))))
+				error(FATAL, "cannot malloc __cpu_idx array");
+			if (!(kt->__rq_idx = (long *)
+			    calloc(NR_CPUS, sizeof(long))))
+				error(FATAL, "cannot malloc __rq_idx array");
+			if (!readmem(symbol_value("__cpu_idx"), KVADDR, 
+		            &kt->__cpu_idx[0], sizeof(long) * NR_CPUS,
+                            "__cpu_idx[NR_CPUS]", RETURN_ON_ERROR))
+				error(INFO, 
+			            "cannot read __cpu_idx[NR_CPUS] array\n");
+			if (!readmem(symbol_value("__rq_idx"), KVADDR, 
+		            &kt->__rq_idx[0], sizeof(long) * NR_CPUS,
+                            "__rq_idx[NR_CPUS]", RETURN_ON_ERROR))
+				error(INFO, 
+			           "cannot read __rq_idx[NR_CPUS] array\n");
+		} else if (kt->runq_siblings > 1) 
+			error(INFO, 
+     	   "runq_siblings: %d: __cpu_idx and __rq_idx arrays don't exist?\n",
+				kt->runq_siblings);
+	} else {
+		MEMBER_OFFSET_INIT(runqueue_idle, rqstruct, "idle");
+		MEMBER_OFFSET_INIT(runqueue_curr, rqstruct, "curr");
+		ASSIGN_OFFSET(runqueue_cpu) = INVALID_OFFSET;
+	}
+	MEMBER_OFFSET_INIT(runqueue_active, rqstruct, "active");
+	MEMBER_OFFSET_INIT(runqueue_expired, rqstruct, "expired");
+	MEMBER_OFFSET_INIT(runqueue_arrays, rqstruct, "arrays");
+	MEMBER_OFFSET_INIT(prio_array_queue, "prio_array", "queue");
+        MEMBER_OFFSET_INIT(prio_array_nr_active, "prio_array", "nr_active");
+	STRUCT_SIZE_INIT(runqueue, rqstruct); 
+	STRUCT_SIZE_INIT(prio_array, "prio_array"); 
+
+	MEMBER_OFFSET_INIT(rq_cfs, "rq", "cfs");
+
+       /*
+        *  In 2.4, smp_send_stop() sets smp_num_cpus back to 1
+        *  in some, but not all, architectures.  So if a count
+        *  of 1 is found, be suspicious, and check the
+        *  init_tasks[NR_CPUS] array (also intro'd in 2.4),
+        *  for idle thread addresses.  For 2.2, prepare for the
+        *  eventuality by verifying the cpu count with the machine
+        *  dependent count.
+        */
+        if ((kt->flags & SMP) && DUMPFILE() && (kt->cpus == 1)) {
+                if (symbol_exists("init_tasks")) {
+                        ulong init_tasks[NR_CPUS];
+			int nr_cpus;
+
+			BZERO(&init_tasks[0], sizeof(ulong) * NR_CPUS);
+
+			nr_cpus = get_array_length("init_tasks", NULL, 0);
+			if ((nr_cpus < 1) || (nr_cpus > NR_CPUS))
+                                nr_cpus = NR_CPUS;
+
+			get_idle_threads(&init_tasks[0], nr_cpus);
+
+                        for (i = kt->cpus = 0; i < nr_cpus; i++)
+                                if (init_tasks[i])
+                                        kt->cpus++;
+                } else 
 			kt->cpus = machdep->get_smp_cpus();
+	}
 
-		if (kt->cpus > NR_CPUS) {
-			error(WARNING, 
-       "calculated number of cpus (%d) greater than compiled-in NR_CPUS (%d)\n",
-				kt->cpus, NR_CPUS);
-			error(FATAL, "recompile crash with larger NR_CPUS\n");
-		}
-
-		STRUCT_SIZE_INIT(spinlock_t, "spinlock_t");
-		verify_spinlock();
-
-		STRUCT_SIZE_INIT(list_head, "list_head"); 
-		MEMBER_OFFSET_INIT(list_head_next, "list_head", "next"); 
-		MEMBER_OFFSET_INIT(list_head_prev, "list_head", "prev"); 
-		if (OFFSET(list_head_next) != 0)
-		    error(WARNING, 
-			"list_head.next offset: %ld: list command may fail\n",
-				OFFSET(list_head_next));
-
-        	MEMBER_OFFSET_INIT(hlist_node_next, "hlist_node", "next");
-        	MEMBER_OFFSET_INIT(hlist_node_pprev, "hlist_node", "pprev");
-		STRUCT_SIZE_INIT(hlist_head, "hlist_head"); 
-		STRUCT_SIZE_INIT(hlist_node, "hlist_node"); 
-
-		MEMBER_OFFSET_INIT(irq_desc_t_status,  "irq_desc_t", "status");
-		MEMBER_OFFSET_INIT(irq_desc_t_handler, "irq_desc_t", "handler");
-		MEMBER_OFFSET_INIT(irq_desc_t_action, "irq_desc_t", "action");
-		MEMBER_OFFSET_INIT(irq_desc_t_depth, "irq_desc_t", "depth");
-		MEMBER_OFFSET_INIT(hw_interrupt_type_typename, 
+	if ((kt->flags & SMP) && ACTIVE() && (kt->cpus == 1) &&
+	    (kt->flags & PER_CPU_OFF))
+		kt->cpus = machdep->get_smp_cpus();
+
+	if (kt->cpus_override && (c = atoi(kt->cpus_override))) {
+		error(WARNING, "forcing cpu count to: %d\n\n", c);
+		kt->cpus = c;
+	}
+
+	if (kt->cpus > NR_CPUS) {
+		error(WARNING, 
+       "%s number of cpus (%d) greater than compiled-in NR_CPUS (%d)\n",
+			kt->cpus_override && atoi(kt->cpus_override) ? 
+			"configured" : "calculated", kt->cpus, NR_CPUS);
+		error(FATAL, "recompile crash with larger NR_CPUS\n");
+	}
+
+	STRUCT_SIZE_INIT(spinlock_t, "spinlock_t");
+	verify_spinlock();
+
+	STRUCT_SIZE_INIT(list_head, "list_head"); 
+	MEMBER_OFFSET_INIT(list_head_next, "list_head", "next"); 
+	MEMBER_OFFSET_INIT(list_head_prev, "list_head", "prev"); 
+	if (OFFSET(list_head_next) != 0)
+	    	error(WARNING, 
+		    "list_head.next offset: %ld: list command may fail\n",
+			OFFSET(list_head_next));
+
+        MEMBER_OFFSET_INIT(hlist_node_next, "hlist_node", "next");
+        MEMBER_OFFSET_INIT(hlist_node_pprev, "hlist_node", "pprev");
+	STRUCT_SIZE_INIT(hlist_head, "hlist_head"); 
+	STRUCT_SIZE_INIT(hlist_node, "hlist_node"); 
+
+	if (STRUCT_EXISTS("irq_desc_t"))
+		irq_desc_type_name = "irq_desc_t";
+	else
+		irq_desc_type_name = "irq_desc";
+
+	STRUCT_SIZE_INIT(irq_desc_t, irq_desc_type_name);
+	MEMBER_OFFSET_INIT(irq_desc_t_status, irq_desc_type_name, "status");
+	if (MEMBER_EXISTS(irq_desc_type_name, "handler"))
+		MEMBER_OFFSET_INIT(irq_desc_t_handler, irq_desc_type_name, "handler");
+	else
+		MEMBER_OFFSET_INIT(irq_desc_t_chip, irq_desc_type_name, "chip");
+	MEMBER_OFFSET_INIT(irq_desc_t_action, irq_desc_type_name, "action");
+	MEMBER_OFFSET_INIT(irq_desc_t_depth, irq_desc_type_name, "depth");
+	if (STRUCT_EXISTS("hw_interrupt_type")) {
+		MEMBER_OFFSET_INIT(hw_interrupt_type_typename,
 			"hw_interrupt_type", "typename");
 		MEMBER_OFFSET_INIT(hw_interrupt_type_startup,
 			"hw_interrupt_type", "startup");
 		MEMBER_OFFSET_INIT(hw_interrupt_type_shutdown,
 			"hw_interrupt_type", "shutdown");
-		MEMBER_OFFSET_INIT(hw_interrupt_type_handle, 
-                        "hw_interrupt_type", "handle");
+		MEMBER_OFFSET_INIT(hw_interrupt_type_handle,
+        	        "hw_interrupt_type", "handle");
 		MEMBER_OFFSET_INIT(hw_interrupt_type_enable,
 			"hw_interrupt_type", "enable");
 		MEMBER_OFFSET_INIT(hw_interrupt_type_disable,
 			"hw_interrupt_type", "disable");
-		MEMBER_OFFSET_INIT(hw_interrupt_type_ack, 
+		MEMBER_OFFSET_INIT(hw_interrupt_type_ack,
 			"hw_interrupt_type", "ack");
-		MEMBER_OFFSET_INIT(hw_interrupt_type_end, 
+		MEMBER_OFFSET_INIT(hw_interrupt_type_end,
 			"hw_interrupt_type", "end");
 		MEMBER_OFFSET_INIT(hw_interrupt_type_set_affinity,
 			"hw_interrupt_type", "set_affinity");
-		MEMBER_OFFSET_INIT(irqaction_handler, "irqaction", "handler");
-		MEMBER_OFFSET_INIT(irqaction_flags, "irqaction", "flags");
-		MEMBER_OFFSET_INIT(irqaction_mask, "irqaction", "mask");
-		MEMBER_OFFSET_INIT(irqaction_name, "irqaction", "name");
-		MEMBER_OFFSET_INIT(irqaction_dev_id, "irqaction", "dev_id");
-		MEMBER_OFFSET_INIT(irqaction_next, "irqaction", "next");
-
-		STRUCT_SIZE_INIT(irq_desc_t, "irq_desc_t");
-
-                STRUCT_SIZE_INIT(irq_cpustat_t, "irq_cpustat_t");
-                MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_active, 
-                        "irq_cpustat_t", "__softirq_active");
-                MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_mask, 
-                        "irq_cpustat_t", "__softirq_mask");
-
-                STRUCT_SIZE_INIT(timer_list, "timer_list");
-                MEMBER_OFFSET_INIT(timer_list_list, "timer_list", "list");
-                MEMBER_OFFSET_INIT(timer_list_next, "timer_list", "next");
-                MEMBER_OFFSET_INIT(timer_list_entry, "timer_list", "entry");
-                MEMBER_OFFSET_INIT(timer_list_expires, "timer_list", "expires");
-                MEMBER_OFFSET_INIT(timer_list_function, 
-                        "timer_list", "function");
-                STRUCT_SIZE_INIT(timer_vec_root, "timer_vec_root");
-		if (VALID_STRUCT(timer_vec_root))
-                	MEMBER_OFFSET_INIT(timer_vec_root_vec, 
-				"timer_vec_root", "vec");
-                STRUCT_SIZE_INIT(timer_vec, "timer_vec");
-		if (VALID_STRUCT(timer_vec))
-                	MEMBER_OFFSET_INIT(timer_vec_vec, "timer_vec", "vec");
-
-	        STRUCT_SIZE_INIT(tvec_root_s, "tvec_root_s");
-                if (VALID_STRUCT(tvec_root_s)) {
-                	STRUCT_SIZE_INIT(tvec_t_base_s, "tvec_t_base_s");
-                        MEMBER_OFFSET_INIT(tvec_t_base_s_tv1,
-                                "tvec_t_base_s", "tv1");
-	               	MEMBER_OFFSET_INIT(tvec_root_s_vec, 
-				"tvec_root_s", "vec");
-	                STRUCT_SIZE_INIT(tvec_s, "tvec_s");
-	               	MEMBER_OFFSET_INIT(tvec_s_vec, "tvec_s", "vec");
-		}
-
-                STRUCT_SIZE_INIT(__wait_queue, "__wait_queue");
-                if (VALID_STRUCT(__wait_queue)) {
-                        MEMBER_OFFSET_INIT(__wait_queue_task,
-                                "__wait_queue", "task");
-                        MEMBER_OFFSET_INIT(__wait_queue_head_task_list,
-                                "__wait_queue_head", "task_list");
-                        MEMBER_OFFSET_INIT(__wait_queue_task_list,
-                                "__wait_queue", "task_list");
-                } else {
-                	STRUCT_SIZE_INIT(wait_queue, "wait_queue");
-			if (VALID_STRUCT(wait_queue)) {
-                		MEMBER_OFFSET_INIT(wait_queue_task, 
-					"wait_queue", "task");
-                		MEMBER_OFFSET_INIT(wait_queue_next, 
-					"wait_queue", "next");
-			}
+	} else {
+		/*
+		 *  On later kernels, hw_interrupt_type was replaced
+		 *  by irq_chip.
+		 */
+		MEMBER_OFFSET_INIT(irq_chip_typename,
+			"irq_chip", "name");
+		MEMBER_OFFSET_INIT(irq_chip_startup,
+			"irq_chip", "startup");
+		MEMBER_OFFSET_INIT(irq_chip_shutdown,
+			"irq_chip", "shutdown");
+		MEMBER_OFFSET_INIT(irq_chip_enable,
+			"irq_chip", "enable");
+		MEMBER_OFFSET_INIT(irq_chip_disable,
+			"irq_chip", "disable");
+		MEMBER_OFFSET_INIT(irq_chip_ack,
+			"irq_chip", "ack");
+		MEMBER_OFFSET_INIT(irq_chip_mask,
+			"irq_chip", "mask");
+		MEMBER_OFFSET_INIT(irq_chip_mask_ack,
+			"irq_chip", "mask_ack");
+		MEMBER_OFFSET_INIT(irq_chip_unmask,
+			"irq_chip", "unmask");
+		MEMBER_OFFSET_INIT(irq_chip_eoi,
+			"irq_chip", "eoi");
+		MEMBER_OFFSET_INIT(irq_chip_end,
+			"irq_chip", "end");
+		MEMBER_OFFSET_INIT(irq_chip_set_affinity,
+			"irq_chip", "set_affinity");
+		MEMBER_OFFSET_INIT(irq_chip_retrigger,
+			"irq_chip", "retrigger");
+		MEMBER_OFFSET_INIT(irq_chip_set_type,
+			"irq_chip", "set_type");
+		MEMBER_OFFSET_INIT(irq_chip_set_wake,
+			"irq_chip", "set_wake");
+	}
+	MEMBER_OFFSET_INIT(irqaction_handler, "irqaction", "handler");
+	MEMBER_OFFSET_INIT(irqaction_flags, "irqaction", "flags");
+	MEMBER_OFFSET_INIT(irqaction_mask, "irqaction", "mask");
+	MEMBER_OFFSET_INIT(irqaction_name, "irqaction", "name");
+	MEMBER_OFFSET_INIT(irqaction_dev_id, "irqaction", "dev_id");
+	MEMBER_OFFSET_INIT(irqaction_next, "irqaction", "next");
+
+        STRUCT_SIZE_INIT(irq_cpustat_t, "irq_cpustat_t");
+        MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_active, 
+                "irq_cpustat_t", "__softirq_active");
+        MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_mask, 
+                "irq_cpustat_t", "__softirq_mask");
+
+        STRUCT_SIZE_INIT(timer_list, "timer_list");
+        MEMBER_OFFSET_INIT(timer_list_list, "timer_list", "list");
+        MEMBER_OFFSET_INIT(timer_list_next, "timer_list", "next");
+        MEMBER_OFFSET_INIT(timer_list_entry, "timer_list", "entry");
+        MEMBER_OFFSET_INIT(timer_list_expires, "timer_list", "expires");
+        MEMBER_OFFSET_INIT(timer_list_function, "timer_list", "function");
+        STRUCT_SIZE_INIT(timer_vec_root, "timer_vec_root");
+	if (VALID_STRUCT(timer_vec_root))
+               	MEMBER_OFFSET_INIT(timer_vec_root_vec, 
+			"timer_vec_root", "vec");
+        STRUCT_SIZE_INIT(timer_vec, "timer_vec");
+	if (VALID_STRUCT(timer_vec))
+               	MEMBER_OFFSET_INIT(timer_vec_vec, "timer_vec", "vec");
+
+	STRUCT_SIZE_INIT(tvec_root_s, "tvec_root_s");
+        if (VALID_STRUCT(tvec_root_s)) {
+               	STRUCT_SIZE_INIT(tvec_t_base_s, "tvec_t_base_s");
+                MEMBER_OFFSET_INIT(tvec_t_base_s_tv1,
+                        "tvec_t_base_s", "tv1");
+	        MEMBER_OFFSET_INIT(tvec_root_s_vec, 
+			"tvec_root_s", "vec");
+	        STRUCT_SIZE_INIT(tvec_s, "tvec_s");
+	        MEMBER_OFFSET_INIT(tvec_s_vec, "tvec_s", "vec");
+	} else {
+		STRUCT_SIZE_INIT(tvec_root_s, "tvec_root");
+        	if (VALID_STRUCT(tvec_root_s)) {
+               		STRUCT_SIZE_INIT(tvec_t_base_s, "tvec_base");
+                	MEMBER_OFFSET_INIT(tvec_t_base_s_tv1,
+                        	"tvec_base", "tv1");
+	        	MEMBER_OFFSET_INIT(tvec_root_s_vec, 
+				"tvec_root", "vec");
+	        	STRUCT_SIZE_INIT(tvec_s, "tvec");
+	        	MEMBER_OFFSET_INIT(tvec_s_vec, "tvec", "vec");
+		}
+	}
+        STRUCT_SIZE_INIT(__wait_queue, "__wait_queue");
+        if (VALID_STRUCT(__wait_queue)) {
+		if (MEMBER_EXISTS("__wait_queue", "task"))
+			MEMBER_OFFSET_INIT(__wait_queue_task,
+				"__wait_queue", "task");
+		else
+			MEMBER_OFFSET_INIT(__wait_queue_task,
+				"__wait_queue", "private");
+                MEMBER_OFFSET_INIT(__wait_queue_head_task_list,
+                        "__wait_queue_head", "task_list");
+                MEMBER_OFFSET_INIT(__wait_queue_task_list,
+                        "__wait_queue", "task_list");
+        } else {
+               	STRUCT_SIZE_INIT(wait_queue, "wait_queue");
+		if (VALID_STRUCT(wait_queue)) {
+               		MEMBER_OFFSET_INIT(wait_queue_task, 
+				"wait_queue", "task");
+               		MEMBER_OFFSET_INIT(wait_queue_next, 
+				"wait_queue", "next");
 		}
+	}
 
-		STRUCT_SIZE_INIT(pt_regs, "pt_regs");
-		STRUCT_SIZE_INIT(softirq_state, "softirq_state");
-		STRUCT_SIZE_INIT(desc_struct, "desc_struct");
-
-		STRUCT_SIZE_INIT(char_device_struct, "char_device_struct");
-		if (VALID_STRUCT(char_device_struct)) {
-			MEMBER_OFFSET_INIT(char_device_struct_next,
-				"char_device_struct", "next");
-			MEMBER_OFFSET_INIT(char_device_struct_name,
-				"char_device_struct", "name");
-			MEMBER_OFFSET_INIT(char_device_struct_fops,
-				"char_device_struct", "fops");
-			MEMBER_OFFSET_INIT(char_device_struct_major,
-				"char_device_struct", "major");
-		}
-
-	        MEMBER_OFFSET_INIT(module_kallsyms_start, "module", 
-			"kallsyms_start");
-
-		STRUCT_SIZE_INIT(kallsyms_header, "kallsyms_header");
-
-		if (VALID_MEMBER(module_kallsyms_start) &&
-		    VALID_SIZE(kallsyms_header)) {
-        		MEMBER_OFFSET_INIT(kallsyms_header_sections,
-				"kallsyms_header", "sections");
-        		MEMBER_OFFSET_INIT(kallsyms_header_section_off,
-				"kallsyms_header", "section_off");
-        		MEMBER_OFFSET_INIT(kallsyms_header_symbols,
-				"kallsyms_header", "symbols");
-        		MEMBER_OFFSET_INIT(kallsyms_header_symbol_off,
-				"kallsyms_header", "symbol_off");
-        		MEMBER_OFFSET_INIT(kallsyms_header_string_off,
-				"kallsyms_header", "string_off");
-        		MEMBER_OFFSET_INIT(kallsyms_symbol_section_off,
-				"kallsyms_symbol", "section_off");
-        		MEMBER_OFFSET_INIT(kallsyms_symbol_symbol_addr,
-				"kallsyms_symbol", "symbol_addr");
-        		MEMBER_OFFSET_INIT(kallsyms_symbol_name_off,
-				"kallsyms_symbol", "name_off");
-        		MEMBER_OFFSET_INIT(kallsyms_section_start,
-				"kallsyms_section", "start");
-        		MEMBER_OFFSET_INIT(kallsyms_section_size,
-				"kallsyms_section", "size");
-        		MEMBER_OFFSET_INIT(kallsyms_section_name_off,
-				"kallsyms_section", "name_off");
-			STRUCT_SIZE_INIT(kallsyms_symbol, "kallsyms_symbol");
-			STRUCT_SIZE_INIT(kallsyms_section, "kallsyms_section");
+	STRUCT_SIZE_INIT(pt_regs, "pt_regs");
+	STRUCT_SIZE_INIT(softirq_state, "softirq_state");
+	STRUCT_SIZE_INIT(desc_struct, "desc_struct");
+
+	STRUCT_SIZE_INIT(char_device_struct, "char_device_struct");
+	if (VALID_STRUCT(char_device_struct)) {
+		MEMBER_OFFSET_INIT(char_device_struct_next,
+			"char_device_struct", "next");
+		MEMBER_OFFSET_INIT(char_device_struct_name,
+			"char_device_struct", "name");
+		MEMBER_OFFSET_INIT(char_device_struct_fops,
+			"char_device_struct", "fops");
+		MEMBER_OFFSET_INIT(char_device_struct_major,
+			"char_device_struct", "major");
+	}
+
+	MEMBER_OFFSET_INIT(module_kallsyms_start, "module", 
+		"kallsyms_start");
+
+	STRUCT_SIZE_INIT(kallsyms_header, "kallsyms_header");
+
+	if (VALID_MEMBER(module_kallsyms_start) &&
+	    VALID_SIZE(kallsyms_header)) {
+        	MEMBER_OFFSET_INIT(kallsyms_header_sections,
+			"kallsyms_header", "sections");
+        	MEMBER_OFFSET_INIT(kallsyms_header_section_off,
+			"kallsyms_header", "section_off");
+        	MEMBER_OFFSET_INIT(kallsyms_header_symbols,
+			"kallsyms_header", "symbols");
+        	MEMBER_OFFSET_INIT(kallsyms_header_symbol_off,
+			"kallsyms_header", "symbol_off");
+        	MEMBER_OFFSET_INIT(kallsyms_header_string_off,
+			"kallsyms_header", "string_off");
+        	MEMBER_OFFSET_INIT(kallsyms_symbol_section_off,
+			"kallsyms_symbol", "section_off");
+        	MEMBER_OFFSET_INIT(kallsyms_symbol_symbol_addr,
+			"kallsyms_symbol", "symbol_addr");
+        	MEMBER_OFFSET_INIT(kallsyms_symbol_name_off,
+			"kallsyms_symbol", "name_off");
+        	MEMBER_OFFSET_INIT(kallsyms_section_start,
+			"kallsyms_section", "start");
+        	MEMBER_OFFSET_INIT(kallsyms_section_size,
+			"kallsyms_section", "size");
+        	MEMBER_OFFSET_INIT(kallsyms_section_name_off,
+			"kallsyms_section", "name_off");
+		STRUCT_SIZE_INIT(kallsyms_symbol, "kallsyms_symbol");
+		STRUCT_SIZE_INIT(kallsyms_section, "kallsyms_section");
 			
-			if (!(kt->flags & NO_KALLSYMS))
-				kt->flags |= KALLSYMS_V1;
-		}
+		if (!(kt->flags & NO_KALLSYMS))
+			kt->flags |= KALLSYMS_V1;
+	}
 
-		MEMBER_OFFSET_INIT(module_num_symtab, "module", "num_symtab");
+	MEMBER_OFFSET_INIT(module_num_symtab, "module", "num_symtab");
 
-		if (VALID_MEMBER(module_num_symtab)) {
-			MEMBER_OFFSET_INIT(module_symtab, "module", "symtab");
-			MEMBER_OFFSET_INIT(module_strtab, "module", "strtab");
+	if (VALID_MEMBER(module_num_symtab)) {
+		MEMBER_OFFSET_INIT(module_symtab, "module", "symtab");
+		MEMBER_OFFSET_INIT(module_strtab, "module", "strtab");
 			
-			if (!(kt->flags & NO_KALLSYMS))
-				kt->flags |= KALLSYMS_V2;
+		if (!(kt->flags & NO_KALLSYMS))
+			kt->flags |= KALLSYMS_V2;
+	}
+
+	if (!(kt->flags & DWARF_UNWIND))
+		kt->flags |= NO_DWARF_UNWIND; 
+
+	/* 
+	 *  OpenVZ 
+	 */
+	if (kernel_symbol_exists("pcpu_info") && 
+	    STRUCT_EXISTS("pcpu_info") && STRUCT_EXISTS("vcpu_struct")) {
+		MEMBER_OFFSET_INIT(pcpu_info_vcpu, "pcpu_info", "vcpu");
+		MEMBER_OFFSET_INIT(pcpu_info_idle, "pcpu_info", "idle");
+		MEMBER_OFFSET_INIT(vcpu_struct_rq, "vcpu_struct", "rq");
+		STRUCT_SIZE_INIT(pcpu_info, "pcpu_info");
+		STRUCT_SIZE_INIT(vcpu_struct, "vcpu_struct");
+		kt->flags |= ARCH_OPENVZ;
+	}
+
+	BUG_bytes_init();
+}
+
+/*
+ *  If the cpu_present_map, cpu_online_map and cpu_possible_map symbols exist,
+ *  set up the kt->cpu_flags[NR_CPUS] with their settings.
+ */ 
+static void
+cpu_maps_init(void)
+{
+        int i, c, m, cpu, len;
+        char *buf;
+        ulong *maskptr;
+	struct mapinfo {
+		ulong cpu_flag;
+		char *name;
+	} mapinfo[] = {
+		{ POSSIBLE, "cpu_possible_map" },
+		{ PRESENT, "cpu_present_map" },
+		{ ONLINE, "cpu_online_map" },
+	};
+
+	if ((len = STRUCT_SIZE("cpumask_t")) < 0)
+		len = sizeof(ulong);
+
+	buf = GETBUF(len);
+
+	for (m = 0; m < sizeof(mapinfo)/sizeof(struct mapinfo); m++) {
+		if (!kernel_symbol_exists(mapinfo[m].name))
+			continue;
+
+		if (!readmem(symbol_value(mapinfo[m].name), KVADDR, buf, len,
+		    mapinfo[m].name, RETURN_ON_ERROR)) {
+			error(WARNING, "cannot read %s\n", mapinfo[m].name);
+			continue;
 		}
-		break;
+
+		maskptr = (ulong *)buf;
+		for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++) {
+			if (*maskptr == 0)
+				continue;
+			for (c = 0; c < BITS_PER_LONG; c++)
+				if (*maskptr & (0x1UL << c)) {
+					cpu = (i * BITS_PER_LONG) + c;
+					kt->cpu_flags[cpu] |= mapinfo[m].cpu_flag;
+				}
+		}
+
+		if (CRASHDEBUG(1)) {
+			fprintf(fp, "%s: ", mapinfo[m].name);
+			for (i = 0; i < NR_CPUS; i++) {
+				if (kt->cpu_flags[i] & mapinfo[m].cpu_flag)
+					fprintf(fp, "%d ", i);
+			}
+			fprintf(fp, "\n");
+		}
+
+	}
+
+	FREEBUF(buf);
+}
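+
+/*
+ *  Illustration of the mask decoding above, assuming a 64-bit kernel
+ *  (BITS_PER_LONG == 64): if word 1 of cpu_online_map contains 0x9,
+ *  bits 0 and 3 are set, so cpus (1 * 64) + 0 = 64 and (1 * 64) + 3 = 67
+ *  are marked ONLINE in kt->cpu_flags[].
+ */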
+
+/*
+ *  Determine whether a cpu is in one of the cpu masks.
+ */
+int
+in_cpu_map(int map, int cpu)
+{
+	if (cpu >= (kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS)) {
+		error(INFO, "in_cpu_map: invalid cpu: %d\n", cpu);
+		return FALSE;
+	}
+
+	switch (map)
+	{
+	case POSSIBLE:
+		if (!kernel_symbol_exists("cpu_possible_map")) {
+			error(INFO, "cpu_possible_map does not exist\n");
+			return FALSE;
+		}
+		return (kt->cpu_flags[cpu] & POSSIBLE);
+
+	case PRESENT:
+		if (!kernel_symbol_exists("cpu_present_map")) {
+			error(INFO, "cpu_present_map does not exist\n");
+			return FALSE;
+		}
+		return (kt->cpu_flags[cpu] & PRESENT);
+
+	case ONLINE:
+		if (!kernel_symbol_exists("cpu_online_map")) {
+			error(INFO, "cpu_online_map does not exist\n");
+			return FALSE;
+		}
+		return (kt->cpu_flags[cpu] & ONLINE);
 	}
+
+	return FALSE;
 }
 
+
 /*
  *  For lack of a better manner of verifying that the namelist and dumpfile
  *  (or live kernel) match up, verify that the Linux banner is where
@@ -377,7 +637,7 @@
 {
 	char buf[BUFSIZE];
 	ulong linux_banner;
-        int argc;
+        int argc, len;
         char *arglist[MAXARGS];
 	char *p1, *p2;
 	struct syment *sp;
@@ -389,7 +649,7 @@
 
 	if (!(sp = symbol_search("linux_banner")))
 		error(FATAL, "linux_banner symbol does not exist?\n");
-	else if (sp->type == 'R')
+	else if ((sp->type == 'R') || (sp->type == 'r'))
 		linux_banner = symbol_value("linux_banner");
 	else
 		get_symbol_data("linux_banner", sizeof(ulong), &linux_banner);
@@ -405,9 +665,10 @@
 		error(WARNING, "cannot read linux_banner string\n");
 
 	if (ACTIVE()) {
-		if (strlen(kt->proc_version) && !STREQ(buf, kt->proc_version)) {
+		len = strlen(kt->proc_version);
+		if ((len > 0) && (strncmp(buf, kt->proc_version, len) != 0)) {
                		if (CRASHDEBUG(1)) {
-                        	fprintf(fp, "/proc/version:\n%s", 
+                        	fprintf(fp, "/proc/version:\n%s\n", 
 					kt->proc_version);
                         	fprintf(fp, "linux_banner:\n%s\n", buf);
                 	}
@@ -422,7 +683,7 @@
                         	fprintf(fp, "linux_banner:\n%s\n", buf);
 			goto bad_match;
 		}
-		strcpy(kt->proc_version, buf);
+		strcpy(kt->proc_version, strip_linefeeds(buf));
 	}
 
 	verify_namelist();
@@ -471,6 +732,9 @@
 		}
 	}
 
+	if (CRASHDEBUG(1))
+		gdb_readnow_warning();
+
 	return;
 
 bad_match:
@@ -614,6 +878,10 @@
 	if (pc->flags & KERNEL_DEBUG_QUERY)
 		return;
 
+	/* the kerntypes may not match in terms of gcc version or SMP */
+	if (LKCD_KERNTYPES())
+		return;
+
 	if (!strlen(kt->utsname.version))
 		return;
 
@@ -633,7 +901,7 @@
 		if (!strstr(buffer, "Linux version 2."))
 			continue;
 
-                if (STREQ(buffer, kt->proc_version)) {
+                if (strstr(buffer, kt->proc_version)) {
                 	found = TRUE;
 			break;
 		}
@@ -680,7 +948,7 @@
 	if (found) {
                 if (CRASHDEBUG(1)) {
                 	fprintf(fp, "verify_namelist:\n");
-			fprintf(fp, "/proc/version:\n%s", kt->proc_version);
+			fprintf(fp, "/proc/version:\n%s\n", kt->proc_version);
 			fprintf(fp, "utsname version: %s\n",
 				kt->utsname.version);
 			fprintf(fp, "%s:\n%s\n", namelist, buffer);
@@ -689,8 +957,12 @@
 	}
 
         if (CRASHDEBUG(1)) {
+		error(WARNING, 
+		    "\ncannot find matching kernel version in %s file:\n\n",
+			namelist);
+			
                	fprintf(fp, "verify_namelist:\n");
-                fprintf(fp, "/proc/version:\n%s", kt->proc_version);
+                fprintf(fp, "/proc/version:\n%s\n", kt->proc_version);
                 fprintf(fp, "utsname version: %s\n", kt->utsname.version);
                 fprintf(fp, "%s:\n%s\n", namelist, buffer2);
         }
@@ -740,7 +1012,7 @@
 {
 	int c;
 	int do_load_module_filter, do_machdep_filter, reverse; 
-	int unfiltered, user_mode, count_entered;
+	int unfiltered, user_mode, count_entered, bug_bytes_entered;
 	ulong curaddr;
 	ulong revtarget;
 	ulong count;
@@ -754,7 +1026,16 @@
 	char buf4[BUFSIZE];
 	char buf5[BUFSIZE];
 	
-	reverse = count_entered = FALSE;
+	if ((argcnt == 2) && STREQ(args[1], "-b")) {
+		fprintf(fp, "encoded bytes being skipped after ud2a: ");
+		if (kt->BUG_bytes < 0)
+			fprintf(fp, "undetermined\n");
+		else
+			fprintf(fp, "%d\n", kt->BUG_bytes);
+		return;
+	}
+
+	reverse = count_entered = bug_bytes_entered = FALSE;
 	sp = NULL;
 	unfiltered = user_mode = do_machdep_filter = do_load_module_filter = 0;
 
@@ -763,7 +1044,7 @@
 	req->flags |= GNU_FROM_TTY_OFF|GNU_RETURN_ON_ERROR;
 	req->count = 1;
 
-        while ((c = getopt(argcnt, args, "ulrx")) != EOF) {
+        while ((c = getopt(argcnt, args, "ulrxb:B:")) != EOF) {
                 switch(c)
 		{
 		case 'x':
@@ -786,6 +1067,12 @@
 			BZERO(buf4, BUFSIZE);
 			break;
 
+		case 'B':
+		case 'b':
+			kt->BUG_bytes = atoi(optarg);
+			bug_bytes_entered = TRUE;
+			break;
+
 		default:
 			argerrs++;
 			break;
@@ -846,7 +1133,7 @@
 		if (user_mode) {
                 	sprintf(buf1, "x/%ldi 0x%lx",  
 				req->count ? req->count : 1, req->addr);
-			pc->cmdgenspec = pc->cmdgencur;
+			pc->curcmd_flags |= MEMTYPE_UVADDR;
         		gdb_pass_through(buf1, NULL, 0);
 			return;
 		}
@@ -962,7 +1249,9 @@
 			close_tmpfile();
 		}
         }
-        else cmd_usage(pc->curcmd, SYNOPSIS);
+        else if (bug_bytes_entered)
+		return;
+	else cmd_usage(pc->curcmd, SYNOPSIS);
 
 	if (!reverse) {
 		FREEBUF(req->buf);
@@ -1053,6 +1342,185 @@
 	FREEBUF(req);
 }
 
+/*
+ *  x86 and x86_64 kernels may have file/line-number encoding
+ *  asm()'d in just after the "ud2a" instruction, which confuses
+ *  the disassembler and the x86 backtracer.  Determine the 
+ *  number of bytes to skip.
+ */
+static void
+BUG_bytes_init(void)
+{
+	if (machine_type("X86"))
+		kt->BUG_bytes = BUG_x86();
+	else if (machine_type("X86_64"))
+		kt->BUG_bytes = BUG_x86_64();
+}
+
+static int
+BUG_x86(void)
+{
+	struct syment *sp, *spn;
+	char buf1[BUFSIZE];
+	char buf2[BUFSIZE];
+	char *arglist[MAXARGS];
+	ulong vaddr, fileptr;
+	int found;
+
+	/*
+	 *  Prior to 2.4.19, a call to do_BUG() preceded
+	 *  the standalone ud2a instruction.
+	 */ 
+	if (THIS_KERNEL_VERSION < LINUX(2,4,19))
+		return 0;
+
+	/*
+	 *  2.6.20 introduced __bug_table support for i386, 
+	 *  but even if CONFIG_DEBUG_BUGVERBOSE is not configured,
+	 *  the ud2a stands alone.
+	 */
+	if (THIS_KERNEL_VERSION >= LINUX(2,6,20))
+		return 0;
+
+	/*
+	 *  For previous kernel versions, it may depend upon 
+	 *  whether CONFIG_DEBUG_BUGVERBOSE was configured:
+	 *
+	 *   #ifdef CONFIG_DEBUG_BUGVERBOSE
+	 *   #define BUG()                           \
+	 *    __asm__ __volatile__(  "ud2\n"         \
+	 *                           "\t.word %c0\n" \
+	 *                           "\t.long %c1\n" \
+	 *                            : : "i" (__LINE__), "i" (__FILE__))
+	 *   #else
+	 *   #define BUG() __asm__ __volatile__("ud2\n")
+	 *   #endif
+	 *
+  	 *  But that's not necessarily true, since there are
+	 *  pre-2.6.11 versions that force it like so:
+	 *
+         *   #if 1   /- Set to zero for a slightly smaller kernel -/
+         *   #define BUG()                           \
+         *    __asm__ __volatile__(  "ud2\n"         \
+         *                           "\t.word %c0\n" \
+         *                           "\t.long %c1\n" \
+         *                            : : "i" (__LINE__), "i" (__FILE__))
+         *   #else
+         *   #define BUG() __asm__ __volatile__("ud2\n")
+         *   #endif
+	 */
+
+	/*
+	 *  This works if in-kernel config data is available.
+	 */
+	if ((THIS_KERNEL_VERSION >= LINUX(2,6,11)) &&
+	    (kt->flags & BUGVERBOSE_OFF))
+		return 0;
+
+	/*
+	 *  At this point, it's a pretty safe bet that it's configured,
+	 *  but to be sure, disassemble a known BUG() caller and
+	 *  verify that the encoding is there.
+	 */
+
+#define X86_BUG_BYTES (6)  /* sizeof(short) + sizeof(pointer) */
+
+	if (!(sp = symbol_search("do_exit")) ||
+	    !(spn = next_symbol(NULL, sp)))
+		return X86_BUG_BYTES;
+
+	sprintf(buf1, "x/%ldi 0x%lx", spn->value - sp->value, sp->value);
+
+	found = FALSE;
+	open_tmpfile();
+	gdb_pass_through(buf1, pc->tmpfile, GNU_RETURN_ON_ERROR);
+	rewind(pc->tmpfile);
+	while (fgets(buf2, BUFSIZE, pc->tmpfile)) {
+		if (parse_line(buf2, arglist) < 3)
+			continue;
+
+		if ((vaddr = htol(arglist[0], RETURN_ON_ERROR, NULL)) >= spn->value)
+			continue; 
+
+		if (STREQ(arglist[2], "ud2a")) {
+			found = TRUE;
+			break;
+		}
+	}
+	close_tmpfile();
+
+        if (!found || !readmem(vaddr+4, KVADDR, &fileptr, sizeof(ulong),
+            "BUG filename pointer", RETURN_ON_ERROR|QUIET))
+		return X86_BUG_BYTES;
+
+	if (!IS_KVADDR(fileptr)) {
+		if (CRASHDEBUG(1))
+			fprintf(fp, 
+			    "no filename pointer: kt->BUG_bytes: 0\n");
+		return 0;
+	}
+
+	if (!read_string(fileptr, buf1, BUFSIZE-1))
+		error(WARNING, 
+		    "cannot read BUG (ud2a) encoded filename address: %lx\n",
+			fileptr);
+	else if (CRASHDEBUG(1))
+		fprintf(fp, "BUG bytes filename encoding: [%s]\n", buf1);
+
+	return X86_BUG_BYTES;
+}
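+
+/*
+ *  With CONFIG_DEBUG_BUGVERBOSE configured, the i386 BUG() shown above
+ *  emits:
+ *
+ *      ud2a              2 bytes
+ *      .word __LINE__    2 bytes
+ *      .long __FILE__    4 bytes  (the pointer read at vaddr+4 above)
+ *
+ *  i.e. X86_BUG_BYTES (6) encoded bytes follow the ud2a instruction.
+ */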
+
+static int
+BUG_x86_64(void)
+{
+        /*
+         *  2.6.20 introduced __bug_table support for x86_64,
+         *  but even if CONFIG_DEBUG_BUGVERBOSE is not configured,
+	 *  the ud2a stands alone.
+         */
+        if (THIS_KERNEL_VERSION >= LINUX(2,6,20))
+                return 0;
+
+	/*
+	 *  The original bug_frame structure looks like this, which
+	 *  causes the disassembler to go off into the weeds:
+	 *
+	 *    struct bug_frame { 
+	 *        unsigned char ud2[2];          
+	 *        char *filename;  
+	 *        unsigned short line; 
+	 *    } 
+	 *  
+	 *  In 2.6.13, fake push and ret instructions were encoded 
+	 *  into the frame so that the disassembly would at least 
+	 *  "work", although the two fake instructions show nonsensical
+	 *  arguments:
+	 *
+	 *    struct bug_frame {
+	 *        unsigned char ud2[2];
+	 *        unsigned char push;
+	 *        signed int filename;
+	 *        unsigned char ret;
+	 *        unsigned short line;
+	 *    }
+	 */  
+
+	if (STRUCT_EXISTS("bug_frame"))
+		return (int)(STRUCT_SIZE("bug_frame") - 2);
+
+	return 0;
+}
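+
+/*
+ *  Worked example, assuming the structures above are packed as the
+ *  x86_64 headers of that era declare them: the original layout is
+ *  2 + 8 + 2 = 12 bytes, leaving 10 encoded bytes after the ud2a;
+ *  the 2.6.13 layout is 2 + 1 + 4 + 1 + 2 = 10 bytes, leaving 8.
+ *  In both cases STRUCT_SIZE("bug_frame") - 2 yields the right count.
+ */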
+
+
+/*
+ *  Callback from gdb disassembly code.
+ */
+int
+kernel_BUG_encoding_bytes(void)
+{
+	return kt->BUG_bytes;
+}
+
 #ifdef NOT_USED
 /*
  *  To avoid premature stoppage/extension of a dis <function> that includes
@@ -1094,7 +1562,8 @@
 }
 
 #define FRAMESIZE_DEBUG_MESSAGE \
-"usage: bt -F [size|clear|dump|seek|noseek|validate|novalidate] [-I eip]\n  If eip:  set its associated framesize to size.\n           \"validate/novalidate\" will turn on/off V bit for this eip entry.\n  If !eip: \"clear\" will clear the framesize cache and RA seek/noseek flags.\n           \"dump\" will dump the current framesize cache entries.\n           \"seek/noseek\" turns on/off RA seeking.\n           \"validate/novalidate\" turns on/off V bit for all current entries.\n"
+"\nx86 usage: bt -F [size|clear|dump|seek|noseek|validate|novalidate] [-I eip]\n  If eip:  set its associated framesize to size.\n           \"validate/novalidate\" will turn on/off V bit for this eip entry.\n  If !eip: \"clear\" will clear the framesize cache and RA seek/noseek flags.\n           \"dump\" will dump the current framesize cache entries.\n           \"seek/noseek\" turns on/off RA seeking.\n           \"validate/novalidate\" turns on/off V bit for all current entries.\n\nx86_64 usage: bt -F [clear|dump|validate] [-I rip]\n  If rip:  \"validate\" will verbosely recalculate the framesize.\n  If !rip: \"clear\" will clear the framesize cache.\n           \"dump\" will dump the current framesize cache entries.\n"
+
 
 /*
  *  Display a kernel stack backtrace.  Arguments may be any number pid or task
@@ -1108,18 +1577,25 @@
  *     -s  displays arguments symbolically.
  */
 
+void
+clone_bt_info(struct bt_info *orig, struct bt_info *new,
+	      struct task_context *tc)
+{
+	BCOPY(orig, new, sizeof(*new));
+	new->stackbuf = NULL;
+	new->tc = tc;
+	new->task = tc->task;
+	new->stackbase = GET_STACKBASE(tc->task);
+	new->stacktop = GET_STACKTOP(tc->task);
+}
+
 #define BT_SETUP(TC)                                          \
-	BCOPY(&bt_setup, bt, sizeof(struct bt_info));         \
+	clone_bt_info(&bt_setup, bt, (TC));         	      \
         if (refptr) {                                         \
 		BZERO(&reference, sizeof(struct reference));  \
 		bt->ref = &reference;                         \
         	bt->ref->str = refptr;                        \
-	}                                                     \
-        bt->tc = (TC);                                        \
-        bt->task = ((TC)->task);			      \
-	bt->stackbase = GET_STACKBASE((TC)->task);            \
-	bt->stacktop = GET_STACKTOP((TC)->task);              \
-	bt->stackbuf = NULL;
+	}
  
 void
 cmd_bt(void)
@@ -1140,17 +1616,45 @@
 	bt = &bt_info;
 	BZERO(bt, sizeof(struct bt_info));
 
-        while ((c = getopt(argcnt, args, "fF:I:S:aloreEgstd:R:")) != EOF) {
-                switch(c)
+	if (kt->flags & USE_OLD_BT)
+		bt->flags |= BT_OLD_BACK_TRACE;
+
+        while ((c = getopt(argcnt, args, "fF:I:S:aloreEgstTd:R:O")) != EOF) {
+                switch (c)
 		{
 		case 'f':
 			bt->flags |= BT_FULL;
 			break;
 
 		case 'o':
+			if (XEN_HYPER_MODE())
+				option_not_supported(c);
 			bt->flags |= BT_OLD_BACK_TRACE;
 			break;
 
+		case 'O':
+			if (!(machine_type("X86") || machine_type("X86_64")) ||
+			    XEN_HYPER_MODE()) 
+				option_not_supported(c);
+			else if (kt->flags & USE_OLD_BT) { 
+				/* 
+				 *  Make this setting idempotent across the use of
+				 *  $HOME/.crashrc, ./.crashrc, and "-i input" files. 
+				 *  If we've been here before during initialization,
+				 *  leave it alone.
+			 	 */
+				if (pc->flags & INIT_IFILE) {
+					error(INFO, "use old bt method by default (already set)\n");
+					return;
+				}
+				kt->flags &= ~USE_OLD_BT;
+				error(INFO, "use new bt method by default\n");
+			} else {
+				kt->flags |= USE_OLD_BT;
+				error(INFO, "use old bt method by default\n");
+			}
+			return;
+
 		case 'R':
 			if (refptr) 
 				error(INFO, "only one -R option allowed\n");
@@ -1166,11 +1670,15 @@
 			break;
 
 		case 'E':
+			if (XEN_HYPER_MODE())
+				option_not_supported(c);
 			bt->flags |= BT_EFRAME_SEARCH|BT_EFRAME_SEARCH2;
 			bt->hp = &hook;
 			break;
 
 		case 'e':
+			if (XEN_HYPER_MODE())
+				option_not_supported(c);
 			bt->flags |= BT_EFRAME_SEARCH;
 			break;
 
@@ -1217,6 +1725,9 @@
 			} else if (*optarg == '-') {
 				hook.esp = dtol(optarg+1, FAULT_ON_ERROR, NULL);
 				hook.esp = (ulong)(0 - (long)hook.esp);
+			} else if (STREQ(optarg, "dwarf") || STREQ(optarg, "cfi")) {
+                        	if (!(kt->flags & DWARF_UNWIND_CAPABLE))
+					return;
 			} else
 				hook.esp = dtol(optarg, FAULT_ON_ERROR, NULL);
 			break;
@@ -1241,6 +1752,8 @@
 			bt->flags |= BT_SYMBOLIC_ARGS;
 			break;
 
+		case 'T':
+			bt->flags |= BT_TEXT_SYMBOLS_ALL;
 		case 't':
 			bt->flags |= BT_TEXT_SYMBOLS;
 			break;
@@ -1286,12 +1799,50 @@
                 return;
 	}
 
+	if (XEN_HYPER_MODE()) {
+#ifdef XEN_HYPERVISOR_ARCH
+		/* "task" means vcpu for xen hypervisor */
+		if (active) {
+			for (c = 0; c < XEN_HYPER_MAX_CPUS(); c++) {
+				if (!xen_hyper_test_pcpu_id(c))
+					continue;
+				fake_tc.task = xen_hyper_pcpu_to_active_vcpu(c);
+				BT_SETUP(&fake_tc);
+			        if (!BT_REFERENCE_CHECK(bt))
+					xen_hyper_print_bt_header(fp, fake_tc.task, 
+						subsequent++);
+				back_trace(bt);
+			}
+		} else {
+			if (args[optind]) {
+				fake_tc.task = xen_hyper_pcpu_to_active_vcpu(
+				    convert(args[optind], 0, NULL, NUM_DEC | NUM_HEX));
+			} else {
+				fake_tc.task = XEN_HYPER_VCPU_LAST_CONTEXT()->vcpu;
+			}
+			BT_SETUP(&fake_tc);
+			if (!BT_REFERENCE_CHECK(bt))
+				xen_hyper_print_bt_header(fp, fake_tc.task, 0);
+			back_trace(bt);
+		}
+		return;
+#else
+		error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
+#endif
+	}
+
 	if (active) {
 		if (ACTIVE())
 			error(FATAL, 
 				"-a option not supported on a live system\n");
 
 		for (c = 0; c < NR_CPUS; c++) {
+			if (setjmp(pc->foreach_loop_env)) {
+				free_all_bufs();
+				continue;
+			}
+			pc->flags |= IN_FOREACH;
+
 			if ((tc = task_to_context(tt->panic_threads[c]))) {
 				BT_SETUP(tc);
 				if (!BT_REFERENCE_CHECK(bt))
@@ -1299,6 +1850,7 @@
 				back_trace(bt);
 			}
 		}
+                pc->flags &= ~IN_FOREACH;
 
 		return;
 	}
@@ -1350,9 +1902,10 @@
 	char buf[BUFSIZE];
 
 	if (bt->flags & BT_TEXT_SYMBOLS) {
-		fprintf(fp, "%sSTART: %s at %lx\n",
-			space(VADDR_PRLEN > 8 ? 14 : 6),
-		        closest_symbol(eip), eip);
+		if (!(bt->flags & BT_TEXT_SYMBOLS_ALL))
+			fprintf(fp, "%sSTART: %s at %lx\n",
+				space(VADDR_PRLEN > 8 ? 14 : 6),
+		        	closest_symbol(eip), eip);
 	}
 
 	if (bt->hp) 
@@ -1435,6 +1988,9 @@
 		     i < LONGS_PER_STACK; i++, up++) {
 			if (is_kernel_text(*up))
 				fprintf(fp, "%lx: %s\n", 
+					tt->flags & THREAD_INFO ?
+					bt->tc->thread_info + 
+					(i * sizeof(long)) :
 					bt->task + (i * sizeof(long)),
 					value_to_symstr(*up, buf, 0));
 		}
@@ -1461,20 +2017,26 @@
 	if (bt->hp) {
 		if (bt->hp->esp && !INSTACK(bt->hp->esp, bt))
 			error(INFO, 
-			    "invalid stack address for this task: %lx\n",
-				bt->hp->esp);
+			    "invalid stack address for this task: %lx\n    (valid range: %lx - %lx)\n",
+				bt->hp->esp, bt->stackbase, bt->stacktop);
 		eip = bt->hp->eip;
 		esp = bt->hp->esp;
 
 		machdep->get_stack_frame(bt, eip ? NULL : &eip, 
 			esp ? NULL : &esp);
 	 
-        } else if (NETDUMP_DUMPFILE())
+        } else if (XEN_HYPER_MODE())
+		machdep->get_stack_frame(bt, &eip, &esp);
+	else if (NETDUMP_DUMPFILE())
                 get_netdump_regs(bt, &eip, &esp);
+	else if (KDUMP_DUMPFILE())
+                get_kdump_regs(bt, &eip, &esp);
 	else if (DISKDUMP_DUMPFILE())
                 get_diskdump_regs(bt, &eip, &esp);
         else if (LKCD_DUMPFILE())
                 get_lkcd_regs(bt, &eip, &esp);
+	else if (XENDUMP_DUMPFILE())
+		get_xendump_regs(bt, &eip, &esp);
         else
                 machdep->get_stack_frame(bt, &eip, &esp);
 
@@ -1486,6 +2048,13 @@
 	if (bt->flags & 
 	    (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT)) {
 
+		if (bt->flags & BT_TEXT_SYMBOLS_ALL) {
+			esp = bt->stackbase + 
+				((tt->flags & THREAD_INFO) ?
+				SIZE(thread_info) : SIZE(task_struct));
+			eip = 0;
+		}
+
 		if (machdep->flags & MACHDEP_BT_TEXT) {
 			bt->instptr = eip;
 			bt->stkptr = esp;
@@ -1510,8 +2079,13 @@
         		{
         		case BT_HARDIRQ:
 				btloc.hp->eip = symbol_value("do_IRQ");
-                		btloc.hp->esp = ULONG(bt->stackbuf +
-                        	    SIZE(irq_ctx) - (sizeof(unsigned int)*2));
+				if (symbol_exists("__do_IRQ"))
+					btloc.hp->esp = ULONG(bt->stackbuf +
+					    OFFSET(thread_info_previous_esp));
+				else
+					btloc.hp->esp = ULONG(bt->stackbuf +
+					    SIZE(irq_ctx) - 
+					    (sizeof(char *)*2));
 				fprintf(fp, "--- <hard IRQ> ---\n");
                 		break;
 
@@ -1538,7 +2112,7 @@
 		BCOPY(bt, &btsave, sizeof(struct bt_info));
 
 	if (CRASHDEBUG(4))
-		dump_bt_info(bt);
+		dump_bt_info(bt, "back_trace");
 
 	machdep->back_trace(bt);
 
@@ -1546,7 +2120,15 @@
 		goto complete_trace;
 
 	if (BT_REFERENCE_FOUND(bt)) {
+#ifdef XEN_HYPERVISOR_ARCH
+		if (XEN_HYPER_MODE())
+			xen_hyper_print_bt_header(fp, bt->task, 0);
+		else
+			print_task_header(fp, task_to_context(bt->task), 0);
+#else
 		print_task_header(fp, task_to_context(bt->task), 0);
+#endif /* XEN_HYPERVISOR_ARCH */
+
 		BCOPY(&btsave, bt, sizeof(struct bt_info));
 		bt->ref = NULL;
 		machdep->back_trace(bt);
@@ -1562,6 +2144,8 @@
 restore_stack(struct bt_info *bt)
 {
 	ulonglong type;
+	struct syment *sp;
+	ulong retvaddr;
 
 	bt->instptr = bt->stkptr = 0;
 	type = 0;
@@ -1569,14 +2153,30 @@
 	switch (bt->flags & (BT_HARDIRQ|BT_SOFTIRQ)) 
 	{ 
 	case BT_HARDIRQ:
-		bt->instptr = symbol_value("do_IRQ");
-		bt->stkptr = ULONG(bt->stackbuf + 
-			SIZE(irq_ctx) - (sizeof(unsigned int)*2));
+		retvaddr = ULONG(bt->stackbuf +
+			SIZE(irq_ctx) - sizeof(char *));
+		if ((sp = value_search(retvaddr, NULL)) && 
+			STREQ(sp->name, "do_IRQ"))
+			bt->instptr = retvaddr; 
+		else
+			bt->instptr = symbol_value("do_IRQ");
+		if (symbol_exists("__do_IRQ"))
+            		bt->stkptr = ULONG(bt->stackbuf +
+                     		OFFSET(thread_info_previous_esp));
+		else
+			bt->stkptr = ULONG(bt->stackbuf + 
+				SIZE(irq_ctx) - (sizeof(char *)*2));
 		type = BT_HARDIRQ;
 		break;
 
 	case BT_SOFTIRQ:
-		bt->instptr = symbol_value("do_softirq");
+		retvaddr = ULONG(bt->stackbuf +
+			SIZE(irq_ctx) - sizeof(char *));
+		if ((sp = value_search(retvaddr, NULL)) && 
+			STREQ(sp->name, "do_softirq"))
+			bt->instptr = retvaddr; 
+		else
+			bt->instptr = symbol_value("do_softirq");
                	bt->stkptr = ULONG(bt->stackbuf +
                        	OFFSET(thread_info_previous_esp));
 		type = BT_SOFTIRQ;
@@ -1635,7 +2235,8 @@
 		if ((p1 = strstr(buf, ":"))) {
 			esp = eip = 0;
                 	*p1 = NULLCHAR;
-			if ((esp = htol(buf, RETURN_ON_ERROR, NULL)) != BADADDR)
+			if (((esp = htol(buf, RETURN_ON_ERROR, NULL)) != BADADDR)
+			    && INSTACK(esp, bt))
                                 eip = GET_STACK_ULONG(esp);
 			if (esp && eip) {
 				hooks[cnt].esp = esp;
@@ -1660,12 +2261,14 @@
  *  Debug routine most likely useful from above in back_trace()
  */
 void
-dump_bt_info(struct bt_info *bt)
+dump_bt_info(struct bt_info *bt, char *where)
 {
+	fprintf(fp, "[%lx] %s:\n", (ulong)bt, where);
 	fprintf(fp, "        task: %lx\n", bt->task);
 	fprintf(fp, "       flags: %llx\n", bt->flags);
 	fprintf(fp, "     instptr: %lx\n", bt->instptr);
 	fprintf(fp, "      stkptr: %lx\n", bt->stkptr);
+	fprintf(fp, "        bptr: %lx\n", bt->bptr);
 	fprintf(fp, "   stackbase: %lx\n", bt->stackbase);
 	fprintf(fp, "    stacktop: %lx\n", bt->stacktop);
 	fprintf(fp, "          tc: %lx ", (ulong)bt->tc);
@@ -1700,6 +2303,11 @@
 		return;
 	}
 
+	/* try to get it from the header */
+	if (get_lkcd_regs_for_cpu(bt, eip, esp) == 0)
+		return;
+
+	/* if that fails: do guessing */
 	sysrq_eip = sysrq_esp = 0;
 
 	for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++){
@@ -1721,6 +2329,25 @@
                         *esp = *(up-1);
                         return;
                 }
+		/* Egenera */
+                if (STREQ(sym, "netdump_ipi")) {
+                        *eip = *up;
+                        *esp = bt->task + 
+				((char *)(up-1) - bt->stackbuf);
+                        return;
+                }
+		if (STREQ(sym, "dump_execute")) {
+                        *eip = *up;
+                        *esp = bt->stackbase + 
+				((char *)(up) - bt->stackbuf);
+                        return;
+		}
+		if (STREQ(sym, "vmdump_nmi_callback")) {
+                        *eip = *up;
+                        *esp = bt->stackbase + 
+				((char *)(up) - bt->stackbuf);
+                        return;
+		}
                 if (STREQ(sym, "smp_stop_cpu_interrupt")) {
                         *eip = *up;
                         *esp = bt->task + 
@@ -1837,8 +2464,8 @@
 					return;
 				}
 
-				if (IS_VMALLOC_ADDR(list.next) &&
-				    IS_VMALLOC_ADDR(list.prev)) {
+				if (IS_VMALLOC_ADDR((ulong)list.next) &&
+				    IS_VMALLOC_ADDR((ulong)list.prev)) {
 					kt->kernel_module = sp->value;
 					kt->module_list = (ulong)list.next;
 					modules_found = TRUE;
@@ -1873,14 +2500,17 @@
 	kallsymsbuf = kt->flags & KALLSYMS_V1 ?
 		GETBUF(SIZE(kallsyms_header)) : NULL;
 
+	please_wait("gathering module symbol data");
+
         for (mod = kt->module_list; mod != kt->kernel_module; mod = mod_next) {
-		if (CRASHDEBUG(7))
+		if (CRASHDEBUG(3))
 			fprintf(fp, "module: %lx\n", mod);
 
                 if (!readmem(mod, KVADDR, modbuf, SIZE(module), 
 		    "module struct", RETURN_ON_ERROR|QUIET)) {
                         error(WARNING,
-                            "cannot access vmalloc'd module memory\n\n");
+                            "%scannot access vmalloc'd module memory\n\n",
+				DUMPFILE() ? "\n" : "");
                         kt->mods_installed = 0;
                         kt->flags |= NO_MODULE_ACCESS;
                         FREEBUF(modbuf); 
@@ -1914,7 +2544,8 @@
 				    kallsymsbuf, SIZE(kallsyms_header), 
 				    "kallsyms_header", RETURN_ON_ERROR|QUIET)) {
 	                        	error(WARNING,
-                                      "cannot access module kallsyms_header\n");
+                                      "%scannot access module kallsyms_header\n",
+					    DUMPFILE() ? "\n" : "");
 				} else {
 					nsyms = UINT(kallsymsbuf +
 				 	    OFFSET(kallsyms_header_symbols));
@@ -1924,7 +2555,10 @@
 			break;
 
 		case KALLSYMS_V2:
-			numksyms = ULONG(modbuf + OFFSET(module_num_symtab));
+			if (THIS_KERNEL_VERSION >= LINUX(2,6,27))
+				numksyms = UINT(modbuf + OFFSET(module_num_symtab));
+			else
+				numksyms = ULONG(modbuf + OFFSET(module_num_symtab));
 			total += numksyms; 
 			break;
 		}
@@ -1947,6 +2581,8 @@
 		store_module_symbols_v2(total, kt->mods_installed);
 		break;
 	}
+
+	please_wait_done();
 }
 
 
@@ -2035,8 +2671,12 @@
 				case KMOD_V2:
         				module_name = modbuf + 
 						OFFSET(module_name);
-					mod_size = LONG(modbuf + 
-						OFFSET(module_core_size));
+					if (THIS_KERNEL_VERSION >= LINUX(2,6,27))
+						mod_size = UINT(modbuf +
+							OFFSET(module_core_size));
+					else
+						mod_size = ULONG(modbuf +
+							OFFSET(module_core_size));
                 			if (strlen(module_name) < MAX_MOD_NAME)
                         			strcpy(buf, module_name);
                 			else 
@@ -2112,7 +2752,7 @@
 	address = 0;
 	flag = LIST_MODULE_HDR;
 
-        while ((c = getopt(argcnt, args, "rd:Ds:St:")) != EOF) {
+        while ((c = getopt(argcnt, args, "rd:Ds:St:o")) != EOF) {
                 switch(c)
 		{
                 case 'r':
@@ -2145,6 +2785,19 @@
                                 cmd_usage(pc->curcmd, SYNOPSIS);
                         break;
 
+                /*
+                 *  Revert to using old-style add-symbol-file command
+		 *  for KMOD_V2 kernels.
+                 */
+                case 'o':
+			if (flag) 
+				cmd_usage(pc->curcmd, SYNOPSIS);
+			if (kt->flags & KMOD_V1)
+				error(INFO, 
+				    "-o option is not applicable to this kernel version\n");
+                        st->flags |= USE_OLD_ADD_SYM;
+			return;
+
 		case 't':
 			if (is_directory(optarg))
 				tree = optarg;
@@ -2459,7 +3112,7 @@
 
 
 static char *
-find_module_objfile(char *modref, char *filename, char *tree)
+module_objfile_search(char *modref, char *filename, char *tree)
 {
 	char buf[BUFSIZE];
 	char file[BUFSIZE];
@@ -2477,16 +3130,20 @@
 		strcpy(file, filename);
 #ifdef MODULES_IN_CWD
        else {
-                sprintf(file, "%s.o", modref);
-                if (access(file, R_OK) == 0) {
-                        retbuf = GETBUF(strlen(file)+1);
-                        strcpy(retbuf, file);
-                        if (CRASHDEBUG(1))
-                            	fprintf(fp, 
-				    "find_module_objfile: [%s] file in cwd\n",
-                                	retbuf);
-                        return retbuf;
-                }
+		char *fileext[] = { "ko", "o"};
+		int i;
+		for (i = 0; i < 2; i++) {
+			sprintf(file, "%s.%s", modref, fileext[i]);
+			if (access(file, R_OK) == 0) {
+				retbuf = GETBUF(strlen(file)+1);
+				strcpy(retbuf, file);
+				if (CRASHDEBUG(1))
+					fprintf(fp, 
+					    "find_module_objfile: [%s] file in cwd\n",
+						retbuf);
+				return retbuf;
+			}
+		}
 	}
 #else
 	else 
@@ -2505,6 +3162,8 @@
 	if ((st->flags & INSMOD_BUILTIN) && !filename) {
 		sprintf(buf, "__insmod_%s_O/", modref);
 		if (symbol_query(buf, NULL, &sp) == 1) {
+                        if (CRASHDEBUG(1))
+                                fprintf(fp, "search: INSMOD_BUILTIN %s\n", sp->name);
 			BZERO(buf, BUFSIZE);
 			p1 = strstr(sp->name, "/");
 			if ((p2 = strstr(sp->name, file)))
@@ -2578,7 +3237,7 @@
 	retbuf = search_directory_tree(dir, file);
 
 	if (!retbuf) {
-		sprintf(dir, "/lib/modules/%s", kt->utsname.release);
+		sprintf(dir, "/lib/modules/%s/updates", kt->utsname.release);
 		if (!(retbuf = search_directory_tree(dir, file))) {
 			switch (kt->flags & (KMOD_V1|KMOD_V2))
 			{
@@ -2589,13 +3248,68 @@
 		}
 	}
 
-	return retbuf;
-}
-
-
-/*
- *  Unlink any temporary remote module object files.
- */
+	if (!retbuf) {
+		sprintf(dir, "/lib/modules/%s", kt->utsname.release);
+		if (!(retbuf = search_directory_tree(dir, file))) {
+			switch (kt->flags & (KMOD_V1|KMOD_V2))
+			{
+			case KMOD_V2:
+				sprintf(file, "%s.ko", modref);
+				retbuf = search_directory_tree(dir, file);
+			}
+		}
+	}
+
+	return retbuf;
+}
+
+/*
+ *  First look for a module based upon its reference name.
+ *  If that fails, try replacing any underscores in the
+ *  reference name with dashes.
+ *  If that still fails because of intermingled dashes and underscores,
+ *  try a regular expression that accepts either character.
+ *
+ *  Example: module name "dm_mod" comes from the "dm-mod.ko" objfile
+ *           module name "dm_region_hash" comes from the "dm-region_hash.ko" objfile
+ */
+static char *
+find_module_objfile(char *modref, char *filename, char *tree)
+{
+	char * retbuf;
+	char tmpref[BUFSIZE];
+	int i, c;
+
+	retbuf = module_objfile_search(modref, filename, tree);
+
+	if (!retbuf) {
+		strncpy(tmpref, modref, BUFSIZE);
+		for (c = 0; c < BUFSIZE && tmpref[c]; c++)
+			if (tmpref[c] == '_')
+				tmpref[c] = '-';
+		retbuf = module_objfile_search(tmpref, filename, tree);
+	}
+
+	if (!retbuf && (count_chars(modref, '_') > 1)) {
+		for (i = c = 0; modref[i]; i++) {
+			if (modref[i] == '_') {
+				tmpref[c++] = '[';
+				tmpref[c++] = '_';
+				tmpref[c++] = '-';
+				tmpref[c++] = ']';
+			} else
+				tmpref[c++] = modref[i];
+		} 
+		tmpref[c] = NULLCHAR;
+		retbuf = module_objfile_search(tmpref, filename, tree);
+	}
+
+	return retbuf;
+}
+
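/*
 * A minimal standalone sketch of the name-matching idea above, using a
 * hypothetical helper name (not taken from the patch): each '_' in the
 * module reference is expanded to the character class "[_-]" so a single
 * pattern matches object files that use either underscores or dashes.
 */
#include <string.h>

static void modref_to_pattern(const char *modref, char *pattern, size_t len)
{
	size_t n = 0;

	while (*modref && n + 5 < len) {
		if (*modref == '_') {
			memcpy(pattern + n, "[_-]", 4);	/* match '_' or '-' */
			n += 4;
		} else
			pattern[n++] = *modref;
		modref++;
	}
	pattern[n] = '\0';
}

/* modref_to_pattern("dm_region_hash", buf, sizeof(buf)) yields
 * "dm[_-]region[_-]hash", which covers the "dm-region_hash.ko" case. */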
+/*
+ *  Unlink any temporary remote module object files.
+ */
 void
 unlink_module(struct load_module *load_module)
 {
@@ -2651,7 +3365,7 @@
 dump_log(int msg_level)
 {
 	int i;
-	ulong log_buf, log_start, logged_chars;
+	ulong log_buf, logged_chars;
 	char *buf;
 	char last;
 	ulong index;
@@ -2678,13 +3392,16 @@
 
 	buf = GETBUF(log_buf_len);
 	log_wrap = FALSE;
-	get_symbol_data("log_start", sizeof(ulong), &log_start);
 	get_symbol_data("logged_chars", sizeof(ulong), &logged_chars);
         readmem(log_buf, KVADDR, buf,
         	log_buf_len, "log_buf contents", FAULT_ON_ERROR);
 
-	log_start &= log_buf_len-1;
-	index = (logged_chars < log_buf_len) ? 0 : log_start;
+	if (logged_chars < log_buf_len) {
+		index = 0;
+	} else {
+		get_symbol_data("log_end", sizeof(ulong), &index);
+		index &= log_buf_len-1;
+	} 
 
 	if ((logged_chars < log_buf_len) && (index == 0) && (buf[index] == '<'))
 		loglevel = TRUE;
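/*
 * A small self-contained sketch of the ring-buffer index math used above:
 * log_buf_len is a power of two (1 << CONFIG_LOG_BUF_SHIFT), so once the
 * buffer has wrapped (logged_chars >= log_buf_len) the oldest byte sits at
 * log_end masked into the buffer.  The sample numbers are illustrative.
 */
#include <assert.h>

int main(void)
{
	unsigned long log_buf_len = 1UL << 17;		/* 128K buffer */
	unsigned long log_end = 200000;			/* bytes ever logged */
	unsigned long index = log_end & (log_buf_len - 1);

	assert(index == 200000 - 131072);		/* 68928 */
	return 0;
}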
@@ -2787,6 +3504,8 @@
         do {
                 if (sflag)
                         dump_sys_call_table(args[optind], cnt++);
+		else if (STREQ(args[optind], "config"))
+			read_in_kernel_config(IKCFG_READ);
                 else
                         cmd_usage(args[optind], COMPLETE_HELP);
                 optind++;
@@ -2867,6 +3586,9 @@
 		if (NETDUMP_DUMPFILE() && is_partial_netdump())
 			fprintf(fp, "  [PARTIAL DUMP]");
 
+		if (DISKDUMP_DUMPFILE() && is_partial_diskdump())
+			fprintf(fp, "  [PARTIAL DUMP]");
+
 		fprintf(fp, "\n");
 	}
 	
@@ -2876,7 +3598,7 @@
         	get_symbol_data("xtime", sizeof(struct timespec), &kt->date);
         fprintf(fp, "        DATE: %s\n", 
 		strip_linefeeds(ctime(&kt->date.tv_sec))); 
-        fprintf(fp, "      UPTIME: %s\n", get_uptime(buf)); 
+        fprintf(fp, "      UPTIME: %s\n", get_uptime(buf, NULL)); 
         fprintf(fp, "LOAD AVERAGE: %s\n", get_loadavg(buf)); 
 	fprintf(fp, "       TASKS: %ld\n", RUNNING_TASKS());
 	fprintf(fp, "    NODENAME: %s\n", uts->nodename); 
@@ -2891,10 +3613,17 @@
 #ifdef WHO_CARES
 	fprintf(fp, "  DOMAINNAME: %s\n", uts->domainname);
 #endif
+	if (XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND))
+		return;
+
 	if (DUMPFILE()) {
 		fprintf(fp, "       PANIC: ");
 		if (machdep->flags & HWRESET)
-			fprintf(fp, "HARDWARE RESET\n");
+			fprintf(fp, "(HARDWARE RESET)\n");
+		else if (machdep->flags & INIT)
+			fprintf(fp, "(INIT)\n");
+		else if (machdep->flags & MCA)
+			fprintf(fp, "(MCA)\n");
 		else {
         		strip_linefeeds(get_panicmsg(buf));
 			fprintf(fp, "\"%s\"%s\n", buf, 
@@ -2952,28 +3681,42 @@
 /*
  *  Calculate and return the uptime.
  */
-
-static char *
-get_uptime(char *buf)
+char *
+get_uptime(char *buf, ulonglong *j64p)
 {
-	ulong jiffies; 
-
-	get_symbol_data("jiffies", sizeof(long), &jiffies);
+	ulong jiffies, tmp1, tmp2;
+	ulonglong jiffies_64, wrapped;
 
-	if ((machine_type("S390") || machine_type("S390X")) &&
-	    (THIS_KERNEL_VERSION >= LINUX(2,6,0))) 
-		jiffies -= ((unsigned long)(unsigned int)(-300*machdep->hz));
-	else if (symbol_exists("jiffies_64") && BITS64() && 
-		(((ulonglong)jiffies & 0xffffffff00000000ULL) == 
-		0x100000000ULL))
-		jiffies &= 0xffffffff;
-
-	convert_time((ulonglong)jiffies, buf);
+	if (symbol_exists("jiffies_64")) {
+		get_symbol_data("jiffies_64", sizeof(ulonglong), &jiffies_64);
+		if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) {
+			wrapped = (jiffies_64 & 0xffffffff00000000ULL);
+			if (wrapped) {
+				wrapped -= 0x100000000ULL;
+				jiffies_64 &= 0x00000000ffffffffULL;
+				jiffies_64 |= wrapped;
+                		jiffies_64 += (ulonglong)(300*machdep->hz);
+			} else {
+				tmp1 = (ulong)(uint)(-300*machdep->hz);
+				tmp2 = (ulong)jiffies_64;
+				jiffies_64 = (ulonglong)(tmp2 - tmp1);
+			}
+		}
+		if (buf)
+			convert_time(jiffies_64, buf);
+		if (j64p)
+			*j64p = jiffies_64;
+	} else {
+		get_symbol_data("jiffies", sizeof(long), &jiffies);
+		if (buf)
+			convert_time((ulonglong)jiffies, buf);
+		if (j64p)
+			*j64p = (ulonglong)jiffies;
+	}
 
 	return buf;
 }
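/*
 * A minimal standalone sketch of the uptime arithmetic above, under the
 * assumption that the kernel booted with jiffies biased to
 * (unsigned long)(unsigned int)(-300*HZ) (INITIAL_JIFFIES), so that the
 * low 32 bits wrap about five minutes after boot.  HZ and the sample
 * value below are illustrative only.
 */
#include <stdio.h>

#define HZ 1000

int main(void)
{
	unsigned long long initial = (unsigned long)(unsigned int)(-300 * HZ);
	/* what jiffies_64 would hold ten seconds after boot */
	unsigned long long jiffies_64 = initial + 10 * HZ;
	unsigned long long uptime_ticks = jiffies_64 - initial;

	printf("uptime: %llu seconds\n", uptime_ticks / HZ);	/* prints 10 */
	return 0;
}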
 
-
 #define FSHIFT          11              /* nr of bits of precision */
 #define FIXED_1 (1<<FSHIFT)
 #define LOAD_INT(x) ((x) >> FSHIFT)
@@ -3048,9 +3791,9 @@
 	struct syment *sp, *spn;
         long size;
 #ifdef S390X
-	unsigned int *sct, *sys_call_table, addr;
+	unsigned int *sct, *sys_call_table, sys_ni_syscall, addr;
 #else
-	ulong *sys_call_table, *sct, addr;
+	ulong *sys_call_table, *sct, sys_ni_syscall, addr;
 #endif
 	if (GDB_PATCHED())
 		error(INFO, "line numbers are not available\n"); 
@@ -3068,6 +3811,8 @@
         readmem(symbol_value("sys_call_table"), KVADDR, sys_call_table,
                 size, "sys_call_table", FAULT_ON_ERROR);
 
+	sys_ni_syscall = symbol_value("sys_ni_syscall");
+
 	if (spec)
 		open_tmpfile();
 
@@ -3080,13 +3825,17 @@
 					"%3x  " : "%3d  ", i);
 				fprintf(fp, 
 			    	    "invalid sys_call_table entry: %lx (%s)\n", 
-					*sct, value_to_symstr(*sct, buf1, 0));
+					(unsigned long)*sct,
+					value_to_symstr(*sct, buf1, 0));
 			}
 			continue;
 		}
 		
 		fprintf(fp, (output_radix == 16) ? "%3x  " : "%3d  ", i);
-		fprintf(fp, "%-26s ", scp);
+  		if (sys_ni_syscall && *sct == sys_ni_syscall)
+			fprintf(fp, "%-26s ", "sys_ni_syscall");
+		else
+			fprintf(fp, "%-26s ", scp);
 
 		/*
 		 *  For system call symbols whose first instruction is
@@ -3181,16 +3930,16 @@
  *  "help -k" output
  */
 void
-dump_kernel_table(void)
+dump_kernel_table(int verbose)
 {
-	int i;
+	int i, j, more, nr_cpus;
         struct new_utsname *uts;
         int others;
 
         others = 0;
         uts = &kt->utsname;
 
-        fprintf(fp, "         flags: %lx  (", kt->flags);
+        fprintf(fp, "         flags: %lx\n  (", kt->flags);
 	if (kt->flags & NO_MODULE_ACCESS)
 		fprintf(fp, "%sNO_MODULE_ACCESS", others++ ? "|" : "");
 	if (kt->flags & TVEC_BASES_V1)
@@ -3225,6 +3974,32 @@
 		fprintf(fp, "%sKMOD_V2", others++ ? "|" : "");
 	if (kt->flags & KALLSYMS_V2)
 		fprintf(fp, "%sKALLSYMS_V2", others++ ? "|" : "");
+	if (kt->flags & USE_OLD_BT)
+		fprintf(fp, "%sUSE_OLD_BT", others++ ? "|" : "");
+	if (kt->flags & ARCH_XEN)
+		fprintf(fp, "%sARCH_XEN", others++ ? "|" : "");
+	if (kt->flags & ARCH_OPENVZ)
+		fprintf(fp, "%sARCH_OPENVZ", others++ ? "|" : "");
+	if (kt->flags & ARCH_PVOPS)
+		fprintf(fp, "%sARCH_PVOPS", others++ ? "|" : "");
+	if (kt->flags & NO_IKCONFIG)
+		fprintf(fp, "%sNO_IKCONFIG", others++ ? "|" : "");
+	if (kt->flags & DWARF_UNWIND)
+		fprintf(fp, "%sDWARF_UNWIND", others++ ? "|" : "");
+	if (kt->flags & NO_DWARF_UNWIND)
+		fprintf(fp, "%sNO_DWARF_UNWIND", others++ ? "|" : "");
+	if (kt->flags & DWARF_UNWIND_MEMORY)
+		fprintf(fp, "%sDWARF_UNWIND_MEMORY", others++ ? "|" : "");
+	if (kt->flags & DWARF_UNWIND_EH_FRAME)
+		fprintf(fp, "%sDWARF_UNWIND_EH_FRAME", others++ ? "|" : "");
+	if (kt->flags & DWARF_UNWIND_MODULES)
+		fprintf(fp, "%sDWARF_UNWIND_MODULES", others++ ? "|" : "");
+	if (kt->flags & BUGVERBOSE_OFF)
+		fprintf(fp, "%sBUGVERBOSE_OFF", others++ ? "|" : "");
+	if (kt->flags & RELOC_SET)
+		fprintf(fp, "%sRELOC_SET", others++ ? "|" : "");
+	if (kt->flags & RELOC_FORCE)
+		fprintf(fp, "%sRELOC_FORCE", others++ ? "|" : "");
 	fprintf(fp, ")\n");
         fprintf(fp, "         stext: %lx\n", kt->stext);
         fprintf(fp, "         etext: %lx\n", kt->etext);
@@ -3234,8 +4009,10 @@
         fprintf(fp, "      init_end: %lx\n", kt->init_end);
         fprintf(fp, "           end: %lx\n", kt->end);
         fprintf(fp, "          cpus: %d\n", kt->cpus);
+        fprintf(fp, " cpus_override: %s\n", kt->cpus_override);
         fprintf(fp, "       NR_CPUS: %d (compiled-in to this version of %s)\n",
 		NR_CPUS, pc->program_name); 
+	fprintf(fp, "kernel_NR_CPUS: %d\n", kt->kernel_NR_CPUS);
 	if (kt->display_bh == display_bh_1)
         	fprintf(fp, "    display_bh: display_bh_1()\n");
 	else if (kt->display_bh == display_bh_2)
@@ -3263,21 +4040,141 @@
 		kt->kernel_version[1], kt->kernel_version[2]);
 	fprintf(fp, "   gcc_version: %d.%d.%d\n", kt->gcc_version[0], 
 		kt->gcc_version[1], kt->gcc_version[2]);
+	fprintf(fp, "     BUG_bytes: %d\n", kt->BUG_bytes);
+	fprintf(fp, "      relocate: %lx\n", kt->relocate);
 	fprintf(fp, " runq_siblings: %d\n", kt->runq_siblings);
 	fprintf(fp, "  __rq_idx[NR_CPUS]: ");
-	for (i = 0; i < NR_CPUS; i++) 
+	nr_cpus = kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS;
+	for (i = 0; i < nr_cpus; i++) {
+		if (!(kt->__rq_idx)) {
+			fprintf(fp, "(unused)");
+			break;
+		}
 		fprintf(fp, "%ld ", kt->__rq_idx[i]);
+		for (j = i, more = FALSE; j < nr_cpus; j++) {
+			if (kt->__rq_idx[j])
+				more = TRUE;
+		}
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
+	}
 	fprintf(fp, "\n __cpu_idx[NR_CPUS]: ");
-	for (i = 0; i < NR_CPUS; i++) 
+	for (i = 0; i < nr_cpus; i++) {
+		if (!(kt->__cpu_idx)) {
+			fprintf(fp, "(unused)");
+			break;
+		}
 		fprintf(fp, "%ld ", kt->__cpu_idx[i]);
+		for (j = i, more = FALSE; j < nr_cpus; j++) {
+			if (kt->__cpu_idx[j])
+				more = TRUE;
+		}
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
+	}
 	fprintf(fp, "\n __per_cpu_offset[NR_CPUS]:");
-	for (i = 0; i < NR_CPUS; i++) 
+	for (i = 0; i < nr_cpus; i++) {
 		fprintf(fp, "%s%.*lx ", (i % 4) == 0 ? "\n    " : "",
 			LONG_PRLEN, kt->__per_cpu_offset[i]);
-	fprintf(fp, "\n cpu_flags[NR_CPUS]:");
-	for (i = 0; i < NR_CPUS; i++) 
+		if ((i % 4) == 0) {
+			for (j = i, more = FALSE; j < nr_cpus; j++) {
+				if (kt->__per_cpu_offset[j])
+					more = TRUE;
+			}
+		}
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
+
+	}
+	fprintf(fp, "\n cpu_flags[NR_CPUS]: ");
+	for (i = 0; i < nr_cpus; i++) {
+		if (!(kt->cpu_flags)) {
+			fprintf(fp, "(unused)\n");
+			goto no_cpu_flags;
+		}
 		fprintf(fp, "%lx ", kt->cpu_flags[i]);
+		for (j = i, more = FALSE; j < nr_cpus; j++) {
+			if (kt->cpu_flags[j])
+				more = TRUE;
+		}
+		if (!more) {
+			fprintf(fp, "...");
+			break;
+		}
+	}
 	fprintf(fp, "\n");
+	fprintf(fp, "       cpu_possible_map: ");
+	if (kernel_symbol_exists("cpu_possible_map")) {
+		for (i = 0; i < nr_cpus; i++) {
+			if (kt->cpu_flags[i] & POSSIBLE)
+				fprintf(fp, "%d ", i);
+		}
+		fprintf(fp, "\n");
+	} else
+		fprintf(fp, "(does not exist)\n");
+	fprintf(fp, "        cpu_present_map: ");
+	if (kernel_symbol_exists("cpu_present_map")) {
+		for (i = 0; i < nr_cpus; i++) {
+			if (kt->cpu_flags[i] & PRESENT)
+				fprintf(fp, "%d ", i);
+		}
+		fprintf(fp, "\n");
+	} else
+		fprintf(fp, "(does not exist)\n");
+	fprintf(fp, "         cpu_online_map: ");
+	if (kernel_symbol_exists("cpu_online_map")) {
+		for (i = 0; i < nr_cpus; i++) {
+			if (kt->cpu_flags[i] & ONLINE)
+				fprintf(fp, "%d ", i);
+		}
+		fprintf(fp, "\n");
+	} else
+		fprintf(fp, "(does not exist)\n");
+no_cpu_flags:
+	others = 0;
+	fprintf(fp, "     xen_flags: %lx (", kt->xen_flags);
+        if (kt->xen_flags & WRITABLE_PAGE_TABLES)
+                fprintf(fp, "%sWRITABLE_PAGE_TABLES", others++ ? "|" : "");
+        if (kt->xen_flags & SHADOW_PAGE_TABLES)
+                fprintf(fp, "%sSHADOW_PAGE_TABLES", others++ ? "|" : "");
+        if (kt->xen_flags & CANONICAL_PAGE_TABLES)
+                fprintf(fp, "%sCANONICAL_PAGE_TABLES", others++ ? "|" : "");
+        if (kt->xen_flags & XEN_SUSPEND)
+                fprintf(fp, "%sXEN_SUSPEND", others++ ? "|" : "");
+	fprintf(fp, ")\n");
+	fprintf(fp, "               m2p_page: %lx\n", (ulong)kt->m2p_page);
+        fprintf(fp, "phys_to_machine_mapping: %lx\n", kt->phys_to_machine_mapping);
+        fprintf(fp, "         p2m_table_size: %ld\n", kt->p2m_table_size);
+	fprintf(fp, " p2m_mapping_cache[%d]: %s\n", P2M_MAPPING_CACHE,
+		 verbose ? "" : "(use \"help -K\" to view cache contents)");
+	for (i = 0; verbose && (i < P2M_MAPPING_CACHE); i++) {
+		if (!kt->p2m_mapping_cache[i].mapping)
+			continue;
+		fprintf(fp, "       [%d] mapping: %lx start: %lx end: %lx (%ld mfns)\n",
+			i, kt->p2m_mapping_cache[i].mapping,
+			kt->p2m_mapping_cache[i].start,
+			kt->p2m_mapping_cache[i].end,
+			kt->p2m_mapping_cache[i].end -  kt->p2m_mapping_cache[i].start + 1);
+        }
+	fprintf(fp, "      last_mapping_read: %lx\n", kt->last_mapping_read);
+	fprintf(fp, "        p2m_cache_index: %ld\n", kt->p2m_cache_index);
+	fprintf(fp, "     p2m_pages_searched: %ld\n", kt->p2m_pages_searched);
+	fprintf(fp, "     p2m_mfn_cache_hits: %ld ", kt->p2m_mfn_cache_hits);
+	if (kt->p2m_pages_searched)
+		fprintf(fp, "(%ld%%)\n", kt->p2m_mfn_cache_hits * 100 / kt->p2m_pages_searched);
+	else
+		fprintf(fp, "\n");
+	fprintf(fp, "    p2m_page_cache_hits: %ld ", kt->p2m_page_cache_hits);
+	if (kt->p2m_pages_searched)
+		fprintf(fp, "(%ld%%)\n", kt->p2m_page_cache_hits * 100 / kt->p2m_pages_searched);
+	else
+		fprintf(fp, "\n");
 }
 
 /*
@@ -3314,7 +4211,7 @@
 	if (machine_type("S390") || machine_type("S390X"))
 		command_not_supported();
 
-        while ((c = getopt(argcnt, args, "db")) != EOF) {
+        while ((c = getopt(argcnt, args, "dbu")) != EOF) {
                 switch(c)
                 {
 		case 'd':
@@ -3344,6 +4241,17 @@
 			kt->display_bh();
 			return;
 
+		case 'u':
+			pc->curcmd_flags |= IRQ_IN_USE;
+			if (kernel_symbol_exists("no_irq_chip"))
+				pc->curcmd_private = (ulonglong)symbol_value("no_irq_chip");
+			else if (kernel_symbol_exists("no_irq_type"))
+				pc->curcmd_private = (ulonglong)symbol_value("no_irq_type");
+			else
+				error(WARNING, 
+       "irq: -u option ignored: \"no_irq_chip\" or \"no_irq_type\" symbols do not exist\n");
+			break;
+
                 default:
                         argerrs++;
                         break;
@@ -3362,6 +4270,8 @@
 		return;
 	}
 
+	pc->curcmd_flags &= ~IRQ_IN_USE;
+
 	while (args[optind]) {
 		i = dtoi(args[optind], FAULT_ON_ERROR, NULL);
 		if (i >= nr_irqs)
@@ -3402,13 +4312,22 @@
 
         readmem(irq_desc_addr + OFFSET(irq_desc_t_status), KVADDR, &status,
                 sizeof(int), "irq_desc entry", FAULT_ON_ERROR);
-        readmem(irq_desc_addr + OFFSET(irq_desc_t_handler), KVADDR, &handler,
-                sizeof(long), "irq_desc entry", FAULT_ON_ERROR);
+	if (VALID_MEMBER(irq_desc_t_handler))
+	        readmem(irq_desc_addr + OFFSET(irq_desc_t_handler), KVADDR,
+        	        &handler, sizeof(long), "irq_desc entry",
+			FAULT_ON_ERROR);
+	else if (VALID_MEMBER(irq_desc_t_chip))
+	        readmem(irq_desc_addr + OFFSET(irq_desc_t_chip), KVADDR,
+        	        &handler, sizeof(long), "irq_desc entry",
+			FAULT_ON_ERROR);
         readmem(irq_desc_addr + OFFSET(irq_desc_t_action), KVADDR, &action,
                 sizeof(long), "irq_desc entry", FAULT_ON_ERROR);
         readmem(irq_desc_addr + OFFSET(irq_desc_t_depth), KVADDR, &depth,
                 sizeof(int), "irq_desc entry", FAULT_ON_ERROR);
 
+	if (!action && (handler == (ulong)pc->curcmd_private))
+		return;
+
 	fprintf(fp, "    IRQ: %d\n", irq);
 	fprintf(fp, " STATUS: %x %s", status, status ? "(" : "");
 	others = 0;
@@ -3441,19 +4360,30 @@
 	} else
 		fprintf(fp, "%lx\n", handler);
 
-	if (handler) { 
-        	readmem(handler+OFFSET(hw_interrupt_type_typename), KVADDR, 
-			&tmp1, sizeof(void *),
-                	"hw_interrupt_type typename", FAULT_ON_ERROR);
+	if (handler) {
+		if (VALID_MEMBER(hw_interrupt_type_typename))
+	        	readmem(handler+OFFSET(hw_interrupt_type_typename),
+				KVADDR,	&tmp1, sizeof(void *),
+        	        	"hw_interrupt_type typename", FAULT_ON_ERROR);
+		else if (VALID_MEMBER(irq_chip_typename))
+	        	readmem(handler+OFFSET(irq_chip_typename),
+				KVADDR,	&tmp1, sizeof(void *),
+                		"hw_interrupt_type typename", FAULT_ON_ERROR);
+
 	 	fprintf(fp, "         typename: %lx  ", tmp1);
 		BZERO(buf, BUFSIZE);
         	if (read_string(tmp1, buf, BUFSIZE-1))
 			fprintf(fp, "\"%s\"", buf);
 		fprintf(fp, "\n");
 
-		readmem(handler+OFFSET(hw_interrupt_type_startup), KVADDR,
-			&tmp1, sizeof(void *),
-			"hw_interrupt_type startup", FAULT_ON_ERROR);
+		if (VALID_MEMBER(hw_interrupt_type_startup))
+			readmem(handler+OFFSET(hw_interrupt_type_startup),
+				KVADDR,	&tmp1, sizeof(void *),
+				"hw_interrupt_type startup", FAULT_ON_ERROR);
+		else if (VALID_MEMBER(irq_chip_startup))
+			readmem(handler+OFFSET(irq_chip_startup),
+				KVADDR,	&tmp1, sizeof(void *),
+				"hw_interrupt_type startup", FAULT_ON_ERROR);
 		fprintf(fp, "          startup: %lx  ", tmp1); 
 		if (is_kernel_text(tmp1)) 
 			fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0));
@@ -3464,9 +4394,15 @@
                                 	value_to_symstr(tmp2, buf, 0));
 		fprintf(fp, "\n");
 
-                readmem(handler+OFFSET(hw_interrupt_type_shutdown), KVADDR,
-                        &tmp1, sizeof(void *),
-                        "hw_interrupt_type shutdown", FAULT_ON_ERROR);
+		if (VALID_MEMBER(hw_interrupt_type_shutdown))
+	                readmem(handler+OFFSET(hw_interrupt_type_shutdown),
+				KVADDR, &tmp1, sizeof(void *),
+	                        "hw_interrupt_type shutdown", FAULT_ON_ERROR);
+		else if (VALID_MEMBER(irq_chip_shutdown))
+	                readmem(handler+OFFSET(irq_chip_shutdown),
+				KVADDR, &tmp1, sizeof(void *),
+	                        "hw_interrupt_type shutdown", FAULT_ON_ERROR);
+
                 fprintf(fp, "         shutdown: %lx  ", tmp1);
                 if (is_kernel_text(tmp1))
                         fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0));
@@ -3494,9 +4430,14 @@
 	                fprintf(fp, "\n");
 		}
 
-                readmem(handler+OFFSET(hw_interrupt_type_enable), KVADDR,
-                        &tmp1, sizeof(void *),
-                        "hw_interrupt_type enable", FAULT_ON_ERROR);
+		if (VALID_MEMBER(hw_interrupt_type_enable))
+	                readmem(handler+OFFSET(hw_interrupt_type_enable),
+				KVADDR, &tmp1, sizeof(void *),
+	                        "hw_interrupt_type enable", FAULT_ON_ERROR);
+		else if (VALID_MEMBER(irq_chip_enable))
+	                readmem(handler+OFFSET(irq_chip_enable),
+				KVADDR, &tmp1, sizeof(void *),
+	                        "hw_interrupt_type enable", FAULT_ON_ERROR);
                 fprintf(fp, "           enable: %lx  ", tmp1);
                 if (is_kernel_text(tmp1))
                         fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0));
@@ -3507,9 +4448,14 @@
                                         value_to_symstr(tmp2, buf, 0));
                 fprintf(fp, "\n");
 
-                readmem(handler+OFFSET(hw_interrupt_type_disable), KVADDR,
-                        &tmp1, sizeof(void *),
-                        "hw_interrupt_type disable", FAULT_ON_ERROR);
+		if (VALID_MEMBER(hw_interrupt_type_disable))
+	                readmem(handler+OFFSET(hw_interrupt_type_disable),
+				KVADDR, &tmp1, sizeof(void *),
+	                        "hw_interrupt_type disable", FAULT_ON_ERROR);
+		else if (VALID_MEMBER(irq_chip_disable))
+	                readmem(handler+OFFSET(irq_chip_disable),
+				KVADDR, &tmp1, sizeof(void *),
+	                        "hw_interrupt_type disable", FAULT_ON_ERROR);
                 fprintf(fp, "          disable: %lx  ", tmp1);
                 if (is_kernel_text(tmp1))
                         fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0));
@@ -3534,6 +4480,84 @@
                                 	fprintf(fp, "<%s>",
                                         	value_to_symstr(tmp2, buf, 0));
                 	fprintf(fp, "\n");
+		} else if (VALID_MEMBER(irq_chip_ack)) {
+                	readmem(handler+OFFSET(irq_chip_ack), KVADDR,
+                        	&tmp1, sizeof(void *),
+                        	"irq_chip ack", FAULT_ON_ERROR);
+                	fprintf(fp, "              ack: %lx  ", tmp1);
+                	if (is_kernel_text(tmp1))
+                        	fprintf(fp, "<%s>",
+					value_to_symstr(tmp1, buf, 0));
+                	else if (readmem(tmp1, KVADDR, &tmp2,
+                        	sizeof(ulong), "ack indirection",
+                        	RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2))
+                                	fprintf(fp, "<%s>",
+                                        	value_to_symstr(tmp2, buf, 0));
+                	fprintf(fp, "\n");
+		}
+
+		if (VALID_MEMBER(irq_chip_mask)) {
+			readmem(handler+OFFSET(irq_chip_mask), KVADDR,
+				&tmp1, sizeof(void *),
+				"irq_chip mask", FAULT_ON_ERROR);
+                        fprintf(fp, "             mask: %lx  ", tmp1);
+                        if (is_kernel_text(tmp1))
+                                fprintf(fp, "<%s>",
+                                        value_to_symstr(tmp1, buf, 0));
+                        else if (readmem(tmp1, KVADDR, &tmp2,
+                                sizeof(ulong), "mask indirection",
+                                RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2))
+                                        fprintf(fp, "<%s>",
+                                                value_to_symstr(tmp2, buf, 0));
+                        fprintf(fp, "\n");
+		}
+		
+		if (VALID_MEMBER(irq_chip_mask_ack)) {
+			readmem(handler+OFFSET(irq_chip_mask_ack), KVADDR,
+				&tmp1, sizeof(void *),
+				"irq_chip mask_ack", FAULT_ON_ERROR);
+                        fprintf(fp, "         mask_ack: %lx  ", tmp1);
+                        if (is_kernel_text(tmp1))
+                                fprintf(fp, "<%s>",
+                                        value_to_symstr(tmp1, buf, 0));
+                        else if (readmem(tmp1, KVADDR, &tmp2,
+                                sizeof(ulong), "mask_ack indirection",
+                                RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2))
+                                        fprintf(fp, "<%s>",
+                                                value_to_symstr(tmp2, buf, 0));
+                        fprintf(fp, "\n");
+		}
+
+		if (VALID_MEMBER(irq_chip_unmask)) {
+			readmem(handler+OFFSET(irq_chip_unmask), KVADDR,
+				&tmp1, sizeof(void *),
+				"irq_chip unmask", FAULT_ON_ERROR);
+                        fprintf(fp, "           unmask: %lx  ", tmp1);
+                        if (is_kernel_text(tmp1))
+                                fprintf(fp, "<%s>",
+                                        value_to_symstr(tmp1, buf, 0));
+                        else if (readmem(tmp1, KVADDR, &tmp2,
+                                sizeof(ulong), "unmask indirection",
+                                RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2))
+                                        fprintf(fp, "<%s>",
+                                                value_to_symstr(tmp2, buf, 0));
+                        fprintf(fp, "\n");
+		}
+
+		if (VALID_MEMBER(irq_chip_eoi)) {
+			readmem(handler+OFFSET(irq_chip_eoi), KVADDR,
+				&tmp1, sizeof(void *),
+				"irq_chip eoi", FAULT_ON_ERROR);
+                        fprintf(fp, "              eoi: %lx  ", tmp1);
+                        if (is_kernel_text(tmp1))
+                                fprintf(fp, "<%s>",
+                                        value_to_symstr(tmp1, buf, 0));
+                        else if (readmem(tmp1, KVADDR, &tmp2,
+                                sizeof(ulong), "eoi indirection",
+                                RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2))
+                                        fprintf(fp, "<%s>",
+                                                value_to_symstr(tmp2, buf, 0));
+                        fprintf(fp, "\n");
 		}
 
 		if (VALID_MEMBER(hw_interrupt_type_end)) {
@@ -3550,6 +4574,20 @@
                                         fprintf(fp, "<%s>",
                                                 value_to_symstr(tmp2, buf, 0));
                         fprintf(fp, "\n");
+		} else if (VALID_MEMBER(irq_chip_end)) {
+                	readmem(handler+OFFSET(irq_chip_end), KVADDR,
+                        	&tmp1, sizeof(void *),
+                        	"irq_chip end", FAULT_ON_ERROR);
+                        fprintf(fp, "              end: %lx  ", tmp1);
+                        if (is_kernel_text(tmp1))
+                                fprintf(fp, "<%s>",
+                                        value_to_symstr(tmp1, buf, 0));
+                        else if (readmem(tmp1, KVADDR, &tmp2,
+                                sizeof(ulong), "end indirection",
+                                RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2))
+                                        fprintf(fp, "<%s>",
+                                                value_to_symstr(tmp2, buf, 0));
+                        fprintf(fp, "\n");
 		}
 
 		if (VALID_MEMBER(hw_interrupt_type_set_affinity)) {
@@ -3567,6 +4605,66 @@
                                         fprintf(fp, "<%s>",
                                                 value_to_symstr(tmp2, buf, 0));
                         fprintf(fp, "\n");
+		} else if (VALID_MEMBER(irq_chip_set_affinity)) {
+                	readmem(handler+OFFSET(irq_chip_set_affinity),
+				KVADDR, &tmp1, sizeof(void *),
+                        	"irq_chip set_affinity",
+				FAULT_ON_ERROR);
+                        fprintf(fp, "     set_affinity: %lx  ", tmp1);
+                        if (is_kernel_text(tmp1))
+                                fprintf(fp, "<%s>",
+                                        value_to_symstr(tmp1, buf, 0));
+                        else if (readmem(tmp1, KVADDR, &tmp2,
+                                sizeof(ulong), "set_affinity indirection",
+                                RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2))
+                                        fprintf(fp, "<%s>",
+                                                value_to_symstr(tmp2, buf, 0));
+                        fprintf(fp, "\n");
+		}
+		if (VALID_MEMBER(irq_chip_retrigger)) {
+			readmem(handler+OFFSET(irq_chip_retrigger), KVADDR,
+				&tmp1, sizeof(void *),
+				"irq_chip retrigger", FAULT_ON_ERROR);
+                        fprintf(fp, "        retrigger: %lx  ", tmp1);
+                        if (is_kernel_text(tmp1))
+                                fprintf(fp, "<%s>",
+                                        value_to_symstr(tmp1, buf, 0));
+                        else if (readmem(tmp1, KVADDR, &tmp2,
+                                sizeof(ulong), "retrigger indirection",
+                                RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2))
+                                        fprintf(fp, "<%s>",
+                                                value_to_symstr(tmp2, buf, 0));
+                        fprintf(fp, "\n");
+		}
+		if (VALID_MEMBER(irq_chip_set_type)) {
+			readmem(handler+OFFSET(irq_chip_set_type), KVADDR,
+				&tmp1, sizeof(void *),
+				"irq_chip set_type", FAULT_ON_ERROR);
+                        fprintf(fp, "         set_type: %lx  ", tmp1);
+                        if (is_kernel_text(tmp1))
+                                fprintf(fp, "<%s>",
+                                        value_to_symstr(tmp1, buf, 0));
+                        else if (readmem(tmp1, KVADDR, &tmp2,
+                                sizeof(ulong), "set_type indirection",
+                                RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2))
+                                        fprintf(fp, "<%s>",
+                                                value_to_symstr(tmp2, buf, 0));
+                        fprintf(fp, "\n");
+		}
+		if (VALID_MEMBER(irq_chip_set_wake)) {
+			readmem(handler+OFFSET(irq_chip_set_wake), KVADDR,
+				&tmp1, sizeof(void *),
+				"irq_chip set wake", FAULT_ON_ERROR);
+                        fprintf(fp, "         set_wake: %lx  ", tmp1);
+                        if (is_kernel_text(tmp1))
+                                fprintf(fp, "<%s>",
+                                        value_to_symstr(tmp1, buf, 0));
+                        else if (readmem(tmp1, KVADDR, &tmp2,
+                                sizeof(ulong), "set_wake indirection",
+                                RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2))
+                                        fprintf(fp, "<%s>",
+                                                value_to_symstr(tmp2, buf, 0));
+                        fprintf(fp, "\n");
 		}
 	}
 
@@ -4146,7 +5244,7 @@
 }
 
 /*
- *  2.6 per-cpu timers, using "per_cpu__tvec_bases".  XXX
+ *  2.6 per-cpu timers, using "per_cpu__tvec_bases".
  */
 
 static void
@@ -4169,8 +5267,20 @@
          */
         vec_root_size = (i = ARRAY_LENGTH(tvec_root_s_vec)) ?
                 i : get_array_length("tvec_root_s.vec", NULL, SIZE(list_head));
+	if (!vec_root_size && 
+	    (i = get_array_length("tvec_root.vec", NULL, SIZE(list_head))))
+		vec_root_size = i;
+	if (!vec_root_size)
+		error(FATAL, "cannot determine tvec_root.vec[] array size\n");
+
         vec_size = (i = ARRAY_LENGTH(tvec_s_vec)) ?
                 i : get_array_length("tvec_s.vec", NULL, SIZE(list_head));
+	if (!vec_size &&
+	    (i = get_array_length("tvec.vec", NULL, SIZE(list_head))))
+		vec_size = i;
+	if (!vec_size)
+		error(FATAL, "cannot determine tvec.vec[] array size\n");
+
         vec = (ulong *)GETBUF(SIZE(list_head) * MAX(vec_root_size, vec_size));
 	cpu = 0;
 
@@ -4220,8 +5330,12 @@
         else
                 tvec_bases =  symbol_value("per_cpu__tvec_bases");
 
-        fprintf(fp, "TVEC_BASES[%d]: %lx\n", cpu,
-                tvec_bases + SIZE(tvec_t_base_s));
+	if (symbol_exists("boot_tvec_bases")) {
+		readmem(tvec_bases, KVADDR, &tvec_bases, sizeof(void *),
+                        "per-cpu tvec_bases", FAULT_ON_ERROR);
+        }
+
+        fprintf(fp, "TVEC_BASES[%d]: %lx\n", cpu, tvec_bases);
 		
         sprintf(buf1, "%ld", highest);
         flen = MAX(strlen(buf1), strlen("JIFFIES"));
@@ -4320,6 +5434,11 @@
 		else		
 			tvec_bases =  symbol_value("per_cpu__tvec_bases");
 
+		if (symbol_exists("boot_tvec_bases")) {
+			readmem(tvec_bases, KVADDR, &tvec_bases, sizeof(void *), 
+				"per-cpu tvec_bases", FAULT_ON_ERROR);
+		}
+
                 tv[1].base = tvec_bases +
                         OFFSET(tvec_t_base_s_tv1);
                 tv[1].end = tv[1].base + SIZE(tvec_root_s);
@@ -4475,9 +5594,16 @@
                 ld->start = vec[i];
                 ld->list_head_offset = offset;
 		ld->end = vec_kvaddr;
+		ld->flags = RETURN_ON_LIST_ERROR;
 
                 hq_open();
-                timer_cnt = do_list(ld);
+		if ((timer_cnt = do_list(ld)) == -1) {
+			/* Ignore chains with errors */
+			error(INFO, 
+	      	      "ignoring faulty timer list at index %d of timer array\n",
+				i/2);
+			continue; 
+		}
                 if (!timer_cnt)
                 	continue;
                 timer_list = (ulong *)GETBUF(timer_cnt * sizeof(ulong));
@@ -4708,21 +5834,627 @@
 		machdep->last_pgd_read = 0;
 		machdep->last_pmd_read = 0;
 		machdep->last_ptbl_read = 0;
+		if (machdep->clear_machdep_cache)
+			machdep->clear_machdep_cache();
 	}
 }
 
 /*
- *  For kernels containing cpu_online_map, count the bits.
+ *  If it exists, return the number of cpus in the cpu_online_map.
  */
 int
 get_cpus_online()
 {
-	ulong cpu_online_map;
+	int i, len, online;
+	struct gnu_request req;
+	char *buf;
+	ulong *maskptr;
 
 	if (!symbol_exists("cpu_online_map")) 
 		return 0;
 
-	get_symbol_data("cpu_online_map", sizeof(ulong), &cpu_online_map);
+	if (LKCD_KERNTYPES()) {
+		if ((len = STRUCT_SIZE("cpumask_t")) < 0)
+			error(FATAL, "cannot determine type cpumask_t\n");
+	} else
+		len = get_symbol_type("cpu_online_map", NULL, &req) ==
+			TYPE_CODE_UNDEF ?  sizeof(ulong) : req.length;
+	buf = GETBUF(len);
 
-	return count_bits_long(cpu_online_map);
+	online = 0;
+
+        if (readmem(symbol_value("cpu_online_map"), KVADDR, buf, len,
+            "cpu_online_map", RETURN_ON_ERROR)) {
+
+		maskptr = (ulong *)buf;
+		for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++)
+			online += count_bits_long(*maskptr);
+
+		FREEBUF(buf);
+		if (CRASHDEBUG(1))
+			error(INFO, "get_cpus_online: online: %d\n", online);
+	}
+
+	return online;
+}
+
+/*
+ *  If it exists, return the number of cpus in the cpu_present_map.
+ */
+int
+get_cpus_present()
+{
+	int i, len, present;
+	struct gnu_request req;
+	char *buf;
+	ulong *maskptr;
+
+	if (!symbol_exists("cpu_present_map")) 
+		return 0;
+
+	if (LKCD_KERNTYPES()) {
+		if ((len = STRUCT_SIZE("cpumask_t")) < 0)
+			error(FATAL, "cannot determine type cpumask_t\n");
+	} else
+		len = get_symbol_type("cpu_present_map", NULL, &req) ==
+			TYPE_CODE_UNDEF ?  sizeof(ulong) : req.length;
+	buf = GETBUF(len);
+
+	present = 0;
+
+        if (readmem(symbol_value("cpu_present_map"), KVADDR, buf, len,
+            "cpu_present_map", RETURN_ON_ERROR)) {
+
+		maskptr = (ulong *)buf;
+		for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++)
+			present += count_bits_long(*maskptr);
+
+		FREEBUF(buf);
+		if (CRASHDEBUG(1))
+			error(INFO, "get_cpus_present: present: %d\n", present);
+	}
+
+	return present;
+}
+
+/*
+ *  If it exists, return the number of cpus in the cpu_possible_map.
+ */
+int
+get_cpus_possible()
+{
+	int i, len, possible;
+	struct gnu_request req;
+	char *buf;
+	ulong *maskptr;
+
+	if (!symbol_exists("cpu_possible_map"))
+		return 0;
+
+	if (LKCD_KERNTYPES()) {
+		if ((len = STRUCT_SIZE("cpumask_t")) < 0)
+			error(FATAL, "cannot determine type cpumask_t\n");
+	} else
+		len = get_symbol_type("cpu_possible_map", NULL, &req) ==
+			TYPE_CODE_UNDEF ?  sizeof(ulong) : req.length;
+	buf = GETBUF(len);
+
+	possible = 0;
+
+	if (readmem(symbol_value("cpu_possible_map"), KVADDR, buf, len,
+		"cpu_possible_map", RETURN_ON_ERROR)) {
+
+		maskptr = (ulong *)buf;
+		for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++)
+			possible += count_bits_long(*maskptr);
+
+		FREEBUF(buf);
+		if (CRASHDEBUG(1))
+			error(INFO, "get_cpus_possible: possible: %d\n",
+				possible);
+	}
+
+	return possible;
+}
+
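/*
 * get_cpus_online(), get_cpus_present() and get_cpus_possible() above all
 * follow one pattern: read the cpumask as an array of longs and sum the
 * set bits.  A minimal sketch, with a hypothetical helper name standing
 * in for crash's count_bits_long():
 */
static int count_mask_bits(const unsigned long *mask, int len)
{
	int i, cpus = 0;
	unsigned long word;

	for (i = 0; i < len / (int)sizeof(unsigned long); i++)
		for (word = mask[i]; word; word &= word - 1)
			cpus++;			/* clears lowest set bit */
	return cpus;
}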
+/*
+ *  Xen machine-address to pseudo-physical-page translator.
+ */ 
+ulonglong
+xen_m2p(ulonglong machine)
+{
+	ulong mfn, pfn;
+
+	mfn = XEN_MACHINE_TO_MFN(machine);
+	pfn = __xen_m2p(machine, mfn);
+
+	if (pfn == XEN_MFN_NOT_FOUND) {
+		if (CRASHDEBUG(1))
+			error(INFO, 
+			    "xen_machine_to_pseudo_PAE: machine address %lx not found\n",
+                           	 machine);
+		return XEN_MACHADDR_NOT_FOUND;
+	}
+
+	return XEN_PFN_TO_PSEUDO(pfn);
+}
+
+static ulong
+__xen_m2p(ulonglong machine, ulong mfn)
+{
+	ulong mapping, kmfn, pfn, p, i, c;
+	ulong start, end;
+	ulong *mp;
+
+	mp = (ulong *)kt->m2p_page;
+	mapping = kt->phys_to_machine_mapping;
+
+	/*
+	 *  Check the FIFO cache first.
+	 */
+	for (c = 0; c < P2M_MAPPING_CACHE; c++) {
+		if (kt->p2m_mapping_cache[c].mapping &&
+		    ((mfn >= kt->p2m_mapping_cache[c].start) && 
+		     (mfn <= kt->p2m_mapping_cache[c].end))) { 
+
+			if (kt->p2m_mapping_cache[c].mapping != kt->last_mapping_read) {
+                        	if (!readmem(kt->p2m_mapping_cache[c].mapping, KVADDR, 
+			       	    mp, PAGESIZE(), "phys_to_machine_mapping page (cached)", 
+			    	    RETURN_ON_ERROR))
+                                	error(FATAL, "cannot access "
+                                    	    "phys_to_machine_mapping page\n");
+				else
+					kt->last_mapping_read = kt->p2m_mapping_cache[c].mapping;
+			} else
+				kt->p2m_page_cache_hits++;
+
+                	for (i = 0; i < XEN_PFNS_PER_PAGE; i++) {
+				kmfn = (*(mp+i)) & ~XEN_FOREIGN_FRAME;
+                        	if (kmfn == mfn) {
+					p = P2M_MAPPING_TO_PAGE_INDEX(c);
+					pfn = p + i;
+
+                                	if (CRASHDEBUG(1))
+                                    	    console("(cached) mfn: %lx (%llx) p: %ld"
+                                        	" i: %ld pfn: %lx (%llx)\n",
+						mfn, machine, p,
+						i, pfn, XEN_PFN_TO_PSEUDO(pfn));
+					kt->p2m_mfn_cache_hits++;
+
+					return pfn;
+				}
+			}
+			/*
+			 *  Stale entry -- clear it out.
+			 */
+			kt->p2m_mapping_cache[c].mapping = 0;
+		}
+	}
+
+	/*
+	 *  The machine address was not cached, so search from the
+	 *  beginning of the phys_to_machine_mapping array, caching
+	 *  only the found machine address.
+	 */
+	for (p = 0; p < kt->p2m_table_size; p += XEN_PFNS_PER_PAGE) 
+	{
+		if (mapping != kt->last_mapping_read) {
+			if (!readmem(mapping, KVADDR, mp, PAGESIZE(), 
+		    	    "phys_to_machine_mapping page", RETURN_ON_ERROR))
+				error(FATAL, 
+			     	    "cannot access phys_to_machine_mapping page\n");
+			else
+				kt->last_mapping_read = mapping;
+		}
+
+		kt->p2m_pages_searched++;
+
+		if (search_mapping_page(mfn, &i, &start, &end)) {
+			pfn = p + i;
+			if (CRASHDEBUG(1))
+			    console("pages: %d mfn: %lx (%llx) p: %ld"
+				" i: %ld pfn: %lx (%llx)\n",
+				(p/XEN_PFNS_PER_PAGE)+1, mfn, machine,
+				p, i, pfn, XEN_PFN_TO_PSEUDO(pfn));
+
+			c = kt->p2m_cache_index;
+			kt->p2m_mapping_cache[c].start = start;
+			kt->p2m_mapping_cache[c].end = end;
+			kt->p2m_mapping_cache[c].mapping = mapping;
+			kt->p2m_cache_index = (c+1) % P2M_MAPPING_CACHE;
+
+			return pfn;
+		}
+
+		mapping += PAGESIZE();
+	}
+
+	if (CRASHDEBUG(1))
+		console("machine address %llx not found\n", machine);
+
+	return (XEN_MFN_NOT_FOUND);
+}
+
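/*
 * A minimal sketch of the lookup cache consulted above: a small ring of
 * {mapping page, contiguous mfn range} entries that is overwritten
 * FIFO-style.  The structure and names are illustrative stand-ins for the
 * kt->p2m_mapping_cache fields, not definitions from the patch.
 */
#define CACHE_SLOTS 16

struct m2p_cache_entry {
	unsigned long mapping;	/* kvaddr of the p2m page last searched */
	unsigned long start;	/* lowest mfn of the contiguous run found */
	unsigned long end;	/* highest mfn of that run */
};

static struct m2p_cache_entry m2p_cache[CACHE_SLOTS];
static int m2p_cache_index;

static void m2p_cache_insert(unsigned long mapping,
			     unsigned long start, unsigned long end)
{
	m2p_cache[m2p_cache_index].mapping = mapping;
	m2p_cache[m2p_cache_index].start = start;
	m2p_cache[m2p_cache_index].end = end;
	m2p_cache_index = (m2p_cache_index + 1) % CACHE_SLOTS;
}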
+/*
+ *  Search for an mfn in the current mapping page, and if found, 
+ *  determine the range of contiguous mfns that it's contained
+ *  within (if any). 
+ */
+#define PREV_UP    0x1
+#define NEXT_UP    0x2
+#define PREV_DOWN  0x4
+#define NEXT_DOWN  0x8
+
+static int
+search_mapping_page(ulong mfn, ulong *index, ulong *startptr, ulong *endptr)
+{
+	int n, found;
+	ulong i, kmfn;
+	ulong flags, start, end, next, prev, curr;
+	ulong *mp;
+
+	mp = (ulong *)kt->m2p_page;
+
+	for (i = 0, found = FALSE; i < XEN_PFNS_PER_PAGE; i++) {
+		kmfn = (*(mp+i)) & ~XEN_FOREIGN_FRAME;
+
+		if (kmfn == mfn) {
+			found = TRUE;
+			*index = i;
+			break;
+		}
+	}
+
+	if (found) {
+		flags = 0;
+		next = prev = XEN_MFN_NOT_FOUND;
+		start = end = kmfn;
+
+		if (i)
+			prev = (*(mp+(i-1))) & ~XEN_FOREIGN_FRAME;
+		if ((i+1) != XEN_PFNS_PER_PAGE)
+			next = (*(mp+(i+1))) & ~XEN_FOREIGN_FRAME;
+
+		if (prev == (kmfn-1))
+			flags |= PREV_UP;
+		else if (prev == (kmfn+1))
+			flags |= PREV_DOWN;
+
+		if (next == (kmfn+1))
+			flags |= NEXT_UP;
+		else if (next == (kmfn-1))
+			flags |= NEXT_DOWN;
+
+		/*  Should be impossible, but just in case... */
+		if ((flags & PREV_UP) && (flags & NEXT_DOWN))
+			flags &= ~NEXT_DOWN;
+		else if ((flags & PREV_DOWN) && (flags & NEXT_UP))
+			flags &= ~NEXT_UP;
+
+		if (flags & (PREV_UP|PREV_DOWN)) {
+			start = prev;
+
+			for (n = (i-2); n >= 0; n--) {
+				curr = (*(mp+n)) & ~XEN_FOREIGN_FRAME;
+				if (flags & PREV_UP) {
+					if (curr == (start-1))
+						start = curr;
+				} else {
+					if (curr == (start+1))
+						start = curr;
+				}
+			}
+
+		}
+
+		if (flags & (NEXT_UP|NEXT_DOWN)) {
+			end = next;
+
+			for (n = (i+2); n < XEN_PFNS_PER_PAGE; n++) {
+				curr = (*(mp+n)) & ~XEN_FOREIGN_FRAME;
+				if (flags & NEXT_UP) {
+					if (curr == (end+1))
+						end = curr;
+				} else {
+					if (curr == (end-1))
+						end = curr;
+				}
+			}
+
+
+		}
+
+		if (start > end) {
+			curr = start;
+			start = end;
+			end = curr;	
+		}
+
+		*startptr = start;
+		*endptr = end;
+
+		if (CRASHDEBUG(2))
+			fprintf(fp, "mfn: %lx -> start: %lx end: %lx (%ld mfns)\n", 
+				mfn, start, end, end - start);
+	}
+
+	return found;
+}
+
+
+
+/*
+ *  Read the relevant IKCONFIG (In Kernel Config) data if available.
+ */
+
+static char *ikconfig[] = {
+        "CONFIG_NR_CPUS",
+        "CONFIG_PGTABLE_4",
+        "CONFIG_HZ",
+	"CONFIG_DEBUG_BUGVERBOSE",
+        NULL,
+};
+
+void
+read_in_kernel_config(int command)
+{
+	struct syment *sp;
+	int ii, jj, ret, end, found=0;
+	unsigned long size, bufsz;
+	char *pos, *ln, *buf, *head, *tail, *val, *uncomp;
+	char line[512];
+	z_stream stream;
+
+	if ((kt->flags & NO_IKCONFIG) && !(pc->flags & RUNTIME))
+		return;
+
+	if ((sp = symbol_search("kernel_config_data")) == NULL) {
+		if (command == IKCFG_READ)
+			error(FATAL, 
+			    "kernel_config_data does not exist in this kernel\n");
+		return;
+	}
+	
+	/* We don't know how large IKCONFIG is, so start with 32k;
+	 * if we can't find MAGIC_END, assume we didn't read enough,
+	 * double the size, and try again.
+	 */
+	ii = 32;
+
+again:
+	size = ii * 1024;
+
+	if ((buf = (char *)malloc(size)) == NULL) {
+		error(WARNING, "cannot malloc IKCONFIG input buffer\n");
+		return;
+	}
+	
+        if (!readmem(sp->value, KVADDR, buf, size,
+            "kernel_config_data", RETURN_ON_ERROR)) {
+		error(WARNING, "cannot read kernel_config_data\n");
+		goto out2;
+	}
+		
+	/* Find the start */
+	if (strstr(buf, MAGIC_START))
+		head = buf + MAGIC_SIZE + 10; /* skip past MAGIC_START and gzip header */
+	else {
+		error(WARNING, "could not find MAGIC_START!\n");
+		goto out2;
+	}
+
+	tail = head;
+
+	end = strlen(MAGIC_END);
+
+	/* Find the end */
+	while (tail < (buf + (size - 1))) {
+		
+		if (strncmp(tail, MAGIC_END, end)==0) {
+			found = 1;
+			break;
+		}
+		tail++;
+	}
+
+	if (found) {
+		bufsz = tail - head;
+		size = 10 * bufsz;
+		if ((uncomp = (char *)malloc(size)) == NULL) {
+			error(WARNING, "cannot malloc IKCONFIG output buffer\n");
+			goto out2;
+		}
+	} else {
+		if (ii > 512) {
+			error(WARNING, "could not find MAGIC_END!\n");
+			goto out2;
+		} else {
+			free(buf);
+			ii *= 2;
+			goto again;
+		}
+	}
+
+
+	/* initialize zlib */
+	stream.next_in = (Bytef *)head;
+	stream.avail_in = (uInt)bufsz;
+
+	stream.next_out = (Bytef *)uncomp;
+	stream.avail_out = (uInt)size;
+
+	stream.zalloc = NULL;
+	stream.zfree = NULL;
+	stream.opaque = NULL;
+
+	ret = inflateInit2(&stream, -MAX_WBITS);
+	if (ret != Z_OK) {
+		read_in_kernel_config_err(ret, "initialize");
+		goto out1;
+	}
+
+	ret = inflate(&stream, Z_FINISH);
+
+	if (ret != Z_STREAM_END) {
+		inflateEnd(&stream);
+		if (ret == Z_NEED_DICT || 
+		   (ret == Z_BUF_ERROR && stream.avail_in == 0)) {
+			read_in_kernel_config_err(Z_DATA_ERROR, "uncompress");
+			goto out1;
+		}
+		read_in_kernel_config_err(ret, "uncompress");
+		goto out1;
+	}
+	size = stream.total_out;
+
+	ret = inflateEnd(&stream);
+
+	pos = uncomp;
+
+	do {
+		ret = sscanf(pos, "%511[^\n]\n%n", line, &ii);
+		if (ret > 0) {
+			if ((command == IKCFG_READ) || CRASHDEBUG(8))
+				fprintf(fp, "%s\n", line);
+
+			pos += ii;
+
+			ln = line;
+				
+			/* skip leading whitespace */
+			while (whitespace(*ln))
+				ln++;
+
+			/* skip comments -- except when looking for "not set" */
+			if (*ln == '#') {
+				if (strstr(ln, "CONFIG_DEBUG_BUGVERBOSE") &&
+				    strstr(ln, "not set"))
+					kt->flags |= BUGVERBOSE_OFF;
+				continue;
+			}
+
+			/* Find '=' */
+			if ((head = strchr(ln, '=')) != NULL) {
+				*head = '\0';
+				val = head + 1;
+
+				head--;
+
+				/* skip trailing whitespace */
+				while (whitespace(*head)) {
+					*head = '\0';
+					head--;
+				}
+
+				/* skip whitespace */
+				while (whitespace(*val))
+					val++;
+
+			} else /* Bad line, skip it */
+				continue;
+
+			if (command != IKCFG_INIT)
+				continue;
+
+			for (jj = 0; ikconfig[jj]; jj++) {
+				 if (STREQ(ln, ikconfig[jj])) {
+
+					if (STREQ(ln, "CONFIG_NR_CPUS")) {
+						kt->kernel_NR_CPUS = atoi(val);
+						if (CRASHDEBUG(1)) 
+							error(INFO, 
+							    "CONFIG_NR_CPUS: %d\n",
+								kt->kernel_NR_CPUS);
+
+					} else if (STREQ(ln, "CONFIG_PGTABLE_4")) {
+						machdep->flags |= VM_4_LEVEL;
+						if (CRASHDEBUG(1))
+							error(INFO, "CONFIG_PGTABLE_4\n");
+
+					} else if (STREQ(ln, "CONFIG_HZ")) {
+						machdep->hz = atoi(val);
+						if (CRASHDEBUG(1))
+							error(INFO, 
+							    "CONFIG_HZ: %d\n",
+								machdep->hz);
+					}
+				}
+			}
+		}
+	} while (ret > 0);
+
+out1:
+	free(uncomp);
+out2:
+	free(buf);
+
+	return;
+}
+
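/*
 * A minimal standalone sketch of the decompression step above: the
 * IKCONFIG payload is gzip data, so after skipping the 10-byte gzip
 * header it can be inflated as a raw deflate stream (negative window
 * bits).  Function and buffer names are illustrative.
 */
#include <string.h>
#include <zlib.h>

static int inflate_raw(unsigned char *in, unsigned long inlen,
		       unsigned char *out, unsigned long *outlen)
{
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	strm.next_in = (Bytef *)in;
	strm.avail_in = (uInt)inlen;
	strm.next_out = (Bytef *)out;
	strm.avail_out = (uInt)*outlen;

	if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)
		return -1;

	ret = inflate(&strm, Z_FINISH);
	*outlen = strm.total_out;
	inflateEnd(&strm);

	return (ret == Z_STREAM_END) ? 0 : -1;
}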
+static void
+read_in_kernel_config_err(int e, char *msg)
+{
+	error(WARNING, "zlib could not %s\n", msg);
+	switch (e) {
+		case Z_OK:
+			fprintf(fp, "Z_OK\n");
+			break;
+
+		case Z_STREAM_END:
+			fprintf(fp, "Z_STREAM_END\n");
+			break;
+
+		case Z_NEED_DICT:
+			fprintf(fp, "Z_NEED_DICT\n");
+			break;
+		
+		case Z_ERRNO:
+			fprintf(fp, "Z_ERRNO\n");
+			break;
+
+		case Z_STREAM_ERROR:
+			fprintf(fp, "Z_STREAM_ERROR\n");
+			break;
+
+		case Z_DATA_ERROR: 
+			fprintf(fp, "Z_DATA_ERROR\n");
+			break;
+
+		case Z_MEM_ERROR: /* out of memory */
+			fprintf(fp, "Z_MEM_ERROR\n");
+			break;
+
+		case Z_BUF_ERROR: /* not enough room in output buf */
+			fprintf(fp, "Z_BUF_ERROR\n");
+			break;
+		
+		case Z_VERSION_ERROR:
+			fprintf(fp, "Z_VERSION_ERROR\n");
+			break;
+
+		default: 
+			fprintf(fp, "UNKNOWN ERROR: %d\n", e);
+			break;
+	}
+}
+
+/*
+ *  With the evidence available, attempt to pre-determine whether
+ *  this is a paravirt-capable kernel running as bare-metal, xen, 
+ *  kvm, etc. 
+ *
+ *  NOTE: Only bare-metal pv_ops kernels are supported so far. 
+ */
+void
+paravirt_init(void)
+{
+	/*
+	 *  pv_init_ops appears to be (as of 2.6.27) an arch-common
+	 *  symbol.  This may have to change.
+	 */
+	if (kernel_symbol_exists("pv_init_ops")) {
+		if (CRASHDEBUG(1))
+			error(INFO, "pv_init_ops exists: ARCH_PVOPS\n");
+		kt->flags |= ARCH_PVOPS;
+	}
 }
--- crash/unwind_x86_32_64.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/unwind_x86_32_64.c	2006-11-15 14:58:11.000000000 -0500
@@ -0,0 +1,1220 @@
+#if defined(X86_64)
+/*
+ * Support for generating DWARF CFI-based backtraces.
+ * Borrowed heavily from the kernel's DWARF CFI unwinder
+ * written by Jan Beulich.
+ */
+
+#ifdef X86_64
+#include "unwind_x86_64.h"
+#endif
+#ifdef X86
+#include "unwind_x86.h"
+#endif
+
+#include "defs.h"
+
+#define MAX_STACK_DEPTH 8
+
+static struct local_unwind_table {
+        struct {
+                unsigned long pc;
+                unsigned long range;
+        } core, init;
+        void *address;
+        unsigned long size;
+} *local_unwind_tables, default_unwind_table;
+
+static int gather_in_memory_unwind_tables(void);
+static int populate_local_tables(ulong, char *);
+static int unwind_tables_cnt = 0;
+static struct local_unwind_table *find_table(unsigned long);
+static void dump_local_unwind_tables(void);
+
+static const struct {
+	unsigned offs:BITS_PER_LONG / 2;
+	unsigned width:BITS_PER_LONG / 2;
+} reg_info[] = {
+	UNW_REGISTER_INFO
+};
+
+#undef PTREGS_INFO
+#undef EXTRA_INFO
+
+#ifndef REG_INVALID
+#define REG_INVALID(r) (reg_info[r].width == 0)
+#endif
+
+#define DW_CFA_nop                          0x00
+#define DW_CFA_set_loc                      0x01
+#define DW_CFA_advance_loc1                 0x02
+#define DW_CFA_advance_loc2                 0x03
+#define DW_CFA_advance_loc4                 0x04
+#define DW_CFA_offset_extended              0x05
+#define DW_CFA_restore_extended             0x06
+#define DW_CFA_undefined                    0x07
+#define DW_CFA_same_value                   0x08
+#define DW_CFA_register                     0x09
+#define DW_CFA_remember_state               0x0a
+#define DW_CFA_restore_state                0x0b
+#define DW_CFA_def_cfa                      0x0c
+#define DW_CFA_def_cfa_register             0x0d
+#define DW_CFA_def_cfa_offset               0x0e
+#define DW_CFA_def_cfa_expression           0x0f
+#define DW_CFA_expression                   0x10
+#define DW_CFA_offset_extended_sf           0x11
+#define DW_CFA_def_cfa_sf                   0x12
+#define DW_CFA_def_cfa_offset_sf            0x13
+#define DW_CFA_val_offset                   0x14
+#define DW_CFA_val_offset_sf                0x15
+#define DW_CFA_val_expression               0x16
+#define DW_CFA_lo_user                      0x1c
+#define DW_CFA_GNU_window_save              0x2d
+#define DW_CFA_GNU_args_size                0x2e
+#define DW_CFA_GNU_negative_offset_extended 0x2f
+#define DW_CFA_hi_user                      0x3f
+
+#define DW_EH_PE_FORM     0x07
+#define DW_EH_PE_native   0x00
+#define DW_EH_PE_leb128   0x01
+#define DW_EH_PE_data2    0x02
+#define DW_EH_PE_data4    0x03
+#define DW_EH_PE_data8    0x04
+#define DW_EH_PE_signed   0x08
+#define DW_EH_PE_ADJUST   0x70
+#define DW_EH_PE_abs      0x00
+#define DW_EH_PE_pcrel    0x10
+#define DW_EH_PE_textrel  0x20
+#define DW_EH_PE_datarel  0x30
+#define DW_EH_PE_funcrel  0x40
+#define DW_EH_PE_aligned  0x50
+#define DW_EH_PE_indirect 0x80
+#define DW_EH_PE_omit     0xff
+
+#define min(x,y) ({ \
+        typeof(x) _x = (x);     \
+        typeof(y) _y = (y);     \
+        (void) (&_x == &_y);            \
+        _x < _y ? _x : _y; })
+
+#define max(x,y) ({ \
+        typeof(x) _x = (x);     \
+        typeof(y) _y = (y);     \
+        (void) (&_x == &_y);            \
+        _x > _y ? _x : _y; })
+#define STACK_LIMIT(ptr)     (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+typedef unsigned long uleb128_t;
+typedef   signed long sleb128_t;
+
+struct unwind_item {
+	enum item_location {
+		Nowhere,
+		Memory,
+		Register,
+		Value
+	} where;
+	uleb128_t value;
+};
+
+struct unwind_state {
+	uleb128_t loc, org;
+	const u8 *cieStart, *cieEnd;
+	uleb128_t codeAlign;
+	sleb128_t dataAlign;
+	struct cfa {
+		uleb128_t reg, offs;
+	} cfa;
+	struct unwind_item regs[ARRAY_SIZE(reg_info)];
+	unsigned stackDepth:8;
+	unsigned version:8;
+	const u8 *label;
+	const u8 *stack[MAX_STACK_DEPTH];
+};
+
+static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
+
+static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
+{
+	const u8 *cur = *pcur;
+	uleb128_t value;
+	unsigned shift;
+
+	for (shift = 0, value = 0; cur < end; shift += 7) {
+		if (shift + 7 > 8 * sizeof(value)
+		    && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+			cur = end + 1;
+			break;
+		}
+		value |= (uleb128_t)(*cur & 0x7f) << shift;
+		if (!(*cur++ & 0x80))
+			break;
+	}
+	*pcur = cur;
+
+	return value;
+}
+
+static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
+{
+	const u8 *cur = *pcur;
+	sleb128_t value;
+	unsigned shift;
+
+	for (shift = 0, value = 0; cur < end; shift += 7) {
+		if (shift + 7 > 8 * sizeof(value)
+		    && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+			cur = end + 1;
+			break;
+		}
+		value |= (sleb128_t)(*cur & 0x7f) << shift;
+		if (!(*cur & 0x80)) {
+			value |= -(*cur++ & 0x40) << shift;
+			break;
+		}
+	}
+	*pcur = cur;
+
+	return value;
+}
+
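/*
 * A worked LEB128 example (the classic one from the DWARF spec) for the
 * decoders above: the unsigned value 624485 (0x98765) is split into 7-bit
 * groups from the least-significant end and emitted low group first, with
 * bit 7 set on every byte except the last:
 *
 *     0x98765 -> groups 0x65, 0x0e, 0x26 -> bytes 0xe5 0x8e 0x26
 *
 * A self-contained check of that decoding (independent of the static
 * helpers in this file):
 */
#include <assert.h>

int main(void)
{
	const unsigned char enc[] = { 0xe5, 0x8e, 0x26 };
	unsigned long value = 0;
	unsigned shift = 0;
	unsigned i;

	for (i = 0; i < sizeof(enc); i++, shift += 7) {
		value |= (unsigned long)(enc[i] & 0x7f) << shift;
		if (!(enc[i] & 0x80))
			break;
	}
	assert(value == 624485);
	return 0;
}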
+static unsigned long read_pointer(const u8 **pLoc,
+                                  const void *end,
+                                  signed ptrType)
+{
+	unsigned long value = 0;
+	union {
+		const u8 *p8;
+		const u16 *p16u;
+		const s16 *p16s;
+		const u32 *p32u;
+		const s32 *p32s;
+		const unsigned long *pul;
+	} ptr;
+
+	if (ptrType < 0 || ptrType == DW_EH_PE_omit)
+		return 0;
+	ptr.p8 = *pLoc;
+	switch(ptrType & DW_EH_PE_FORM) {
+	case DW_EH_PE_data2:
+		if (end < (const void *)(ptr.p16u + 1))
+			return 0;
+		if(ptrType & DW_EH_PE_signed)
+			value = get_unaligned(ptr.p16s++);
+		else
+			value = get_unaligned(ptr.p16u++);
+		break;
+	case DW_EH_PE_data4:
+#ifdef CONFIG_64BIT
+		if (end < (const void *)(ptr.p32u + 1))
+			return 0;
+		if(ptrType & DW_EH_PE_signed)
+			value = get_unaligned(ptr.p32s++);
+		else
+			value = get_unaligned(ptr.p32u++);
+		break;
+	case DW_EH_PE_data8:
+		BUILD_BUG_ON(sizeof(u64) != sizeof(value));
+#else
+		BUILD_BUG_ON(sizeof(u32) != sizeof(value));
+#endif
+	case DW_EH_PE_native:
+		if (end < (const void *)(ptr.pul + 1))
+			return 0;
+		value = get_unaligned(ptr.pul++);
+		break;
+	case DW_EH_PE_leb128:
+		BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
+		value = ptrType & DW_EH_PE_signed
+		        ? get_sleb128(&ptr.p8, end)
+		        : get_uleb128(&ptr.p8, end);
+		if ((const void *)ptr.p8 > end)
+			return 0;
+		break;
+	default:
+		return 0;
+	}
+	switch(ptrType & DW_EH_PE_ADJUST) {
+	case DW_EH_PE_abs:
+		break;
+	case DW_EH_PE_pcrel:
+		value += (unsigned long)*pLoc;
+		break;
+	default:
+		return 0;
+	}
+
+/*	TBD
+	if ((ptrType & DW_EH_PE_indirect)
+	    && __get_user(value, (unsigned long *)value))
+		return 0;
+*/
+	*pLoc = ptr.p8;
+
+	return value;
+}
+
+static signed fde_pointer_type(const u32 *cie)
+{
+	const u8 *ptr = (const u8 *)(cie + 2);
+	unsigned version = *ptr;
+
+	if (version != 1)
+		return -1; /* unsupported */
+	if (*++ptr) {
+		const char *aug;
+		const u8 *end = (const u8 *)(cie + 1) + *cie;
+		uleb128_t len;
+
+		/* check if augmentation size is first (and thus present) */
+		if (*ptr != 'z')
+			return -1;
+		/* check if augmentation string is nul-terminated */
+		if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL)
+			return -1;
+		++ptr; /* skip terminator */
+		get_uleb128(&ptr, end); /* skip code alignment */
+		get_sleb128(&ptr, end); /* skip data alignment */
+		/* skip return address column */
+		version <= 1 ? (void)++ptr : (void)get_uleb128(&ptr, end);
+		len = get_uleb128(&ptr, end); /* augmentation length */
+		if (ptr + len < ptr || ptr + len > end)
+			return -1;
+		end = ptr + len;
+		while (*++aug) {
+			if (ptr >= end)
+				return -1;
+			switch(*aug) {
+			case 'L':
+				++ptr;
+				break;
+			case 'P': {
+					signed ptrType = *ptr++;
+
+					if (!read_pointer(&ptr, end, ptrType) ||
+					    ptr > end)
+						return -1;
+				}
+				break;
+			case 'R':
+				return *ptr;
+			default:
+				return -1;
+			}
+		}
+	}
+	return DW_EH_PE_native|DW_EH_PE_abs;
+}
+
+static int advance_loc(unsigned long delta, struct unwind_state *state)
+{
+	state->loc += delta * state->codeAlign;
+
+	return delta > 0;
+}
+
+static void set_rule(uleb128_t reg,
+                     enum item_location where,
+                     uleb128_t value,
+                     struct unwind_state *state)
+{
+	if (reg < ARRAY_SIZE(state->regs)) {
+		state->regs[reg].where = where;
+		state->regs[reg].value = value;
+	}
+}
+
+static int processCFI(const u8 *start,
+                      const u8 *end,
+                      unsigned long targetLoc,
+                      signed ptrType,
+                      struct unwind_state *state)
+{
+	union {
+		const u8 *p8;
+		const u16 *p16;
+		const u32 *p32;
+	} ptr;
+	int result = 1;
+
+	if (start != state->cieStart) {
+		state->loc = state->org;
+		result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state);
+		if (targetLoc == 0 && state->label == NULL)
+			return result;
+	}
+	for (ptr.p8 = start; result && ptr.p8 < end; ) {
+		switch(*ptr.p8 >> 6) {
+			uleb128_t value;
+
+		case 0:
+			switch(*ptr.p8++) {
+			case DW_CFA_nop:
+				break;
+			case DW_CFA_set_loc:
+				if ((state->loc = read_pointer(&ptr.p8, end,
+								ptrType)) == 0)
+					result = 0;
+				break;
+			case DW_CFA_advance_loc1:
+				result = ptr.p8 < end && advance_loc(*ptr.p8++, state);
+				break;
+			case DW_CFA_advance_loc2:
+				result = ptr.p8 <= end + 2
+				         && advance_loc(*ptr.p16++, state);
+				break;
+			case DW_CFA_advance_loc4:
+				result = ptr.p8 <= end + 4
+				         && advance_loc(*ptr.p32++, state);
+				break;
+			case DW_CFA_offset_extended:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value, Memory,
+					get_uleb128(&ptr.p8, end), state);
+				break;
+			case DW_CFA_val_offset:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value, Value,
+					get_uleb128(&ptr.p8, end), state);
+				break;
+			case DW_CFA_offset_extended_sf:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value, Memory,
+					get_sleb128(&ptr.p8, end), state);
+				break;
+			case DW_CFA_val_offset_sf:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value, Value,
+					get_sleb128(&ptr.p8, end), state);
+				break;
+			case DW_CFA_restore_extended:
+			case DW_CFA_undefined:
+			case DW_CFA_same_value:
+				set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0,	state);
+				break;
+			case DW_CFA_register:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value, Register,
+				         get_uleb128(&ptr.p8, end), state);
+				break;
+			case DW_CFA_remember_state:
+				if (ptr.p8 == state->label) {
+					state->label = NULL;
+					return 1;
+				}
+				if (state->stackDepth >= MAX_STACK_DEPTH)
+					return 0;
+				state->stack[state->stackDepth++] = ptr.p8;
+				break;
+			case DW_CFA_restore_state:
+				if (state->stackDepth) {
+					const uleb128_t loc = state->loc;
+					const u8 *label = state->label;
+
+					state->label = state->stack[state->stackDepth - 1];
+					memcpy(&state->cfa, &badCFA, sizeof(state->cfa));
+					memset(state->regs, 0, sizeof(state->regs));
+					state->stackDepth = 0;
+					result = processCFI(start, end, 0, ptrType, state);
+					state->loc = loc;
+					state->label = label;
+				} else
+					return 0;
+				break;
+			case DW_CFA_def_cfa:
+				state->cfa.reg = get_uleb128(&ptr.p8, end);
+				/*nobreak*/
+			case DW_CFA_def_cfa_offset:
+				state->cfa.offs = get_uleb128(&ptr.p8, end);
+				break;
+			case DW_CFA_def_cfa_sf:
+				state->cfa.reg = get_uleb128(&ptr.p8, end);
+				/*nobreak*/
+			case DW_CFA_def_cfa_offset_sf:
+				state->cfa.offs = get_sleb128(&ptr.p8, end)
+				                  * state->dataAlign;
+				break;
+			case DW_CFA_def_cfa_register:
+				state->cfa.reg = get_uleb128(&ptr.p8, end);
+				break;
+			/*todo case DW_CFA_def_cfa_expression: */
+			/*todo case DW_CFA_expression: */
+			/*todo case DW_CFA_val_expression: */
+			case DW_CFA_GNU_args_size:
+				get_uleb128(&ptr.p8, end);
+				break;
+			case DW_CFA_GNU_negative_offset_extended:
+				value = get_uleb128(&ptr.p8, end);
+				set_rule(value, Memory, (uleb128_t)0 -
+				         get_uleb128(&ptr.p8, end), state);
+				break;
+			case DW_CFA_GNU_window_save:
+			default:
+				result = 0;
+				break;
+			}
+			break;
+		case 1:
+			result = advance_loc(*ptr.p8++ & 0x3f, state);
+			break;
+		case 2:
+			value = *ptr.p8++ & 0x3f;
+			set_rule(value, Memory, get_uleb128(&ptr.p8, end),
+				 state);
+			break;
+		case 3:
+			set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
+			break;
+		}
+		if (ptr.p8 > end)
+			result = 0;
+		if (result && targetLoc != 0 && targetLoc < state->loc)
+			return 1;
+	}
+
+	return result
+	   && ptr.p8 == end
+	   && (targetLoc == 0
+	    || (/*todo While in theory this should apply, gcc in practice omits
+	          everything past the function prolog, and hence the location
+	          never reaches the end of the function.
+	        targetLoc < state->loc &&*/ state->label == NULL));
+}
+
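+/*
+ * A worked example of the encodings handled above, assuming the usual
+ * x86-64 CIE values codeAlign = 1 and dataAlign = -8 (illustrative only):
+ */
+#if 0
+static const u8 cfi_example[] = {
+	0x0e, 0x10,	/* DW_CFA_def_cfa_offset 16: cfa.offs becomes 16      */
+	0x86, 0x02,	/* DW_CFA_offset r6 (rbp), 2: Memory rule, value 2,   */
+			/*   i.e. rbp saved at cfa + 2 * dataAlign = cfa - 16 */
+	0x44,		/* DW_CFA_advance_loc 4: loc += 4 * codeAlign         */
+};
+#endif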
+
+/* Unwind to the previous frame.  Returns 0 if successful, negative
+ * number in case of an error. */
+int 
+unwind(struct unwind_frame_info *frame)
+{
+#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
+	const u32 *fde = NULL, *cie = NULL;
+	const u8 *ptr = NULL, *end = NULL;
+	unsigned long startLoc = 0, endLoc = 0, cfa;
+	unsigned i;
+	signed ptrType = -1;
+	uleb128_t retAddrReg = 0;
+//	struct unwind_table *table;
+	void *unwind_table;
+	struct local_unwind_table *table;
+	struct unwind_state state;
+	u64 reg_ptr = 0;
+
+
+	if (UNW_PC(frame) == 0)
+		return -EINVAL;
+
+	if ((table = find_table(UNW_PC(frame)))) {
+//		unsigned long tableSize = unwind_table_size;
+		unsigned long tableSize = table->size;
+
+		unwind_table = table->address;
+
+		for (fde = unwind_table;
+		     tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
+		     tableSize -= sizeof(*fde) + *fde,
+		     fde += 1 + *fde / sizeof(*fde)) {
+			if (!*fde || (*fde & (sizeof(*fde) - 1)))
+				break;
+			if (!fde[1])
+				continue; /* this is a CIE */
+			if ((fde[1] & (sizeof(*fde) - 1))
+			    || fde[1] > (unsigned long)(fde + 1)
+			                - (unsigned long)unwind_table)
+				continue; /* this is not a valid FDE */
+			cie = fde + 1 - fde[1] / sizeof(*fde);
+			if (*cie <= sizeof(*cie) + 4
+			    || *cie >= fde[1] - sizeof(*fde)
+			    || (*cie & (sizeof(*cie) - 1))
+			    || cie[1]
+			    || (ptrType = fde_pointer_type(cie)) < 0) {
+				cie = NULL; /* this is not a (valid) CIE */
+				continue;
+			}
+			ptr = (const u8 *)(fde + 2);
+			startLoc = read_pointer(&ptr,
+			                        (const u8 *)(fde + 1) + *fde,
+			                        ptrType);
+			endLoc = startLoc
+			         + read_pointer(&ptr,
+			                        (const u8 *)(fde + 1) + *fde,
+			                        ptrType & DW_EH_PE_indirect
+			                        ? ptrType
+			                        : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed));
+			if (UNW_PC(frame) >= startLoc && UNW_PC(frame) < endLoc)
+				break;
+			cie = NULL;
+		}
+	}
+	if (cie != NULL) {
+		memset(&state, 0, sizeof(state));
+		state.cieEnd = ptr; /* keep here temporarily */
+		ptr = (const u8 *)(cie + 2);
+		end = (const u8 *)(cie + 1) + *cie;
+		if ((state.version = *ptr) != 1)
+			cie = NULL; /* unsupported version */
+		else if (*++ptr) {
+			/* check if augmentation size is first (and thus present) */
+			if (*ptr == 'z') {
+				/* check for ignorable (or already handled)
+				 * nul-terminated augmentation string */
+				while (++ptr < end && *ptr)
+					if (strchr("LPR", *ptr) == NULL)
+						break;
+			}
+			if (ptr >= end || *ptr)
+				cie = NULL;
+		}
+		++ptr;
+	}
+	if (cie != NULL) {
+		/* get code alignment factor */
+		state.codeAlign = get_uleb128(&ptr, end);
+		/* get data alignment factor */
+		state.dataAlign = get_sleb128(&ptr, end);
+		if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
+			cie = NULL;
+		else {
+			retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end);
+			/* skip augmentation */
+			if (((const char *)(cie + 2))[1] == 'z')
+				ptr += get_uleb128(&ptr, end);
+			if (ptr > end
+			   || retAddrReg >= ARRAY_SIZE(reg_info)
+			   || REG_INVALID(retAddrReg)
+			   || reg_info[retAddrReg].width != sizeof(unsigned long))
+				cie = NULL;
+		}
+	}
+	if (cie != NULL) {
+		state.cieStart = ptr;
+		ptr = state.cieEnd;
+		state.cieEnd = end;
+		end = (const u8 *)(fde + 1) + *fde;
+		/* skip augmentation */
+		if (((const char *)(cie + 2))[1] == 'z') {
+			uleb128_t augSize = get_uleb128(&ptr, end);
+
+			if ((ptr += augSize) > end)
+				fde = NULL;
+		}
+	}
+	if (cie == NULL || fde == NULL)
+		return -ENXIO;
+
+	state.org = startLoc;
+	memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
+	/* process instructions */
+	if (!processCFI(ptr, end, UNW_PC(frame), ptrType, &state)
+	   || state.loc > endLoc
+	   || state.regs[retAddrReg].where == Nowhere
+	   || state.cfa.reg >= ARRAY_SIZE(reg_info)
+	   || reg_info[state.cfa.reg].width != sizeof(unsigned long)
+	   || state.cfa.offs % sizeof(unsigned long)) {
+		return -EIO;
+	}
+	/* update frame */
+	cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
+	startLoc = min((unsigned long)UNW_SP(frame), cfa);
+	endLoc = max((unsigned long)UNW_SP(frame), cfa);
+	if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
+		startLoc = min(STACK_LIMIT(cfa), cfa);
+		endLoc = max(STACK_LIMIT(cfa), cfa);
+	}
+#ifndef CONFIG_64BIT
+# define CASES CASE(8); CASE(16); CASE(32)
+#else
+# define CASES CASE(8); CASE(16); CASE(32); CASE(64)
+#endif
+	for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+		if (REG_INVALID(i)) {
+			if (state.regs[i].where == Nowhere)
+				continue;
+			return -EIO;
+		}
+		switch(state.regs[i].where) {
+		default:
+			break;
+		case Register:
+			if (state.regs[i].value >= ARRAY_SIZE(reg_info)
+			   || REG_INVALID(state.regs[i].value)
+			   || reg_info[i].width > reg_info[state.regs[i].value].width) {
+				return -EIO;
+			}
+			switch(reg_info[state.regs[i].value].width) {
+#define CASE(n) \
+			case sizeof(u##n): \
+				state.regs[i].value = FRAME_REG(state.regs[i].value, \
+				                                const u##n); \
+				break
+			CASES;
+#undef CASE
+			default:
+				return -EIO;
+			}
+			break;
+		}
+	}
+	for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+		if (REG_INVALID(i))
+			continue;
+		switch(state.regs[i].where) {
+		case Nowhere:
+			if (reg_info[i].width != sizeof(UNW_SP(frame))
+			   || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
+			      != &UNW_SP(frame))
+				continue;
+			UNW_SP(frame) = cfa;
+			break;
+		case Register:
+			switch(reg_info[i].width) {
+#define CASE(n) case sizeof(u##n): \
+				FRAME_REG(i, u##n) = state.regs[i].value; \
+				break
+			CASES;
+#undef CASE
+			default:
+				return -EIO;
+			}
+			break;
+		case Value:
+			if (reg_info[i].width != sizeof(unsigned long))
+				return -EIO;
+			FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
+			                                    * state.dataAlign;
+			break;
+		case Memory: {
+				unsigned long addr = cfa + state.regs[i].value
+				                           * state.dataAlign;
+				if ((state.regs[i].value * state.dataAlign)
+				    % sizeof(unsigned long)
+				    || addr < startLoc
+				    || addr + sizeof(unsigned long) < addr
+				    || addr + sizeof(unsigned long) > endLoc)
+					return -EIO;
+				switch(reg_info[i].width) {
+#define CASE(n)     case sizeof(u##n): \
+					readmem(addr, KVADDR, &reg_ptr,sizeof(u##n), "register", RETURN_ON_ERROR|QUIET); \
+					FRAME_REG(i, u##n) = (u##n)reg_ptr;\
+					break
+				CASES;
+#undef CASE
+				default:
+					return -EIO;
+				}
+			}
+			break;
+		}
+	}
+	return 0;
+#undef CASES
+#undef FRAME_REG
+}
+
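+/*
+ * Typical use of unwind() -- a condensed sketch of the loop performed by
+ * dwarf_backtrace() further down: seed the frame from a known PC/SP pair,
+ * then keep unwinding until it fails, the PC becomes zero, or the stack
+ * pointer leaves the stack of interest.
+ */
+#if 0
+	struct unwind_frame_info frame = { { 0 } };
+
+	UNW_PC(&frame) = bt->instptr;
+	UNW_SP(&frame) = bt->stkptr;
+	while (UNW_SP(&frame) < bt->stacktop && !unwind(&frame) && UNW_PC(&frame))
+		fprintf(fp, "return address: %lx\n", UNW_PC(&frame));
+#endif
+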
+/*
+ *  Initialize the unwind table(s) in the best-case order:
+ *
+ *   1. Use the in-memory kernel and module unwind tables.
+ *   2. Use the in-memory kernel-only .eh_frame data. (possible?)
+ *   3. Use the kernel-only .eh_frame data from the vmlinux file.
+ */ 
+void 
+init_unwind_table(void)
+{
+	ulong unwind_table_size;
+	void *unwind_table;
+
+	kt->flags &= ~DWARF_UNWIND;
+
+	if (gather_in_memory_unwind_tables()) {
+                if (CRASHDEBUG(1))
+                        fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY (%d tables)\n",
+				unwind_tables_cnt);
+
+                kt->flags |= DWARF_UNWIND_MEMORY;
+		if (unwind_tables_cnt > 1)
+                	kt->flags |= DWARF_UNWIND_MODULES;
+                if (!(kt->flags & NO_DWARF_UNWIND))
+                        kt->flags |= DWARF_UNWIND;
+
+		return;
+	}
+
+	if (symbol_exists("__start_unwind") &&
+	    symbol_exists("__end_unwind")) {
+		unwind_table_size = symbol_value("__end_unwind") - 
+			symbol_value("__start_unwind");
+
+		if (!(unwind_table = malloc(unwind_table_size))) {
+			error(WARNING, "cannot malloc unwind table space\n");
+			goto try_eh_frame;
+		}
+
+		if (!readmem(symbol_value("__start_unwind"), KVADDR, unwind_table,
+            	    unwind_table_size, "unwind table", RETURN_ON_ERROR)) {
+			error(WARNING, "cannot read unwind table data\n");
+			free(unwind_table);
+			goto try_eh_frame;
+		}
+
+		kt->flags |= DWARF_UNWIND_MEMORY;
+		if (!(kt->flags & NO_DWARF_UNWIND))
+			kt->flags |= DWARF_UNWIND;
+
+		default_unwind_table.size = unwind_table_size;
+		default_unwind_table.address = unwind_table;
+
+		if (CRASHDEBUG(1)) 
+			fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY\n");
+
+		return;
+	}
+
+try_eh_frame:
+
+	if (st->dwarf_eh_frame_size) {
+		int fd;
+
+		unwind_table_size = st->dwarf_eh_frame_size;
+
+		if (!(unwind_table = malloc(unwind_table_size))) {
+			error(WARNING, "cannot malloc unwind table space\n");
+			return;
+		}
+
+		if ((fd = open(pc->namelist, O_RDONLY)) < 0) {
+			error(WARNING, "cannot open %s for .eh_frame data\n",
+				pc->namelist);
+			free(unwind_table);
+			return;
+		}
+
+		lseek(fd, st->dwarf_eh_frame_file_offset, SEEK_SET);
+
+		if (read(fd, unwind_table, st->dwarf_eh_frame_size) !=
+		    st->dwarf_eh_frame_size) {
+			error(WARNING, "cannot read .eh_frame data from %s\n",
+				pc->namelist);
+			free(unwind_table);
+			return;
+		}
+
+		close(fd);
+
+		default_unwind_table.size = unwind_table_size;
+		default_unwind_table.address = unwind_table;
+
+		kt->flags |= DWARF_UNWIND_EH_FRAME;
+		if (!(kt->flags & NO_DWARF_UNWIND))
+			kt->flags |= DWARF_UNWIND;
+
+		if (CRASHDEBUG(1)) 
+			fprintf(fp, "init_unwind_table: DWARF_UNWIND_EH_FRAME\n");
+
+		return;
+	}
+}
+
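+/*
+ * A hypothetical caller-side sketch: the architecture backtrace code is
+ * expected to test the flag set up above before attempting a DWARF-based
+ * trace (illustrative only):
+ */
+#if 0
+	if (kt->flags & DWARF_UNWIND)
+		level = dwarf_backtrace(bt, level, bt->stacktop);
+#endif
+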
+/*
+ *  Find the appropriate kernel-only "root_table" unwind_table,
+ *  and pass it to populate_local_tables() to do the heavy lifting.
+ */
+static int 
+gather_in_memory_unwind_tables(void)
+{
+	int i, cnt, found;
+	struct syment *sp, *root_tables[10];
+	char *root_table_buf;
+	char buf[BUFSIZE];
+	ulong name;
+
+	STRUCT_SIZE_INIT(unwind_table, "unwind_table");
+	MEMBER_OFFSET_INIT(unwind_table_core, "unwind_table", "core");
+	MEMBER_OFFSET_INIT(unwind_table_init, "unwind_table", "init");
+	MEMBER_OFFSET_INIT(unwind_table_address, "unwind_table", "address");
+	MEMBER_OFFSET_INIT(unwind_table_size, "unwind_table", "size");
+	MEMBER_OFFSET_INIT(unwind_table_link, "unwind_table", "link");
+	MEMBER_OFFSET_INIT(unwind_table_name, "unwind_table", "name");
+
+	if (INVALID_SIZE(unwind_table) ||
+	    INVALID_MEMBER(unwind_table_core) ||
+	    INVALID_MEMBER(unwind_table_init) ||
+	    INVALID_MEMBER(unwind_table_address) ||
+	    INVALID_MEMBER(unwind_table_size) ||
+	    INVALID_MEMBER(unwind_table_link) ||
+	    INVALID_MEMBER(unwind_table_name)) {
+		if (CRASHDEBUG(1)) 
+			error(NOTE, 
+	    "unwind_table structure has changed, or does not exist in this kernel\n");
+		return 0;
+	}
+
+	/*
+	 *  Unfortunately there are two kernel root_table symbols.
+	 */
+	if (!(cnt = get_syment_array("root_table", root_tables, 10)))
+		return 0;
+
+	root_table_buf = GETBUF(SIZE(unwind_table));
+	for (i = found = 0; i < cnt; i++) {
+		sp = root_tables[i];
+		if (!readmem(sp->value, KVADDR, root_table_buf,
+                    SIZE(unwind_table), "root unwind_table", 
+		    RETURN_ON_ERROR|QUIET))
+			goto gather_failed;
+
+		name = ULONG(root_table_buf + OFFSET(unwind_table_name));
+		if (read_string(name, buf, strlen("kernel")+1) && 
+		    STREQ("kernel", buf)) {
+			found++;
+			if (CRASHDEBUG(1))
+				fprintf(fp, "root_table name: %lx [%s]\n", 
+					name, buf);
+			break;
+		}
+	}
+
+	if (!found)
+		goto gather_failed;
+
+	cnt = populate_local_tables(sp->value, root_table_buf);
+
+	FREEBUF(root_table_buf);
+	return cnt;
+
+gather_failed:
+
+	FREEBUF(root_table_buf);
+	return 0;
+}
+
+/*
+ *  Transfer the relevant data from the kernel and module unwind_table
+ *  structures to the local_unwind_table structures.
+ */
+static int
+populate_local_tables(ulong root, char *buf)
+{
+	struct list_data list_data, *ld;
+	int i, cnt;
+	ulong *table_list;
+	ulong vaddr;
+	struct local_unwind_table *tp;
+
+        ld = &list_data;
+        BZERO(ld, sizeof(struct list_data));
+        ld->start = root;
+        ld->member_offset = OFFSET(unwind_table_link);
+	if (CRASHDEBUG(1))
+        	ld->flags |= VERBOSE;
+
+	hq_open();
+        cnt = do_list(ld);
+        table_list = (ulong *)GETBUF(cnt * sizeof(ulong));
+	cnt = retrieve_list(table_list, cnt);
+	hq_close();
+
+	if (!(local_unwind_tables = 
+	    malloc(sizeof(struct local_unwind_table) * cnt))) {
+		error(WARNING, "cannot malloc unwind_table space (%d tables)\n",
+			cnt);
+		FREEBUF(table_list);
+		return 0;
+	}
+
+	for (i = 0; i < cnt; i++, tp++) {
+
+                if (!readmem(table_list[i], KVADDR, buf,
+                    SIZE(unwind_table), "unwind_table",
+                    RETURN_ON_ERROR|QUIET)) {
+			error(WARNING, "cannot read unwind_table\n");
+			goto failed;
+		}
+
+		tp = &local_unwind_tables[i];
+
+		/*
+		 *  Copy the required table info for find_table().
+		 */
+        	BCOPY(buf + OFFSET(unwind_table_core),
+                	(char *)&tp->core.pc, sizeof(ulong)*2);
+        	BCOPY(buf + OFFSET(unwind_table_init),
+                	(char *)&tp->init.pc, sizeof(ulong)*2);
+        	BCOPY(buf + OFFSET(unwind_table_size),
+                	(char *)&tp->size, sizeof(ulong));
+
+		/*
+		 *  Then read the DWARF CFI data.
+		 */
+		vaddr = ULONG(buf + OFFSET(unwind_table_address));
+
+		if (!(tp->address = malloc(tp->size))) {
+			error(WARNING, "cannot malloc unwind_table space\n");
+			goto failed;
+			break;
+		}
+                if (!readmem(vaddr, KVADDR, tp->address,
+                    tp->size, "DWARF CFI data", RETURN_ON_ERROR|QUIET)) {
+			error(WARNING, "cannot read unwind_table data\n");
+			goto failed;
+		}
+	}
+
+	unwind_tables_cnt = cnt;
+
+	if (CRASHDEBUG(7))
+		dump_local_unwind_tables();
+
+failed:
+
+	FREEBUF(table_list);
+	return unwind_tables_cnt;
+}
+
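+/*
+ * For reference, the BCOPY()s above assume a local_unwind_table layout
+ * (defined near the top of this file) roughly along the lines of:
+ *
+ *	struct local_unwind_table {
+ *		struct { ulong pc; ulong range; } core, init;
+ *		ulong size;
+ *		void *address;		-- malloc'd copy of the CFI data
+ *	};
+ */
+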
+/*
+ *  Find the unwind_table containing a pc.
+ */
+static struct local_unwind_table *
+find_table(unsigned long pc)
+{
+	int i;
+	struct local_unwind_table *tp, *table;
+
+	table = &default_unwind_table;
+
+        for (i = 0; i < unwind_tables_cnt; i++, tp++) {
+		tp = &local_unwind_tables[i];
+                if ((pc >= tp->core.pc
+                    && pc < tp->core.pc + tp->core.range)
+                    || (pc >= tp->init.pc
+                    && pc < tp->init.pc + tp->init.range)) {
+			table = tp;
+                        break;
+		}
+	}
+
+        return table;
+}
+
+static void 
+dump_local_unwind_tables(void)
+{
+	int i, others; 
+	struct local_unwind_table *tp;
+
+	others = 0;
+	fprintf(fp, "DWARF flags: (");
+        if (kt->flags & DWARF_UNWIND)
+                fprintf(fp, "%sDWARF_UNWIND", others++ ? "|" : "");
+        if (kt->flags & NO_DWARF_UNWIND)
+                fprintf(fp, "%sNO_DWARF_UNWIND", others++ ? "|" : "");
+        if (kt->flags & DWARF_UNWIND_MEMORY)
+                fprintf(fp, "%sDWARF_UNWIND_MEMORY", others++ ? "|" : "");
+        if (kt->flags & DWARF_UNWIND_EH_FRAME)
+                fprintf(fp, "%sDWARF_UNWIND_EH_FRAME", others++ ? "|" : "");
+        if (kt->flags & DWARF_UNWIND_MODULES)
+                fprintf(fp, "%sDWARF_UNWIND_MODULES", others++ ? "|" : "");
+	fprintf(fp, ")\n\n");
+
+	fprintf(fp, "default_unwind_table:\n");
+	fprintf(fp, "      address: %lx\n",
+		(ulong)default_unwind_table.address);
+	fprintf(fp, "         size: %ld\n\n",
+		(ulong)default_unwind_table.size);
+
+	fprintf(fp, "local_unwind_tables[%d]:\n", unwind_tables_cnt);
+        for (i = 0; i < unwind_tables_cnt; i++, tp++) {
+		tp = &local_unwind_tables[i];
+		fprintf(fp, "[%d]\n", i);
+		fprintf(fp, "         core: pc: %lx\n", tp->core.pc);
+		fprintf(fp, "        range: %ld\n", tp->core.range);
+		fprintf(fp, "     init: pc: %lx\n", tp->init.pc);
+		fprintf(fp, "        range: %ld\n", tp->init.range);
+		fprintf(fp, "      address: %lx\n", (ulong)tp->address);
+		fprintf(fp, "         size: %ld\n", tp->size);
+	}
+}
+
+
+int 
+dwarf_backtrace(struct bt_info *bt, int level, ulong stacktop)
+{
+	unsigned long bp, offset;
+	struct syment *sp;
+	char *name;
+	struct unwind_frame_info *frame;
+
+	frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info));
+//	frame->regs.rsp = bt->stkptr;
+//	frame->regs.rip = bt->instptr;
+	UNW_SP(frame) = bt->stkptr;
+	UNW_PC(frame) = bt->instptr;
+
+	/* read rbp from stack for non active tasks */
+	if (!(bt->flags & BT_DUMPFILE_SEARCH) && !bt->bptr) {
+//		readmem(frame->regs.rsp, KVADDR, &bp,
+		readmem(UNW_SP(frame), KVADDR, &bp,
+	                sizeof(unsigned long), "reading bp", FAULT_ON_ERROR);
+		frame->regs.rbp = bp;  /* fixme for x86 */
+	}
+
+	sp = value_search(UNW_PC(frame), &offset);
+	if (!sp) {
+		if (CRASHDEBUG(1))
+		    fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", 
+			UNW_PC(frame));
+		goto bailout;
+	}
+
+	/*
+	 * If offset is zero, it means we have crossed over to the next
+	 *  function. Recalculate by adjusting the text address
+	 */
+	if (!offset) {
+		sp = value_search(UNW_PC(frame) - 1, &offset);
+		if (!sp) {
+			if (CRASHDEBUG(1))
+				fprintf(fp, 
+				    "unwind: cannot find symbol for PC: %lx\n",
+					UNW_PC(frame)-1);
+			goto bailout;
+		}
+	}
+
+        name = sp->name;
+	fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame));
+
+	if (CRASHDEBUG(2))
+		fprintf(fp, "    < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), 
+			UNW_PC(frame), frame->regs.rbp);
+
+       	while ((UNW_SP(frame) < stacktop)
+				&& !unwind(frame) && UNW_PC(frame)) {
+		/* To prevent rip pushed on the IRQ stack from being reported
+		 * on both the IRQ and process stacks
+		 */
+		if ((bt->flags & BT_IRQSTACK) && (UNW_SP(frame) >= stacktop - 16))
+			break;
+               	level++;
+		sp = value_search(UNW_PC(frame), &offset);
+		if (!sp) {
+			if (CRASHDEBUG(1))
+				fprintf(fp, 
+				    "unwind: cannot find symbol for PC: %lx\n",
+					UNW_PC(frame));
+			break;
+		}
+
+		/*
+		 * If offset is zero, it means we have crossed over to the next
+		 *  function. Recalculate by adjusting the text address
+		 */
+		if (!offset) {
+			sp = value_search(UNW_PC(frame) - 1, &offset);
+			if (!sp) {
+				if (CRASHDEBUG(1))
+					fprintf(fp,
+					    "unwind: cannot find symbol for PC: %lx\n",
+						UNW_PC(frame)-1);
+				goto bailout;
+			}
+		}
+	        name = sp->name;
+		fprintf(fp, "%s#%d [%016lx] %s at %016lx \n", level < 10 ? " " : "",
+			level, UNW_SP(frame), name, UNW_PC(frame));
+
+		if (CRASHDEBUG(2))
+			fprintf(fp, "    < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), 
+				UNW_PC(frame), frame->regs.rbp);
+       	}
+
+bailout:
+	FREEBUF(frame);
+	return ++level;
+}
+
+int 
+dwarf_print_stack_entry(struct bt_info *bt, int level)
+{
+	unsigned long offset;
+	struct syment *sp;
+	char *name;
+	struct unwind_frame_info *frame;
+
+	frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info));
+	UNW_SP(frame) = bt->stkptr;
+	UNW_PC(frame) = bt->instptr;
+
+	sp = value_search(UNW_PC(frame), &offset);
+	if (!sp) {
+		if (CRASHDEBUG(1))
+		    fprintf(fp, "unwind: cannot find symbol for PC: %lx\n",
+			UNW_PC(frame));
+		goto bailout;
+	}
+
+	/*
+	 * If offset is zero, it means we have crossed over to the next
+	 *  function. Recalculate by adjusting the text address
+	 */
+	if (!offset) {
+		sp = value_search(UNW_PC(frame) - 1, &offset);
+		if (!sp) {
+			if (CRASHDEBUG(1))
+				fprintf(fp,
+				    "unwind: cannot find symbol for PC: %lx\n",
+					UNW_PC(frame)-1);
+			goto bailout;
+		}
+	}
+        name = sp->name;
+	fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame));
+
+bailout:
+	FREEBUF(frame);
+	return level;
+}
+
+void
+dwarf_debug(struct bt_info *bt)
+{
+	struct unwind_frame_info *frame;
+	ulong bp;
+
+	if (!bt->hp->eip) {
+		dump_local_unwind_tables();
+		return;
+	}
+
+	if (!(kt->flags & DWARF_UNWIND_CAPABLE)) {
+		error(INFO, "not DWARF capable\n");
+		return;
+	}
+
+        frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info));
+
+	/*
+	 *  XXX: This only works for the first PC/SP pair seen in a normal
+	 *  backtrace, so it's not particularly helpful.  Ideally it should
+	 *  be capable of taking any PC/SP pair in a stack, but it appears
+	 *  to be related to the rbp value.
+	 */
+
+	UNW_PC(frame) = bt->hp->eip;
+	UNW_SP(frame) = bt->hp->esp;
+
+        readmem(UNW_SP(frame), KVADDR, &bp,
+ 		sizeof(unsigned long), "reading bp", FAULT_ON_ERROR);
+        frame->regs.rbp = bp;  /* fixme for x86 */
+
+	unwind(frame);
+
+	fprintf(fp, "frame size: %lx (%lx)\n", 
+		(ulong)UNW_SP(frame), (ulong)UNW_SP(frame) - bt->hp->esp);
+
+	FREEBUF(frame);
+}
+
+
+#endif 
--- crash/lkcd_vmdump_v2_v3.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_vmdump_v2_v3.h	2008-02-20 12:12:46.000000000 -0500
@@ -1,8 +1,8 @@
 /* lkcd_vmdump_v2_v3.h - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -51,6 +51,7 @@
 #define _ASM_VMDUMP_H
 
 /* necessary header files */
+typedef unsigned int u32;
 #include <asm/ptrace.h>                          /* for pt_regs             */
 
 /* definitions */
@@ -81,7 +82,11 @@
 	uint32_t             dha_eip;
 
 	/* the dump registers */
+#ifndef S390
+#ifndef S390X
 	struct pt_regs       dha_regs;
+#endif
+#endif
 
 } dump_header_asm_t;
 
@@ -97,6 +102,7 @@
  */
 
 #ifndef IA64
+typedef unsigned int u32;
 #include <asm/ptrace.h>                          /* for pt_regs             */
 #endif
 
--- crash/defs.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/defs.h	2009-02-05 10:43:34.000000000 -0500
@@ -1,8 +1,8 @@
 /* defs.h - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  * Copyright (C) 2002 Silicon Graphics, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -46,6 +46,7 @@
 #include <sys/param.h>
 #include <sys/wait.h>
 #include <sys/time.h>
+#include <execinfo.h> /* backtrace() */
 
 #define BASELEVEL_REVISION  "4.0"
 
@@ -54,12 +55,16 @@
 
 #define TRUE  (1)
 #define FALSE (0)
+#define STR(x)	#x
+#ifndef offsetof
+#  define offsetof(TYPE, MEMBER) ((ulong)&((TYPE *)0)->MEMBER)
+#endif
 
 #ifdef X86
-#define NR_CPUS  (32)
+#define NR_CPUS  (256)
 #endif
 #ifdef X86_64
-#define NR_CPUS  (32)
+#define NR_CPUS  (512)
 #endif
 #ifdef ALPHA
 #define NR_CPUS  (64)
@@ -68,10 +73,10 @@
 #define NR_CPUS  (32)
 #endif
 #ifdef IA64
-#define NR_CPUS  (512)
+#define NR_CPUS  (4096)
 #endif
 #ifdef PPC64
-#define NR_CPUS  (128)
+#define NR_CPUS  (1024)
 #endif
 #ifdef S390
 #define NR_CPUS  (64)
@@ -88,8 +93,10 @@
 
 #define HIST_BLKSIZE  (4096)
 
-#define STREQ(A, B)      (A && B && (strcmp((char *)(A), (char *)(B)) == 0))
-#define STRNEQ(A, B)     (A && B && \
+static inline int string_exists(char *s) { return (s ? TRUE : FALSE); }
+#define STREQ(A, B)      (string_exists((char *)A) && string_exists((char *)B) && \
+	(strcmp((char *)(A), (char *)(B)) == 0))
+#define STRNEQ(A, B)     (string_exists((char *)A) && string_exists((char *)B) && \
         (strncmp((char *)(A), (char *)(B), strlen((char *)(B))) == 0))
 #define BZERO(S, N)      (memset(S, NULLCHAR, N))
 #define BCOPY(S, D, C)   (memcpy(D, S, C))
@@ -98,6 +105,7 @@
 #define LASTCHAR(s)      (s[strlen(s)-1])
 #define FIRSTCHAR(s)     (s[0])
 #define QUOTED_STRING(s) ((FIRSTCHAR(s) == '"') && (LASTCHAR(s) == '"'))
+#define PATHEQ(A, B)     ((A) && (B) && (pathcmp((char *)(A), (char *)(B)) == 0))
 
 #ifdef roundup
 #undef roundup
@@ -106,6 +114,8 @@
 
 typedef uint64_t physaddr_t;
 
+#define PADDR_NOT_AVAILABLE (0x1ULL)
+
 typedef unsigned long long int ulonglong;
 struct number_option {
         ulong num;
@@ -155,8 +165,8 @@
 #define UNLINK_MODULES     (0x1000000000ULL)
 #define S390D              (0x2000000000ULL)
 #define REM_S390D          (0x4000000000ULL)
-#define PC_UNUSED_1        (0x8000000000ULL)
-#define PC_UNUSED_2       (0x10000000000ULL)
+#define SYSRQ              (0x8000000000ULL)
+#define KDUMP             (0x10000000000ULL)
 #define NETDUMP           (0x20000000000ULL)
 #define REM_NETDUMP       (0x40000000000ULL)
 #define SYSMAP            (0x80000000000ULL)
@@ -169,11 +179,20 @@
 #define VERSION_QUERY   (0x4000000000000ULL)
 #define READNOW         (0x8000000000000ULL)
 #define NOCRASHRC      (0x10000000000000ULL)
+#define INIT_IFILE     (0x20000000000000ULL)
+#define XENDUMP        (0x40000000000000ULL)
+#define XEN_HYPER      (0x80000000000000ULL)
+#define XEN_CORE      (0x100000000000000ULL)
+#define PLEASE_WAIT   (0x200000000000000ULL)
+#define IFILE_ERROR   (0x400000000000000ULL)
+#define KERNTYPES     (0x800000000000000ULL)
+#define MINIMAL_MODE (0x1000000000000000ULL)
+#define CRASHBUILTIN (0x2000000000000000ULL)
 
 #define ACTIVE()            (pc->flags & LIVE_SYSTEM)
 #define DUMPFILE()          (!(pc->flags & LIVE_SYSTEM))
-#define MEMORY_SOURCES (NETDUMP|MCLXCD|LKCD|DEVMEM|S390D|MEMMOD|DISKDUMP)
-#define DUMPFILE_TYPES      (DISKDUMP|NETDUMP|MCLXCD|LKCD|S390D)
+#define MEMORY_SOURCES (NETDUMP|KDUMP|MCLXCD|LKCD|DEVMEM|S390D|MEMMOD|DISKDUMP|XENDUMP|CRASHBUILTIN)
+#define DUMPFILE_TYPES      (DISKDUMP|NETDUMP|KDUMP|MCLXCD|LKCD|S390D|XENDUMP)
 #define REMOTE()            (pc->flags & REMOTE_DAEMON)
 #define REMOTE_ACTIVE()     (pc->flags & REM_LIVE_SYSTEM) 
 #define REMOTE_DUMPFILE() \
@@ -182,16 +201,35 @@
 #define LKCD_DUMPFILE()     (pc->flags & (LKCD|REM_LKCD))
 #define NETDUMP_DUMPFILE()  (pc->flags & (NETDUMP|REM_NETDUMP))
 #define DISKDUMP_DUMPFILE() (pc->flags & DISKDUMP)
+#define KDUMP_DUMPFILE()    (pc->flags & KDUMP)
+#define XENDUMP_DUMPFILE()  (pc->flags & XENDUMP)
+#define XEN_HYPER_MODE()    (pc->flags & XEN_HYPER)
+#define SYSRQ_TASK(X)       ((pc->flags & SYSRQ) && is_task_active(X))
+#define XEN_CORE_DUMPFILE() (pc->flags & XEN_CORE)
+#define LKCD_KERNTYPES()    (pc->flags & KERNTYPES)
 
 #define NETDUMP_LOCAL    (0x1)  /* netdump_data flags */
 #define NETDUMP_REMOTE   (0x2)  
-#define NETDUMP_VALID()  (nd->flags & (NETDUMP_LOCAL|NETDUMP_REMOTE))
+#define VMCORE_VALID()   (nd->flags & (NETDUMP_LOCAL|NETDUMP_REMOTE|KDUMP_LOCAL))
 #define NETDUMP_ELF32    (0x4)
 #define NETDUMP_ELF64    (0x8)
 #define PARTIAL_DUMP    (0x10)  /* netdump or diskdump */
+#define KDUMP_ELF32     (0x20)
+#define KDUMP_ELF64     (0x40)
+#define KDUMP_LOCAL     (0x80)  
+
+#define DUMPFILE_FORMAT(flags) ((flags) & \
+		        (NETDUMP_ELF32|NETDUMP_ELF64|KDUMP_ELF32|KDUMP_ELF64))
+
+#define DISKDUMP_LOCAL      (0x1)
+#define KDUMP_CMPRS_LOCAL   (0x2)
+#define ERROR_EXCLUDED      (0x4)
+#define ZERO_EXCLUDED       (0x8)
+#define DISKDUMP_VALID()    (dd->flags & DISKDUMP_LOCAL)
+#define KDUMP_CMPRS_VALID() (dd->flags & KDUMP_CMPRS_LOCAL)
 
-#define DISKDUMP_LOCAL   (0x1)
-#define DISKDUMP_VALID() (dd->flags & DISKDUMP_LOCAL)
+#define XENDUMP_LOCAL    (0x1)
+#define XENDUMP_VALID()  (xd->flags & XENDUMP_LOCAL)
 
 #define CRASHDEBUG(x) (pc->debug >= (x))
 
@@ -210,6 +248,7 @@
 #define SEEK_ERROR       (-1)
 #define READ_ERROR       (-2)
 #define WRITE_ERROR      (-3)
+#define PAGE_EXCLUDED    (-4)
 
 #define RESTART()         (longjmp(pc->main_loop_env, 1))
 #define RESUME_FOREACH()  (longjmp(pc->foreach_loop_env, 1))
@@ -319,15 +358,28 @@
 #define SCROLL_NONE 0
 #define SCROLL_LESS 1
 #define SCROLL_MORE 2
+#define SCROLL_CRASHPAGER 3
 	ulong redirect;			/* per-cmd origin and output flags */
 	pid_t stdpipe_pid;              /* per-cmd standard output pipe's pid */
 	pid_t pipe_pid;                 /* per-cmd output pipe's pid */
 	pid_t pipe_shell_pid;           /* per-cmd output pipe's shell pid */
 	char pipe_command[BUFSIZE];     /* pipe command line */
+	struct command_table_entry *cmd_table;	/* linux/xen command table */
 	char *curcmd;                   /* currently-executing command */
 	char *lastcmd;                  /* previously-executed command */
 	ulong cmdgencur;		/* current command generation number */
-	ulong cmdgenspec;		/* specified command generation num */
+	ulong curcmd_flags;		/* general purpose per-command flag */
+#define XEN_MACHINE_ADDR    (0x1)
+#define REPEAT              (0x2)
+#define IDLE_TASK_SHOWN     (0x4)
+#define TASK_SPECIFIED      (0x8)
+#define MEMTYPE_UVADDR     (0x10)
+#define MEMTYPE_FILEADDR   (0x20)
+#define HEADER_PRINTED     (0x40)
+#define BAD_INSTRUCTION    (0x80)
+#define UD2A_INSTRUCTION  (0x100)
+#define IRQ_IN_USE        (0x200)
+	ulonglong curcmd_private;	/* general purpose per-command info */
 	int cur_gdb_cmd;                /* current gdb command */
 	int last_gdb_cmd;               /* previously-executed gdb command */
 	int sigint_cnt;                 /* number of ignored SIGINTs */
@@ -347,11 +399,11 @@
 	struct extension_table *curext; /* extension being loaded */
         int (*readmem)(int, void *, int, ulong, physaddr_t); /* memory access */
         int (*writemem)(int, void *, int, ulong, physaddr_t);/* memory access */
+	ulong ifile_in_progress;        /* original xxx_IFILE flags */
+	off_t ifile_offset;             /* current offset into input file */
+	char *runtime_ifile_cmd;        /* runtime command using input file */
 };
 
-#define UNIQUE_COMMAND(s) \
-	(STREQ(pc->curcmd, s) && (pc->cmdgencur == pc->cmdgenspec))
-
 #define READMEM  pc->readmem
 
 typedef void (*cmd_func_t)(void);
@@ -365,6 +417,7 @@
 
 #define REFRESH_TASK_TABLE (0x1)           /* command_table_entry flags */
 #define HIDDEN_COMMAND     (0x2)
+#define CLEANUP            (0x4)           /* for extensions only */
 
 /*
  *  A linked list of extension table structures keeps track of the current
@@ -407,9 +460,36 @@
 #define KALLSYMS_V2   (0x2000)
 #define TVEC_BASES_V2 (0x4000)
 #define GCC_3_3_3     (0x8000)
+#define USE_OLD_BT   (0x10000)
+#define ARCH_XEN     (0x20000)
+#define NO_IKCONFIG  (0x40000)
+#define DWARF_UNWIND (0x80000)
+#define NO_DWARF_UNWIND       (0x100000)
+#define DWARF_UNWIND_MEMORY   (0x200000)
+#define DWARF_UNWIND_EH_FRAME (0x400000)
+#define DWARF_UNWIND_CAPABLE  (DWARF_UNWIND_MEMORY|DWARF_UNWIND_EH_FRAME)
+#define DWARF_UNWIND_MODULES  (0x800000)
+#define BUGVERBOSE_OFF       (0x1000000)
+#define RELOC_SET            (0x2000000)
+#define RELOC_FORCE          (0x4000000)
+#define ARCH_OPENVZ          (0x8000000)
+#define ARCH_PVOPS          (0x10000000)
 
 #define GCC_VERSION_DEPRECATED (GCC_3_2|GCC_3_2_3|GCC_2_96|GCC_3_3_2|GCC_3_3_3)
 
+#define XEN()    (kt->flags & ARCH_XEN)
+#define OPENVZ() (kt->flags & ARCH_OPENVZ)
+#define PVOPS()  (kt->flags & ARCH_PVOPS)
+
+#define XEN_MACHINE_TO_MFN(m)    ((ulonglong)(m) >> PAGESHIFT())
+#define XEN_PFN_TO_PSEUDO(p)     ((ulonglong)(p) << PAGESHIFT())
+
+#define XEN_MFN_NOT_FOUND        (~0UL)
+#define XEN_PFNS_PER_PAGE        (PAGESIZE()/sizeof(ulong))
+#define XEN_FOREIGN_FRAME        (1UL << (BITS()-1))
+
+#define XEN_MACHADDR_NOT_FOUND   (~0ULL) 
+
 struct kernel_table {                   /* kernel data */
 	ulong flags;
 	ulong stext;
@@ -420,6 +500,7 @@
 	ulong init_end;
 	ulong end;
 	int cpus;
+	char *cpus_override;
 	void (*display_bh)(void);
         ulong module_list;
         ulong kernel_module;
@@ -430,11 +511,39 @@
 	uint kernel_version[3];
 	uint gcc_version[3];
 	int runq_siblings;
-	long __rq_idx[NR_CPUS];
-	long __cpu_idx[NR_CPUS];
+	int kernel_NR_CPUS;
 	long __per_cpu_offset[NR_CPUS];
-	long cpu_flags[NR_CPUS];
-#define NMI 0x1
+	long *__rq_idx;
+	long *__cpu_idx;
+	ulong *cpu_flags;
+#define POSSIBLE  (0x1)
+#define PRESENT   (0x2)
+#define ONLINE    (0x4)
+#define NMI       (0x8)
+	int BUG_bytes;
+	ulong xen_flags;
+#define WRITABLE_PAGE_TABLES    (0x1)
+#define SHADOW_PAGE_TABLES      (0x2)
+#define CANONICAL_PAGE_TABLES   (0x4)
+#define XEN_SUSPEND             (0x8)
+	char *m2p_page;
+	ulong phys_to_machine_mapping;
+	ulong p2m_table_size;
+#define P2M_MAPPING_CACHE    (512)
+	struct p2m_mapping_cache {
+		ulong mapping;
+		ulong start;
+		ulong end;
+	} p2m_mapping_cache[P2M_MAPPING_CACHE];
+#define P2M_MAPPING_TO_PAGE_INDEX(c) \
+   (((kt->p2m_mapping_cache[c].mapping - kt->phys_to_machine_mapping)/PAGESIZE()) \
+    * XEN_PFNS_PER_PAGE)
+	ulong last_mapping_read;
+	ulong p2m_cache_index;
+	ulong p2m_pages_searched;
+	ulong p2m_mfn_cache_hits;
+	ulong p2m_page_cache_hits;
+	ulong relocate;
 };
 
 /*
@@ -493,14 +602,14 @@
 	ulong retries;
         ulong panicmsg;
         int panic_processor;
-        ulong idle_threads[NR_CPUS];
-        ulong panic_threads[NR_CPUS];
-	ulong panic_ksp[NR_CPUS];
-	ulong active_set[NR_CPUS];
-	ulong hardirq_ctx[NR_CPUS];
-	ulong hardirq_tasks[NR_CPUS];
-	ulong softirq_ctx[NR_CPUS];
-	ulong softirq_tasks[NR_CPUS];
+        ulong *idle_threads;
+        ulong *panic_threads;
+	ulong *active_set;
+	ulong *panic_ksp;
+	ulong *hardirq_ctx;
+	ulong *hardirq_tasks;
+	ulong *softirq_ctx;
+	ulong *softirq_tasks;
         ulong panic_task;
 	ulong this_task;
 	int pidhash_len;
@@ -511,6 +620,7 @@
 	char *task_struct;
 	char *thread_info;
 	char *mm_struct;
+	ulong init_pid_ns;
 };
 
 #define TASK_INIT_DONE       (0x1)
@@ -527,6 +637,7 @@
 #define IRQSTACKS          (0x800)
 #define TIMESPEC          (0x1000)
 #define NO_TIMESPEC       (0x2000)
+#define ACTIVE_ONLY       (0x4000)
 
 #define TASK_SLUSH (20)
 
@@ -578,6 +689,7 @@
         ulonglong flags;
         ulong instptr;
         ulong stkptr;
+	ulong bptr;
 	ulong stackbase;
 	ulong stacktop;
 	char *stackbuf;
@@ -602,6 +714,8 @@
     (void *)(&bt->stackbuf[(ulong)STACK_OFFSET_TYPE(OFF)]), (size_t)(SZ))
 
 struct machine_specific;  /* uniquely defined below each machine's area */
+struct xendump_data;
+struct xen_kdump_data;
 
 struct machdep_table {
 	ulong flags;
@@ -645,14 +759,24 @@
         	char **file;
 	} *line_number_hooks;
 	ulong last_pgd_read;
+	ulong last_pud_read;
 	ulong last_pmd_read;
 	ulong last_ptbl_read;
 	char *pgd;
+	char *pud;
  	char *pmd;	
 	char *ptbl;
 	int ptrs_per_pgd;
 	char *cmdline_arg;
 	struct machine_specific *machspec;
+	ulong section_size_bits;
+	ulong max_physmem_bits;
+	ulong sections_per_root;
+	int (*xendump_p2m_create)(struct xendump_data *);
+	ulong (*xendump_panic_task)(struct xendump_data *);
+	void (*get_xendump_regs)(struct xendump_data *, struct bt_info *, ulong *, ulong *);
+	void (*clear_machdep_cache)(void);
+	int (*xen_kdump_p2m_create)(struct xen_kdump_data *);
 };
 
 /*
@@ -660,19 +784,25 @@
  *  as defined in their processor-specific files below. (see KSYMS_START defs).
  */
 #define HWRESET         (0x80000000)
-#define SYSRQ           (0x40000000)
-#define OMIT_FRAME_PTR  (0x20000000)
-#define FRAMESIZE_DEBUG (0x10000000)
-#define MACHDEP_BT_TEXT  (0x8000000)
-#define DEVMEMRD         (0x4000000)
-#define INIT             (0x2000000)
-#define SYSRQ_TASK(X)   ((machdep->flags & SYSRQ) && is_task_active(X))
+#define OMIT_FRAME_PTR  (0x40000000)
+#define FRAMESIZE_DEBUG (0x20000000)
+#define MACHDEP_BT_TEXT (0x10000000)
+#define DEVMEMRD         (0x8000000)
+#define INIT             (0x4000000)
+#define VM_4_LEVEL       (0x2000000)
+#define MCA              (0x1000000)
+#define PAE               (0x800000)
 
 extern struct machdep_table *machdep;
 
+#ifndef HZ
+#define HZ sysconf(_SC_CLK_TCK)
+#endif
+
 #define IS_LAST_PGD_READ(pgd)     ((ulong)(pgd) == machdep->last_pgd_read)
 #define IS_LAST_PMD_READ(pmd)     ((ulong)(pmd) == machdep->last_pmd_read)
 #define IS_LAST_PTBL_READ(ptbl)   ((ulong)(ptbl) == machdep->last_ptbl_read)
+#define IS_LAST_PUD_READ(pud)     ((ulong)(pud) == machdep->last_pud_read)
 
 #define FILL_PGD(PGD, TYPE, SIZE) 					    \
     if (!IS_LAST_PGD_READ(PGD)) {                                           \
@@ -681,6 +811,13 @@
             machdep->last_pgd_read = (ulong)(PGD);                          \
     }								            
 
+#define FILL_PUD(PUD, TYPE, SIZE) 					    \
+    if (!IS_LAST_PUD_READ(PUD)) {                                           \
+            readmem((ulonglong)((ulong)(PUD)), TYPE, machdep->pud,          \
+                    SIZE, "pud page", FAULT_ON_ERROR);                      \
+            machdep->last_pud_read = (ulong)(PUD);                          \
+    }
+
 #define FILL_PMD(PMD, TYPE, SIZE)			                    \
     if (!IS_LAST_PMD_READ(PMD)) {                                           \
             readmem((ulonglong)(PMD), TYPE, machdep->pmd,                   \
@@ -695,10 +832,12 @@
             machdep->last_ptbl_read = (ulong)(PTBL); 	                    \
     }
 
+#define SETUP_ENV  (0)
 #define PRE_SYMTAB (1)
 #define PRE_GDB    (2)
 #define POST_GDB   (3)
 #define POST_INIT  (4)
+#define POST_VM    (5)
 
 #define FOREACH_BT     (1)
 #define FOREACH_VM     (2)
@@ -737,6 +876,7 @@
 #define FOREACH_c_FLAG   (0x40000)
 #define FOREACH_f_FLAG   (0x80000)
 #define FOREACH_o_FLAG  (0x100000)
+#define FOREACH_T_FLAG  (0x200000)
 
 struct foreach_data {
 	ulong flags;
@@ -810,10 +950,15 @@
 	long task_struct_last_run;
 	long task_struct_timestamp;
 	long task_struct_thread_info;
+	long task_struct_nsproxy;
+	long task_struct_rlim;
 	long thread_info_task;
 	long thread_info_cpu;
 	long thread_info_previous_esp;
 	long thread_info_flags;
+	long nsproxy_mnt_ns;
+	long mnt_namespace_root;
+	long mnt_namespace_list;
 	long pid_link_pid;
 	long pid_hash_chain;
 	long hlist_node_next;
@@ -830,6 +975,8 @@
 	long tms_tms_stime;
 	long signal_struct_count;
 	long signal_struct_action;
+	long signal_struct_shared_pending;
+	long signal_struct_rlim;
 	long k_sigaction_sa;
 	long sigaction_sa_handler;
 	long sigaction_sa_flags;
@@ -875,8 +1022,14 @@
 	long mm_struct_mmap;
 	long mm_struct_pgd;
 	long mm_struct_rss;
+	long mm_struct_anon_rss;
+	long mm_struct_file_rss;
 	long mm_struct_total_vm;
 	long mm_struct_start_code;
+	long mm_struct_arg_start;
+	long mm_struct_arg_end;
+	long mm_struct_env_start;
+	long mm_struct_env_end;
         long vm_area_struct_vm_mm;
         long vm_area_struct_vm_next;
         long vm_area_struct_vm_end;
@@ -948,6 +1101,7 @@
 	long block_device_bd_disk;
 	long irq_desc_t_status;
 	long irq_desc_t_handler;
+	long irq_desc_t_chip;
 	long irq_desc_t_action;
 	long irq_desc_t_depth;
 	long irqdesc_action;
@@ -968,8 +1122,28 @@
 	long hw_interrupt_type_ack;
 	long hw_interrupt_type_end;
 	long hw_interrupt_type_set_affinity;
+	long irq_chip_typename;
+	long irq_chip_startup;
+	long irq_chip_shutdown;
+	long irq_chip_enable;
+	long irq_chip_disable;
+	long irq_chip_ack;
+	long irq_chip_end;
+	long irq_chip_set_affinity;
+	long irq_chip_mask;
+	long irq_chip_mask_ack;
+	long irq_chip_unmask;
+	long irq_chip_eoi;
+	long irq_chip_retrigger;
+	long irq_chip_set_type;
+	long irq_chip_set_wake;
 	long irq_cpustat_t___softirq_active;
 	long irq_cpustat_t___softirq_mask;
+	long fdtable_max_fds;
+	long fdtable_max_fdset;
+	long fdtable_open_fds;
+	long fdtable_fd;
+	long files_struct_fdt;
         long files_struct_max_fds;
         long files_struct_max_fdset;
         long files_struct_open_fds;
@@ -978,6 +1152,9 @@
         long file_f_dentry;
         long file_f_vfsmnt;
         long file_f_count;
+	long file_f_path;
+	long path_mnt;
+	long path_dentry;
         long fs_struct_root;
         long fs_struct_pwd;
         long fs_struct_rootmnt;
@@ -1067,6 +1244,8 @@
 	long net_device_type;
 	long net_device_addr_len;
 	long net_device_ip_ptr;
+	long net_device_dev_list;
+	long net_dev_base_head;
 	long device_next;
 	long device_name;
 	long device_type;
@@ -1088,6 +1267,8 @@
 	long inet_opt_dport;
 	long inet_opt_sport;
 	long inet_opt_num;
+	long ipv6_pinfo_rcv_saddr;
+	long ipv6_pinfo_daddr;
 	long timer_list_list;
 	long timer_list_next;
 	long timer_list_entry;
@@ -1123,6 +1304,7 @@
         long zone_struct_name;
         long zone_struct_size;
 	long zone_struct_memsize;
+	long zone_struct_zone_start_pfn;
         long zone_struct_zone_start_paddr;
         long zone_struct_zone_start_mapnr;
         long zone_struct_zone_mem_map;
@@ -1143,6 +1325,7 @@
 	long zone_pages_min;
 	long zone_pages_low;
 	long zone_pages_high;
+	long zone_vm_stat;
         long neighbour_next;
         long neighbour_primary_key;
         long neighbour_ha;
@@ -1210,7 +1393,72 @@
 	long x8664_pda_irqstackptr;
 	long x8664_pda_level4_pgt;
 	long x8664_pda_cpunumber;
+	long x8664_pda_me;
 	long tss_struct_ist;
+	long mem_section_section_mem_map;
+	long vcpu_guest_context_user_regs;
+	long cpu_user_regs_eip;
+	long cpu_user_regs_esp;
+	long cpu_user_regs_rip;
+	long cpu_user_regs_rsp;
+        long unwind_table_core;
+        long unwind_table_init;
+        long unwind_table_address;
+        long unwind_table_size;
+        long unwind_table_link;
+        long unwind_table_name;
+	long rq_cfs;
+	long rq_rt;
+	long rq_nr_running;
+	long cfs_rq_rb_leftmost;
+	long cfs_rq_nr_running;
+	long cfs_rq_tasks_timeline;
+	long task_struct_se;
+	long sched_entity_run_node;
+	long rt_rq_active;
+	long kmem_cache_size;
+	long kmem_cache_objsize;
+	long kmem_cache_offset;
+	long kmem_cache_order;
+	long kmem_cache_local_node;
+	long kmem_cache_objects;
+	long kmem_cache_inuse;
+	long kmem_cache_align;
+	long kmem_cache_name;
+	long kmem_cache_list;
+	long kmem_cache_node;
+	long kmem_cache_cpu_slab;
+	long page_inuse;
+/*	long page_offset;  use "old" page->offset */
+	long page_slab;
+	long page_first_page;
+	long page_freelist;
+	long kmem_cache_node_nr_partial;
+	long kmem_cache_node_nr_slabs;
+	long kmem_cache_node_partial;
+	long kmem_cache_node_full;
+	long pid_numbers;
+	long upid_nr;
+	long upid_ns;
+	long upid_pid_chain;
+	long pid_tasks;
+        long kmem_cache_cpu_freelist;
+        long kmem_cache_cpu_page;
+        long kmem_cache_cpu_node;
+	long kmem_cache_flags;
+	long zone_nr_active;
+	long zone_nr_inactive;
+	long zone_all_unreclaimable;
+	long zone_present_pages;
+	long zone_flags;
+	long zone_pages_scanned;
+	long pcpu_info_vcpu;
+	long pcpu_info_idle;
+	long vcpu_struct_rq;
+	long task_struct_sched_info;
+	long sched_info_last_arrival;
+	long page_objects;
+	long kmem_cache_oo;
 };
 
 struct size_table {         /* stash of commonly-used sizes */
@@ -1239,6 +1487,7 @@
 	long umode_t;
 	long dentry;
 	long files_struct;
+	long fdtable;
 	long fs_struct;
 	long file;
 	long inode;
@@ -1264,6 +1513,7 @@
 	long net_device;
 	long sock;
 	long signal_struct;
+	long sigpending_signal;
 	long signal_queue;
 	long sighand_struct;
 	long sigqueue;
@@ -1292,15 +1542,28 @@
 	long address_space;
 	long char_device_struct;
 	long inet_sock;
+	long in6_addr;
 	long socket;
 	long spinlock_t;
 	long radix_tree_root;
 	long radix_tree_node;
 	long x8664_pda;
+	long ppc64_paca;
 	long gate_struct;
 	long tss_struct;
 	long task_struct_start_time;
 	long cputime_t;
+	long mem_section;
+	long pid_link;
+	long unwind_table;
+	long rlimit;
+	long kmem_cache;
+	long kmem_cache_node;
+	long upid;
+	long kmem_cache_cpu;
+	long cfs_rq;
+	long pcpu_info;
+	long vcpu_struct;
 };
 
 struct array_table {
@@ -1327,6 +1590,9 @@
 	int free_area_DIMENSION;
 	int prio_array_queue;
 	int height_to_maxindex;
+	int pid_hash;
+	int kmem_cache_node;
+	int kmem_cache_cpu_slab;
 };
 
 /*
@@ -1342,7 +1608,12 @@
 #define MEMBER_OFFSET(X,Y)  datatype_info((X), (Y), NULL)
 #define MEMBER_EXISTS(X,Y)  (datatype_info((X), (Y), NULL) >= 0)
 #define MEMBER_SIZE_REQUEST ((struct datatype_member *)(-1))
+#define MEMBER_TYPE_REQUEST ((struct datatype_member *)(-3))
 #define MEMBER_SIZE(X,Y)    datatype_info((X), (Y), MEMBER_SIZE_REQUEST)
+#define MEMBER_TYPE(X,Y)    datatype_info((X), (Y), MEMBER_TYPE_REQUEST)
+
+#define ANON_MEMBER_OFFSET_REQUEST ((struct datatype_member *)(-2))
+#define ANON_MEMBER_OFFSET(X,Y)    datatype_info((X), (Y), ANON_MEMBER_OFFSET_REQUEST)
 
 /*
  *  The following set of macros can only be used with pre-intialized fields
@@ -1365,7 +1636,9 @@
 #define MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z))
 #define STRUCT_SIZE_INIT(X, Y) (ASSIGN_SIZE(X) = STRUCT_SIZE(Y))
 #define ARRAY_LENGTH_INIT(A, B, C, D, E) ((A) = get_array_length(C, D, E))
+#define ARRAY_LENGTH_INIT_ALT(A, B, C, D, E) ((A) = get_array_length_alt(B, C, D, E))
 #define MEMBER_SIZE_INIT(X, Y, Z) (ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z))
+#define ANON_MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = ANON_MEMBER_OFFSET(Y, Z))
 
 /*
  *  For use with non-debug kernels.
@@ -1389,6 +1662,7 @@
 #define ULONGLONG(ADDR) *((ulonglong *)((char *)(ADDR)))
 #define ULONG_PTR(ADDR) *((ulong **)((char *)(ADDR)))
 #define USHORT(ADDR)    *((ushort *)((char *)(ADDR)))
+#define SHORT(ADDR)     *((short *)((char *)(ADDR)))
 #define VOID_PTR(ADDR)  *((void **)((char *)(ADDR)))
 
 struct node_table {
@@ -1396,6 +1670,7 @@
 	ulong pgdat;
 	ulong mem_map;
 	ulong size;
+	ulong present;
 	ulonglong start_paddr;
 	ulong start_mapnr;
 };
@@ -1420,8 +1695,10 @@
 	ulong kmem_max_limit;
 	ulong kmem_max_cpus;
 	ulong kmem_cache_count;
+	ulong kmem_cache_len_nodes;
 	ulong PG_reserved;
 	ulong PG_slab;
+	ulong PG_head_tail_mask;
 	int kmem_cache_namelen;
 	ulong page_hash_table;
 	int page_hash_table_len;
@@ -1441,17 +1718,42 @@
         ulong cached_vma_hits[VMA_CACHE];
         int vma_cache_index;
         ulong vma_cache_fills;
-};
-
-#define NODES                (0x1)
-#define ZONES                (0x2)
-#define PERCPU_KMALLOC_V1    (0x4)
-#define COMMON_VADDR         (0x8)
-#define KMEM_CACHE_INIT     (0x10)
-#define V_MEM_MAP           (0x20)
-#define PERCPU_KMALLOC_V2   (0x40)
-#define KMEM_CACHE_UNAVAIL  (0x80)
-#define DISCONTIGMEM       (0x100)
+	void *mem_sec;
+	char *mem_section;
+	int ZONE_HIGHMEM;
+	ulong *node_online_map;
+	int node_online_map_len;
+	int nr_vm_stat_items;
+	char **vm_stat_items;
+	int cpu_slab_type;
+	int nr_vm_event_items;
+	char **vm_event_items;
+};
+
+#define NODES                       (0x1)
+#define ZONES                       (0x2)
+#define PERCPU_KMALLOC_V1           (0x4)
+#define COMMON_VADDR                (0x8)
+#define KMEM_CACHE_INIT            (0x10)
+#define V_MEM_MAP                  (0x20)
+#define PERCPU_KMALLOC_V2          (0x40)
+#define KMEM_CACHE_UNAVAIL         (0x80)
+#define FLATMEM			  (0x100)
+#define DISCONTIGMEM		  (0x200)
+#define SPARSEMEM		  (0x400)
+#define SPARSEMEM_EX		  (0x800)
+#define PERCPU_KMALLOC_V2_NODES  (0x1000)
+#define KMEM_CACHE_DELAY         (0x2000)
+#define NODES_ONLINE             (0x4000)
+#define VM_STAT                  (0x8000)
+#define KMALLOC_SLUB            (0x10000)
+#define CONFIG_NUMA             (0x20000)
+#define VM_EVENT                (0x40000)
+
+#define IS_FLATMEM()		(vt->flags & FLATMEM)
+#define IS_DISCONTIGMEM()	(vt->flags & DISCONTIGMEM)
+#define IS_SPARSEMEM()		(vt->flags & SPARSEMEM)
+#define IS_SPARSEMEM_EX()	(vt->flags & SPARSEMEM_EX)
 
 #define COMMON_VADDR_SPACE() (vt->flags & COMMON_VADDR)
 #define PADDR_PRLEN          (vt->paddr_prlen)
@@ -1478,7 +1780,8 @@
 	long list_head_offset;
         ulong end;
 	ulong searchfor;
-	char *structname;
+	char **structname;
+	int structname_args;
 	char *header;
 };
 #define LIST_OFFSET_ENTERED  (VERBOSE << 1)
@@ -1503,18 +1806,18 @@
 	char argbuf[1];
 };
 
-static inline void
-save_return_address(ulong *retaddr)
-{
-	retaddr[0] = (ulong) __builtin_return_address(0);
-#if defined(X86) || defined(PPC) || defined(X86_64) || defined(PPC64)
-	if (__builtin_frame_address(1))
-		retaddr[1] = (ulong) __builtin_return_address(1);
-	if (__builtin_frame_address(2))
-                retaddr[2] = (ulong) __builtin_return_address(2);
-	if (__builtin_frame_address(3))
-                retaddr[3] = (ulong) __builtin_return_address(3);
-#endif
+#define NUMBER_STACKFRAMES 4
+
+#define SAVE_RETURN_ADDRESS(retaddr) \
+{ 									\
+	int i; 								\
+	int saved_stacks; 						\
+									\
+	saved_stacks = backtrace((void **)retaddr, NUMBER_STACKFRAMES); \
+									\
+	/* explicitly zero out the invalid addresses */		\
+	for (i = saved_stacks; i < NUMBER_STACKFRAMES; i++)		\
+		retaddr[i] = 0;						\
 }
 
 #endif /* !GDB_COMMON */
@@ -1584,8 +1887,11 @@
 	int mods_installed;
 	struct load_module *current;
 	struct load_module *load_modules;
+	off_t dwarf_eh_frame_file_offset;
+	ulong dwarf_eh_frame_size;
 };
 
+/* flags for st */
 #define KERNEL_SYMS        (0x1)
 #define MODULE_SYMS        (0x2)
 #define LOAD_MODULE_SYMS   (0x4)
@@ -1596,6 +1902,8 @@
 #define NO_SEC_CONTENTS   (0x40)
 #define FORCE_DEBUGINFO   (0x80)
 #define CRC_MATCHES      (0x100)
+#define ADD_SYMBOL_FILE  (0x200)
+#define USE_OLD_ADD_SYM  (0x400)
 
 #endif /* !GDB_COMMON */
 
@@ -1611,6 +1919,8 @@
 #define MOD_KALLSYMS    (0x8)
 #define MOD_INITRD     (0x10)
 
+#define SEC_FOUND       (0x10000)
+
 struct mod_section_data {
 #if defined(GDB_6_1)
         struct bfd_section *section;
@@ -1659,6 +1969,8 @@
 #define KVADDR             (0x1)
 #define UVADDR             (0x2)
 #define PHYSADDR           (0x4)
+#define XENMACHADDR        (0x8)
+#define FILEADDR          (0x10)
 #define AMBIGUOUS          (~0)
 
 #define USE_USER_PGD       (UVADDR << 2)
@@ -1680,6 +1992,33 @@
 #define VIRTPAGEBASE(X)  (((ulong)(X)) & (ulong)machdep->pagemask)
 #define PHYSPAGEBASE(X)  (((physaddr_t)(X)) & (physaddr_t)machdep->pagemask)
 
+/* 
+ * Sparse memory stuff
+ *  These must follow the definitions in the kernel mmzone.h
+ */
+#define SECTION_SIZE_BITS()	(machdep->section_size_bits)
+#define MAX_PHYSMEM_BITS()	(machdep->max_physmem_bits)
+#define SECTIONS_SHIFT()	(MAX_PHYSMEM_BITS() - SECTION_SIZE_BITS())
+#define PA_SECTION_SHIFT()	(SECTION_SIZE_BITS())
+#define PFN_SECTION_SHIFT()	(SECTION_SIZE_BITS() - PAGESHIFT())
+#define NR_MEM_SECTIONS()	(1UL << SECTIONS_SHIFT())
+#define PAGES_PER_SECTION()	(1UL << PFN_SECTION_SHIFT())
+#define PAGE_SECTION_MASK()	(~(PAGES_PER_SECTION()-1))
+
+#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT())
+#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT())
+
+#define SECTIONS_PER_ROOT()	(machdep->sections_per_root)
+
+/* CONFIG_SPARSEMEM_EXTREME */
+#define _SECTIONS_PER_ROOT_EXTREME()	(PAGESIZE() / SIZE(mem_section))
+/* !CONFIG_SPARSEMEM_EXTREME */
+#define _SECTIONS_PER_ROOT()	(1)
+
+#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT())
+#define NR_SECTION_ROOTS()	(NR_MEM_SECTIONS() / SECTIONS_PER_ROOT())
+#define SECTION_ROOT_MASK()	(SECTIONS_PER_ROOT() - 1)
+
 /*
  *  Machine specific stuff
  */
@@ -1689,8 +2028,8 @@
 #define MACHINE_TYPE       "X86"
 #define PTOV(X)            ((unsigned long)(X)+(machdep->kvbase))
 #define VTOP(X)            ((unsigned long)(X)-(machdep->kvbase))
-#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start)
-#define KVBASE_MASK        (0x1fffff)
+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start)
+#define KVBASE_MASK        (0x1ffffff)
 
 #define PGDIR_SHIFT_2LEVEL   (22)
 #define PTRS_PER_PTE_2LEVEL  (1024)
@@ -1721,25 +2060,94 @@
 
 #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f)
 #define SWP_OFFSET(entry) ((entry) >> 8)
+#define __swp_type_PAE(entry)      (((entry) >> 32) & 0x1f)
+#define __swp_type_nonPAE(entry)   (((entry) >> 1) & 0x1f)
+#define __swp_offset_PAE(entry)    (((entry) >> 32) >> 5)
+#define __swp_offset_nonPAE(entry) ((entry) >> 8)
+#define __swp_type(entry)          (machdep->flags & PAE ? \
+				    __swp_type_PAE(entry) : __swp_type_nonPAE(entry))
+#define __swp_offset(entry)        (machdep->flags & PAE ? \
+				    __swp_offset_PAE(entry) : __swp_offset_nonPAE(entry))
 
 #define TIF_SIGPENDING  (2)
 
+// CONFIG_X86_PAE 
+#define _SECTION_SIZE_BITS_PAE_ORIG	30
+#define _SECTION_SIZE_BITS_PAE_2_6_26	29
+#define _MAX_PHYSMEM_BITS_PAE	36
+
+// !CONFIG_X86_PAE   
+#define _SECTION_SIZE_BITS	26
+#define _MAX_PHYSMEM_BITS	32
+
+#define IS_LAST_PMD_READ_PAE(pmd)     ((ulong)(pmd) == machdep->machspec->last_pmd_read_PAE)
+#define IS_LAST_PTBL_READ_PAE(ptbl)   ((ulong)(ptbl) == machdep->machspec->last_ptbl_read_PAE)
+
+#define FILL_PMD_PAE(PMD, TYPE, SIZE)			                    \
+    if (!IS_LAST_PMD_READ_PAE(PMD)) {                                       \
+            readmem((ulonglong)(PMD), TYPE, machdep->pmd,                   \
+	            SIZE, "pmd page", FAULT_ON_ERROR);                      \
+            machdep->machspec->last_pmd_read_PAE = (ulonglong)(PMD);        \
+    }					                                    
+
+#define FILL_PTBL_PAE(PTBL, TYPE, SIZE)			           	    \
+    if (!IS_LAST_PTBL_READ_PAE(PTBL)) {                                     \
+    	    readmem((ulonglong)(PTBL), TYPE, machdep->ptbl,                 \
+	            SIZE, "page table", FAULT_ON_ERROR);                    \
+            machdep->machspec->last_ptbl_read_PAE = (ulonglong)(PTBL); 	    \
+    }
+
 #endif  /* X86 */
 
 #ifdef X86_64 
 #define _64BIT_
 #define MACHINE_TYPE       "X86_64"
 
-#define USERSPACE_TOP         0x0000008000000000
-#define __START_KERNEL_map    0xffffffff80000000
-#define PAGE_OFFSET           0x0000010000000000
-
-#define VMALLOC_START   0xffffff0000000000
-#define VMALLOC_END     0xffffff7fffffffff
-#define MODULES_VADDR   0xffffffffa0000000
-#define MODULES_END     0xffffffffafffffff
+#define USERSPACE_TOP   (machdep->machspec->userspace_top)
+#define PAGE_OFFSET     (machdep->machspec->page_offset)
+#define VMALLOC_START   (machdep->machspec->vmalloc_start_addr)
+#define VMALLOC_END     (machdep->machspec->vmalloc_end)
+#define VMEMMAP_VADDR   (machdep->machspec->vmemmap_vaddr)
+#define VMEMMAP_END     (machdep->machspec->vmemmap_end)
+#define MODULES_VADDR   (machdep->machspec->modules_vaddr)
+#define MODULES_END     (machdep->machspec->modules_end)
+
+#define __START_KERNEL_map    0xffffffff80000000UL
 #define MODULES_LEN     (MODULES_END - MODULES_VADDR)
 
+#define USERSPACE_TOP_ORIG         0x0000008000000000
+#define PAGE_OFFSET_ORIG           0x0000010000000000
+#define VMALLOC_START_ADDR_ORIG    0xffffff0000000000
+#define VMALLOC_END_ORIG           0xffffff7fffffffff
+#define MODULES_VADDR_ORIG         0xffffffffa0000000
+#define MODULES_END_ORIG           0xffffffffafffffff
+ 
+#define USERSPACE_TOP_2_6_11       0x0000800000000000
+#define PAGE_OFFSET_2_6_11         0xffff810000000000
+#define VMALLOC_START_ADDR_2_6_11  0xffffc20000000000
+#define VMALLOC_END_2_6_11         0xffffe1ffffffffff
+#define MODULES_VADDR_2_6_11       0xffffffff88000000
+#define MODULES_END_2_6_11         0xfffffffffff00000
+
+#define VMEMMAP_VADDR_2_6_24       0xffffe20000000000
+#define VMEMMAP_END_2_6_24         0xffffe2ffffffffff
+
+#define PAGE_OFFSET_2_6_27         0xffff880000000000
+
+#define USERSPACE_TOP_XEN          0x0000800000000000
+#define PAGE_OFFSET_XEN            0xffff880000000000
+#define VMALLOC_START_ADDR_XEN     0xffffc20000000000
+#define VMALLOC_END_XEN            0xffffe1ffffffffff
+#define MODULES_VADDR_XEN          0xffffffff88000000
+#define MODULES_END_XEN            0xfffffffffff00000
+
+#define USERSPACE_TOP_XEN_RHEL4       0x0000008000000000
+#define PAGE_OFFSET_XEN_RHEL4         0xffffff8000000000
+#define VMALLOC_START_ADDR_XEN_RHEL4  0xffffff0000000000
+#define VMALLOC_END_XEN_RHEL4         0xffffff7fffffffff
+#define MODULES_VADDR_XEN_RHEL4       0xffffffffa0000000
+#define MODULES_END_XEN_RHEL4         0xffffffffafffffff
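With the layout now taken from machdep->machspec, the *_ORIG, *_2_6_11, *_XEN and *_XEN_RHEL4 constants above act as templates that get copied into the machspec fields at init time. A sketch of that idea in isolation, with a hypothetical selector flag and struct; this is not crash's actual x86_64 initialization code:

struct vm_layout {
	unsigned long userspace_top, page_offset;
	unsigned long vmalloc_start, vmalloc_end;
	unsigned long modules_vaddr, modules_end;
};

/* Hypothetical: choose between the original and the 2.6.11 layout. */
static struct vm_layout pick_layout(int at_least_2_6_11)
{
	struct vm_layout v;

	if (at_least_2_6_11) {
		v.userspace_top = 0x0000800000000000UL;
		v.page_offset   = 0xffff810000000000UL;
		v.vmalloc_start = 0xffffc20000000000UL;
		v.vmalloc_end   = 0xffffe1ffffffffffUL;
		v.modules_vaddr = 0xffffffff88000000UL;
		v.modules_end   = 0xfffffffffff00000UL;
	} else {
		v.userspace_top = 0x0000008000000000UL;
		v.page_offset   = 0x0000010000000000UL;
		v.vmalloc_start = 0xffffff0000000000UL;
		v.vmalloc_end   = 0xffffff7fffffffffUL;
		v.modules_vaddr = 0xffffffffa0000000UL;
		v.modules_end   = 0xffffffffafffffffUL;
	}
	return v;
}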
+
 #define PTOV(X)               ((unsigned long)(X)+(machdep->kvbase))
 #define VTOP(X)               x86_64_VTOP((ulong)(X))
 #define IS_VMALLOC_ADDR(X)    x86_64_IS_VMALLOC_ADDR((ulong)(X))
@@ -1757,12 +2165,37 @@
 #define pmd_index(address)  (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
 #define pte_index(address)  (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
+#define IS_LAST_PML4_READ(pml4) ((ulong)(pml4) == machdep->machspec->last_pml4_read)
+
 #define FILL_PML4() { \
 	if (!(pc->flags & RUNTIME) || ACTIVE()) \
-                readmem(vt->kernel_pgd[0], KVADDR, machdep->machspec->pml4, \
+		if (!IS_LAST_PML4_READ(vt->kernel_pgd[0])) \
+                    readmem(vt->kernel_pgd[0], KVADDR, machdep->machspec->pml4, \
                         PAGESIZE(), "init_level4_pgt", FAULT_ON_ERROR); \
+                machdep->machspec->last_pml4_read = (ulong)(vt->kernel_pgd[0]); \
 	}
 
+#define FILL_PML4_HYPER() { \
+	if (!machdep->machspec->last_pml4_read) { \
+		unsigned long idle_pg_table = \
+		    symbol_exists("idle_pg_table_4") ? symbol_value("idle_pg_table_4") : \
+			symbol_value("idle_pg_table"); \
+		readmem(idle_pg_table, KVADDR, \
+			machdep->machspec->pml4, PAGESIZE(), "idle_pg_table", \
+			FAULT_ON_ERROR); \
+		machdep->machspec->last_pml4_read = idle_pg_table; \
+	}\
+}
+
+#define IS_LAST_UPML_READ(pml) ((ulong)(pml) == machdep->machspec->last_upml_read)
+
+#define FILL_UPML(PML, TYPE, SIZE) 					      \
+    if (!IS_LAST_UPML_READ(PML)) {                                             \
+            readmem((ulonglong)((ulong)(PML)), TYPE, machdep->machspec->upml, \
+                    SIZE, "pml page", FAULT_ON_ERROR);                        \
+            machdep->machspec->last_upml_read = (ulong)(PML);                 \
+    }								            
+
 /* 
  *  PHYSICAL_PAGE_MASK changed (enlarged) between 2.4 and 2.6, so
  *  for safety, use the 2.6 values to generate it.
@@ -1791,11 +2224,33 @@
 
 #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f)
 #define SWP_OFFSET(entry) ((entry) >> 8)
+#define __swp_type(entry)   SWP_TYPE(entry)
+#define __swp_offset(entry) SWP_OFFSET(entry)
 
 #define TIF_SIGPENDING  (2)
 
 #define PAGEBASE(X)           (((ulong)(X)) & (ulong)machdep->pagemask)
 
+#define _CPU_PDA_READ2(CPU, BUFFER) \
+ 	((readmem(symbol_value("_cpu_pda"),				\
+		 KVADDR, &cpu_pda_addr, sizeof(unsigned long),		\
+		 "_cpu_pda addr", FAULT_ON_ERROR)) &&			\
+ 	(readmem(cpu_pda_addr + ((CPU) * sizeof(void *)),		\
+		 KVADDR, &cpu_pda_addr, sizeof(unsigned long),		\
+		 "_cpu_pda addr", FAULT_ON_ERROR)) &&			\
+	(cpu_pda_addr) &&						\
+	(readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda),	\
+		 "cpu_pda entry", FAULT_ON_ERROR)))
+
+#define _CPU_PDA_READ(CPU, BUFFER) \
+	((STRNEQ("_cpu_pda", closest_symbol((symbol_value("_cpu_pda") +	\
+	     ((CPU) * sizeof(unsigned long)))))) &&			\
+ 	(readmem(symbol_value("_cpu_pda") + ((CPU) * sizeof(void *)),   \
+		 KVADDR, &cpu_pda_addr, sizeof(unsigned long),          \
+		 "_cpu_pda addr", FAULT_ON_ERROR)) &&	   	        \
+	(readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda),       \
+		 "cpu_pda entry", FAULT_ON_ERROR)))
+
 #define CPU_PDA_READ(CPU, BUFFER) \
 	(STRNEQ("cpu_pda", closest_symbol((symbol_value("cpu_pda") +	\
 	     ((CPU) * SIZE(x8664_pda))))) &&				\
@@ -1806,6 +2261,9 @@
 #define VALID_LEVEL4_PGT_ADDR(X) \
 	(((X) == VIRTPAGEBASE(X)) && IS_KVADDR(X) && !IS_VMALLOC_ADDR(X))
 
+#define _SECTION_SIZE_BITS	27
+#define _MAX_PHYSMEM_BITS	40
+
 #endif  /* X86_64 */
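For reference, the pmd_index/pte_index macros and the PML4 handling earlier in this X86_64 block implement the standard 4-level, 4KB-page split of a virtual address. A standalone illustration using the generic shift values (39/30/21/12) rather than anything read from machdep:

#include <stdio.h>

int main(void)
{
	unsigned long long vaddr = 0xffff810012345678ULL;	/* arbitrary example */

	printf("pml4 %llu pud %llu pmd %llu pte %llu page offset %llu\n",
	    (vaddr >> 39) & 0x1ff,	/* 512 entries per level */
	    (vaddr >> 30) & 0x1ff,
	    (vaddr >> 21) & 0x1ff,
	    (vaddr >> 12) & 0x1ff,
	    vaddr & 0xfff);
	return 0;
}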
 
 #ifdef ALPHA
@@ -1816,7 +2274,7 @@
 
 #define PTOV(X)            ((unsigned long)(X)+(machdep->kvbase))
 #define VTOP(X)            ((unsigned long)(X)-(machdep->kvbase))
-#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start)
+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start)
 #define KSEG_BASE_48_BIT   (0xffff800000000000)
 #define KSEG_BASE          (0xfffffc0000000000)
 #define _PFN_MASK          (0xFFFFFFFF00000000)
@@ -1848,6 +2306,8 @@
 
 #define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
 #define SWP_OFFSET(entry) ((entry) >> 40)
+#define __swp_type(entry)   SWP_TYPE(entry)
+#define __swp_offset(entry) SWP_OFFSET(entry)
 
 #define TIF_SIGPENDING (2)
 
@@ -1861,7 +2321,7 @@
 
 #define PTOV(X)            ((unsigned long)(X)+(machdep->kvbase))
 #define VTOP(X)            ((unsigned long)(X)-(machdep->kvbase))
-#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start)
+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start)
 
 #define PGDIR_SHIFT   (22)
 #define PTRS_PER_PTE  (1024)
@@ -1881,9 +2341,14 @@
 
 #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
 #define SWP_OFFSET(entry) ((entry) >> 8)
+#define __swp_type(entry)   SWP_TYPE(entry)
+#define __swp_offset(entry) SWP_OFFSET(entry)
 
 #define TIF_SIGPENDING (2)
 
+#define _SECTION_SIZE_BITS	24
+#define _MAX_PHYSMEM_BITS	44
+
 #endif  /* PPC */
 
 #ifdef IA64
@@ -1908,6 +2373,9 @@
 #define KERNEL_UNCACHED_BASE  ((ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT)
 #define KERNEL_CACHED_BASE    ((ulong)KERNEL_CACHED_REGION << REGION_SHIFT)
 
+#define _SECTION_SIZE_BITS    30
+#define _MAX_PHYSMEM_BITS     50
+
 /*
  *  As of 2.6, these are no longer straightforward.
  */
@@ -1917,16 +2385,57 @@
 
 #define SWITCH_STACK_ADDR(X)  (ia64_get_switch_stack((ulong)(X)))
 
-#define PGDIR_SHIFT     (PAGESHIFT() + 2*(PAGESHIFT()-3))
-#define PMD_SHIFT       (PAGESHIFT() + (PAGESHIFT()-3))
-#define PTRS_PER_PGD    (((ulong)(1)) << (PAGESHIFT()-3))
-#define PTRS_PER_PMD    (((ulong)(1)) << (PAGESHIFT()-3))
-#define PTRS_PER_PTE    (((ulong)(1)) << (PAGESHIFT()-3))
-#define PTRS_PER_PAGE   (((ulong)(1)) << (PAGESHIFT()-3))
 #define __IA64_UL(x)           ((unsigned long)(x))
 #define IA64_MAX_PHYS_BITS  (50)  /* max # of phys address bits (architected) */
 
 /*
+ * How many pointers a page table level holds, expressed as a shift
+ */
+#define PTRS_PER_PTD_SHIFT	(PAGESHIFT()-3)
+
+/*
+ * Definitions for fourth level:
+ */
+#define PTRS_PER_PTE	(__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))
+
+/*
+ * Definitions for third level:
+ *
+ * PMD_SHIFT determines the size of the area a third-level page table
+ * can map.
+ */
+#define PMD_SHIFT	(PAGESHIFT() + (PTRS_PER_PTD_SHIFT))
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+#define PTRS_PER_PMD	(1UL << (PTRS_PER_PTD_SHIFT))
+
+/*
+ * PUD_SHIFT determines the size of the area a second-level page table
+ * can map
+ */
+#define PUD_SHIFT	(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+#define PTRS_PER_PUD	(1UL << (PTRS_PER_PTD_SHIFT))
+
+/*
+ * Definitions for first level:
+ *
+ * PGDIR_SHIFT determines what a first-level page table entry can map.
+ */
+
+#define PGDIR_SHIFT_4L		(PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PGDIR_SHIFT_3L		(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+/* Turns out 4L & 3L PGDIR_SHIFT are the same (for now) */
+#define PGDIR_SHIFT		PGDIR_SHIFT_4L
+#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD_SHIFT	PTRS_PER_PTD_SHIFT
+#define PTRS_PER_PGD		(1UL << PTRS_PER_PGD_SHIFT)
+#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
+#define FIRST_USER_ADDRESS	0
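With a 16KB page size (PAGESHIFT() == 14), a common ia64 configuration, the definitions above evaluate to PTRS_PER_PTD_SHIFT = 11, PMD_SHIFT = 25, PUD_SHIFT = 36, PGDIR_SHIFT = 47, PTRS_PER_PGD = 2048 and USER_PTRS_PER_PGD = 1280. A trivial check of that arithmetic, with the page shift hardcoded for illustration only:

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 14;			/* 16KB pages */
	unsigned long ptd_shift  = page_shift - 3;	/* PTRS_PER_PTD_SHIFT */
	unsigned long pmd_shift  = page_shift + ptd_shift;
	unsigned long pud_shift  = pmd_shift + ptd_shift;
	unsigned long pgd_shift  = pud_shift + ptd_shift;
	unsigned long ptrs_per_pgd = 1UL << ptd_shift;

	printf("PMD_SHIFT=%lu PUD_SHIFT=%lu PGDIR_SHIFT=%lu "
	    "PTRS_PER_PGD=%lu USER_PTRS_PER_PGD=%lu\n",
	    pmd_shift, pud_shift, pgd_shift,
	    ptrs_per_pgd, 5 * ptrs_per_pgd / 8);
	return 0;
}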
+
+/*
  * First, define the various bits in a PTE.  Note that the PTE format
  * matches the VHPT short format, the first doubleword of the VHPD long
  * format, and the first doubleword of the TLB insertion format.
@@ -1978,6 +2487,7 @@
 #define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED
 
 #define EFI_PAGE_SHIFT  (12)
+
 /*
  * NOTE: #include'ing <asm/efi.h> creates too many compiler problems, so
  * this stuff is hardwired here; it's probably etched in stone somewhere.
@@ -2020,6 +2530,8 @@
 
 #define SWP_TYPE(entry)    (((entry) >> 1) & 0xff)
 #define SWP_OFFSET(entry)  ((entry) >> 9)
+#define __swp_type(entry)    ((entry >> 2) & 0x7f)
+#define __swp_offset(entry)  ((entry << 1) >> 10)
 
 #define TIF_SIGPENDING (1)
 
@@ -2038,11 +2550,14 @@
 #define _64BIT_
 #define MACHINE_TYPE       "PPC64"
 
+#define PPC64_64K_PAGE_SIZE  65536
+#define PPC64_STACK_SIZE     16384
+
 #define PAGEBASE(X)  (((ulong)(X)) & (ulong)machdep->pagemask)
 
 #define PTOV(X)            ((unsigned long)(X)+(machdep->kvbase))
 #define VTOP(X)            ((unsigned long)(X)-(machdep->kvbase))
-#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start)
+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start)
 #define KERNELBASE      machdep->pageoffset
 
 #define PGDIR_SHIFT     (machdep->pageshift + (machdep->pageshift -3) + (machdep->pageshift - 2))
@@ -2067,6 +2582,33 @@
 #define PGD_OFFSET(vaddr)       ((vaddr >> PGDIR_SHIFT) & 0x7ff)
 #define PMD_OFFSET(vaddr)       ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
+/* 4-level page table support */
+
+/* 4K pagesize */
+#define PTE_INDEX_SIZE_L4_4K  9
+#define PMD_INDEX_SIZE_L4_4K  7
+#define PUD_INDEX_SIZE_L4_4K  7
+#define PGD_INDEX_SIZE_L4_4K  9
+#define PTE_SHIFT_L4_4K  17
+#define PMD_MASKED_BITS_4K  0
+
+/* 64K pagesize */
+#define PTE_INDEX_SIZE_L4_64K  12
+#define PMD_INDEX_SIZE_L4_64K  12
+#define PUD_INDEX_SIZE_L4_64K  0
+#define PGD_INDEX_SIZE_L4_64K  4
+#define PTE_SHIFT_L4_64K_V1  32
+#define PTE_SHIFT_L4_64K_V2  30
+#define PMD_MASKED_BITS_64K  0x1ff
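These index sizes follow the usual kernel convention that each level's shift is the previous level's shift plus the previous level's index size, starting from the page shift. Assuming that convention (a sanity check only, not crash's ppc64 initialization code), the 4K-page values above place the PMD at bit 21, the PUD at bit 28 and the PGD at bit 35:

#include <stdio.h>

int main(void)
{
	/* 4K-page index sizes from the constants above. */
	unsigned int page_shift = 12;
	unsigned int pte_index = 9, pmd_index = 7, pud_index = 7;

	unsigned int pmd_shift = page_shift + pte_index;	/* 21 */
	unsigned int pud_shift = pmd_shift + pmd_index;		/* 28 */
	unsigned int pgd_shift = pud_shift + pud_index;		/* 35 */

	printf("pmd_shift=%u pud_shift=%u pgd_shift=%u\n",
	    pmd_shift, pud_shift, pgd_shift);
	return 0;
}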
+
+#define L4_OFFSET(vaddr)  ((vaddr >> (machdep->machspec->l4_shift)) & 0x1ff)
+
+#define PGD_OFFSET_L4(vaddr)	\
+	((vaddr >> (machdep->machspec->l3_shift)) & (machdep->machspec->ptrs_per_l3 - 1))
+
+#define PMD_OFFSET_L4(vaddr)	\
+	((vaddr >> (machdep->machspec->l2_shift)) & (machdep->machspec->ptrs_per_l2 - 1))
+
 #define _PAGE_PRESENT   0x001UL /* software: pte contains a translation */
 #define _PAGE_USER      0x002UL /* matches one of the PP bits */
 #define _PAGE_RW        0x004UL /* software: user write access allowed */
@@ -2080,6 +2622,8 @@
 
 #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
 #define SWP_OFFSET(entry) ((entry) >> 8)
+#define __swp_type(entry)   SWP_TYPE(entry)
+#define __swp_offset(entry) SWP_OFFSET(entry)
 
 #define MSR_PR_LG	14	/* Problem State / Privilege Level */
 				/* Used to find the user or kernel-mode frame*/
@@ -2087,6 +2631,9 @@
 #define STACK_FRAME_OVERHEAD            112
 #define EXCP_FRAME_MARKER               0x7265677368657265
 
+#define _SECTION_SIZE_BITS	24
+#define _MAX_PHYSMEM_BITS	44
+
 #endif /* PPC64 */
 
 #ifdef S390
@@ -2095,7 +2642,7 @@
 
 #define PTOV(X)            ((unsigned long)(X)+(machdep->kvbase))
 #define VTOP(X)            ((unsigned long)(X)-(machdep->kvbase))
-#define IS_VMALLOC_ADDR(X) s390_IS_VMALLOC_ADDR(X)
+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start)
 
 #define PTRS_PER_PTE    1024
 #define PTRS_PER_PMD    1
@@ -2105,9 +2652,14 @@
 #define SWP_TYPE(entry) (((entry) >> 2) & 0x1f)
 #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffe) | \
                            (((entry) >> 7) & 0x1))
+#define __swp_type(entry)   SWP_TYPE(entry)
+#define __swp_offset(entry) SWP_OFFSET(entry)
 
 #define TIF_SIGPENDING (2)
 
+#define _SECTION_SIZE_BITS	25
+#define _MAX_PHYSMEM_BITS	31
+
 #endif  /* S390 */
 
 #ifdef S390X
@@ -2116,7 +2668,7 @@
 
 #define PTOV(X)            ((unsigned long)(X)+(machdep->kvbase))
 #define VTOP(X)            ((unsigned long)(X)-(machdep->kvbase))
-#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start)
+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start)
 #define PTRS_PER_PTE    512
 #define PTRS_PER_PMD    1024
 #define PTRS_PER_PGD    2048
@@ -2125,15 +2677,22 @@
 #define SWP_TYPE(entry)   (((entry) >> 2) & 0x1f)
 #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffffffffffe) | \
                            (((entry) >> 7) & 0x1)) 
+#define __swp_type(entry)  SWP_TYPE(entry)
+#define __swp_offset(entry) SWP_OFFSET(entry)
 
 #define TIF_SIGPENDING (2)
 
+#define _SECTION_SIZE_BITS	28
+#define _MAX_PHYSMEM_BITS	42
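As on the other architectures in this header, the two values combine in the usual sparsemem way: the number of memory sections is 1 << (_MAX_PHYSMEM_BITS - _SECTION_SIZE_BITS), and each section spans 1 << _SECTION_SIZE_BITS bytes. For the S390X values above that is 1 << (42 - 28) = 16384 sections of 256MB each; a one-line check:

#include <stdio.h>

int main(void)
{
	unsigned int max_physmem_bits = 42, section_size_bits = 28;

	printf("%lu sections of %lu MB\n",
	    1UL << (max_physmem_bits - section_size_bits),
	    (1UL << section_size_bits) >> 20);
	return 0;
}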
+
 #endif  /* S390X */
 
 #ifdef PLATFORM
 
 #define SWP_TYPE(entry)   (error("PLATFORM_SWP_TYPE: TBD\n"))
 #define SWP_OFFSET(entry) (error("PLATFORM_SWP_OFFSET: TBD\n"))
+#define __swp_type(entry)   SWP_TYPE(entry)
+#define __swp_offset(entry) SWP_OFFSET(entry)
 
 #endif /* PLATFORM */
 
@@ -2185,7 +2744,10 @@
 #define BADVAL   ((ulong)(-1))
 #define UNUSED   (-1)
 
+#define UNINITIALIZED (BADVAL)
+
 #define BITS_PER_BYTE (8)
+#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long))
 
 /*
  *  precision lengths for fprintf
@@ -2199,9 +2761,10 @@
 
 #define MINSPACE  (-100)
 
-#define SYNOPSIS      (0x1)
-#define COMPLETE_HELP (0x2)
-#define PIPE_TO_LESS  (0x4)
+#define SYNOPSIS       (0x1)
+#define COMPLETE_HELP  (0x2)
+#define PIPE_TO_SCROLL (0x4)
+#define MUST_HELP      (0x8)
 
 #define LEFT_JUSTIFY   (1)
 #define RIGHT_JUSTIFY  (2)
@@ -2214,6 +2777,7 @@
 #define INT_DEC      (0x20)
 #define INT_HEX      (0x40)
 #define LONGLONG_HEX (0x80)
+#define ZERO_FILL   (0x100)
 
 #define INIT_TIME (1)
 #define RUN_TIME  (2)
@@ -2419,17 +2983,22 @@
 /*
  *  ps command options.
  */
-#define PS_BY_PID      (0x1)
-#define PS_BY_TASK     (0x2)
-#define PS_BY_CMD      (0x4)
-#define PS_SHOW_ALL    (0x8)
-#define PS_PPID_LIST  (0x10)
-#define PS_CHILD_LIST (0x20)
-#define PS_KERNEL     (0x40)
-#define PS_USER       (0x80)
-#define PS_TIMES     (0x100)
-#define PS_KSTACKP   (0x200)
-#define PS_LAST_RUN  (0x400)
+#define PS_BY_PID       (0x1)
+#define PS_BY_TASK      (0x2)
+#define PS_BY_CMD       (0x4)
+#define PS_SHOW_ALL     (0x8)
+#define PS_PPID_LIST   (0x10)
+#define PS_CHILD_LIST  (0x20)
+#define PS_KERNEL      (0x40)
+#define PS_USER        (0x80)
+#define PS_TIMES      (0x100)
+#define PS_KSTACKP    (0x200)
+#define PS_LAST_RUN   (0x400)
+#define PS_ARGV_ENVP  (0x800)
+#define PS_TGID_LIST (0x1000)
+#define PS_RLIMIT    (0x2000)
+
+#define PS_EXCLUSIVE (PS_TGID_LIST|PS_ARGV_ENVP|PS_TIMES|PS_CHILD_LIST|PS_PPID_LIST|PS_LAST_RUN|PS_RLIMIT)
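PS_EXCLUSIVE collects the ps option bits that cannot be combined with each other. A sketch of the kind of check a command parser can apply; the helper is illustrative and is not the actual cmd_ps() code:

/* Non-zero when two or more mutually exclusive option bits are set. */
static int ps_options_conflict(unsigned long flags, unsigned long exclusive)
{
	unsigned long set = flags & exclusive;

	/* "set & (set - 1)" clears the lowest set bit, so it is non-zero
	 * exactly when at least two bits remain set. */
	return (set & (set - 1)) != 0;
}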
 
 #define MAX_PS_ARGS    (100)   /* maximum command-line specific requests */
 
@@ -2461,7 +3030,7 @@
 extern struct program_context program_context, *pc;
 extern struct task_table task_table, *tt;
 extern struct kernel_table kernel_table, *kt;
-extern struct command_table_entry base_command_table[];
+extern struct command_table_entry linux_command_table[];
 extern char *args[MAXARGS];      
 extern int argcnt;            
 extern int argerrs;
@@ -2534,6 +3103,9 @@
 void cmd_gdb(void);          /* gdb_interface.c */
 void cmd_net(void);          /* net.c */
 void cmd_extend(void);       /* extensions.c */
+#if defined(S390) || defined(S390X)
+void cmd_s390dbf(void);
+#endif
 
 /*
  *  main.c
@@ -2571,6 +3143,7 @@
 #define machdep_init(X) ppc64_init(X)
 #endif
 int clean_exit(int);
+int untrusted_file(FILE *, char *);
 
 /*
  *  cmdline.c
@@ -2591,6 +3164,9 @@
 int interruptible(void);
 int received_SIGINT(void);
 void debug_redirect(char *);
+int CRASHPAGER_valid(void);
+char *setup_scroll_command(void);
+int minimal_functions(char *);
 
 /*
  *  tools.c
@@ -2658,6 +3234,7 @@
 int hq_open(void);
 int hq_close(void);
 int hq_enter(ulong);
+int hq_entry_exists(ulong);
 long get_embedded(void);
 void dump_embedded(char *);
 char *ordinal(ulong, char *);
@@ -2683,9 +3260,16 @@
 int clean_arg(void);
 int empty_list(ulong);
 int machine_type(char *);
+int machine_type_mismatch(char *, char *, char *, ulong);
 void command_not_supported(void);
 void option_not_supported(int);
-
+void please_wait(char *);
+void please_wait_done(void);
+int pathcmp(char *, char *);
+int calculate(char *, ulong *, ulonglong *, ulong);
+int endian_mismatch(char *, char, ulong);
+uint16_t swap16(uint16_t, int);
+uint32_t swap32(uint32_t, int);
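swap16() and swap32() are used by the dumpfile probing code later in this patch (see is_netdump()) to read ELF header fields from cores of either byte order. The prototypes here say nothing about the implementation; the conventional form, which byte-reverses only when the dump and the host disagree, looks like the following sketch, which is not necessarily crash's exact code:

#include <stdint.h>

static uint16_t swap16_example(uint16_t val, int swap_needed)
{
	if (!swap_needed)
		return val;
	return (uint16_t)((val << 8) | (val >> 8));
}

static uint32_t swap32_example(uint32_t val, int swap_needed)
{
	if (!swap_needed)
		return val;
	return ((val & 0x000000ffU) << 24) |
	       ((val & 0x0000ff00U) <<  8) |
	       ((val & 0x00ff0000U) >>  8) |
	       ((val & 0xff000000U) >> 24);
}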
 
 /* 
  *  symbols.c 
@@ -2721,9 +3305,11 @@
 struct syment *next_symbol(char *, struct syment *);
 struct syment *prev_symbol(char *, struct syment *);
 void get_symbol_data(char *, long, void *);
+int try_get_symbol_data(char *, long, void *);
 char *value_to_symstr(ulong, char *, ulong);
 char *value_symbol(ulong);
 ulong symbol_value(char *);
+ulong symbol_value_module(char *, char *);
 int symbol_exists(char *s);
 int kernel_symbol_exists(char *s);
 int get_syment_array(char *, struct syment **, int);
@@ -2738,9 +3324,12 @@
 void dump_struct_table(ulong);
 void dump_offset_table(char *, ulong);
 int is_elf_file(char *);
+int is_kernel(char *);
+int file_elf_version(char *);
 int is_system_map(char *);
 int select_namelist(char *);
 int get_array_length(char *, int *, long);
+int get_array_length_alt(char *, char *, int *, long);
 int builtin_array_length(char *, int, int *);
 char *get_line_number(ulong, char *, int);
 char *get_build_directory(char *);
@@ -2768,6 +3357,7 @@
 long OFFSET_option(long, long, char *, char *, int, char *, char *);
 long SIZE_option(long, long, char *, char *, int, char *, char *);
 void dump_trace(ulong *);
+int enumerator_value(char *, long *);
 
 /*  
  *  memory.c 
@@ -2807,6 +3397,7 @@
 char *swap_location(ulonglong, char *); 
 void clear_swap_info_cache(void);
 uint memory_page_size(void);
+void force_page_size(char *);
 ulong first_vmalloc_address(void);
 int l1_cache_size(void);
 int dumpfile_memory(int);
@@ -2838,6 +3429,7 @@
 void open_files_dump(ulong, int, struct reference *);
 void get_pathname(ulong, char *, int, int, ulong);
 ulong file_to_dentry(ulong);
+ulong file_to_vfsmnt(ulong);
 void nlm_files_dump(void);
 int get_proc_version(void);
 int file_checksum(char *, long *);
@@ -2874,6 +3466,7 @@
 void help_init(void);
 void cmd_usage(char *, int);
 void display_version(void);
+void display_help_screen(char *);
 #ifdef X86
 #define dump_machdep_table(X) x86_dump_machdep_table(X)
 #endif
@@ -2945,6 +3538,9 @@
 extern char *help_waitq[];
 extern char *help_whatis[];
 extern char *help_wr[];
+#if defined(S390) || defined(S390X)
+extern char *help_s390dbf[];
+#endif
 
 /*
  *  task.c
@@ -2962,10 +3558,13 @@
 ulong task_flags(ulong);
 ulong task_state(ulong);
 ulong task_mm(ulong, int);
+ulong task_tgid(ulong);
 ulonglong task_last_run(ulong);
+ulong vaddr_in_task_struct(ulong);
 int comm_exists(char *);
 struct task_context *task_to_context(ulong);
 struct task_context *pid_to_context(ulong);
+struct task_context *tgid_to_context(ulong);
 ulong stkptr_to_task(ulong);
 ulong task_to_thread_info(ulong);
 ulong task_to_stackbase(ulong);
@@ -3005,11 +3604,17 @@
  */
 void register_extension(struct command_table_entry *);
 void dump_extension_table(int);
+void load_extension(char *);
+void unload_extension(char *);
+/* Hooks for sial */
+unsigned long get_curtask(void);
+char *crash_global_cmd(void);
+struct command_table_entry *crash_cmd_table(void);
 
 /*
  *  kernel.c 
  */ 
-void kernel_init(int);
+void kernel_init(void);
 void module_init(void);
 void verify_version(void);
 void verify_spinlock(void);
@@ -3019,14 +3624,21 @@
 int is_system_call(char *, ulong);
 void generic_dump_irq(int);
 int generic_dis_filter(ulong, char *);
+int kernel_BUG_encoding_bytes(void);
 void display_sys_stats(void);
-void dump_kernel_table(void);
-void dump_bt_info(struct bt_info *);
+char *get_uptime(char *, ulonglong *);
+void clone_bt_info(struct bt_info *, struct bt_info *, struct task_context *);
+void dump_kernel_table(int);
+void dump_bt_info(struct bt_info *, char *where);
 void dump_log(int);
 void set_cpu(int);
 void clear_machdep_cache(void);
 struct stack_hook *gather_text_list(struct bt_info *);
 int get_cpus_online(void);
+int get_cpus_present(void);
+int get_cpus_possible(void);
+int in_cpu_map(int, int);
+void paravirt_init(void);
 void print_stack_text_syms(struct bt_info *, ulong, ulong);
 void back_trace(struct bt_info *);
 #define BT_RAW                     (0x1ULL)
@@ -3039,11 +3651,13 @@
 #define BT_EXCEPTION_FRAME        (0x80ULL)
 #define BT_LINE_NUMBERS          (0x100ULL)
 #define BT_USER_EFRAME           (0x200ULL)
+#define BT_INCOMPLETE_USER_EFRAME  (BT_USER_EFRAME)
 #define BT_SAVE_LASTSP           (0x400ULL)
 #define BT_FROM_EXCEPTION        (0x800ULL)
 #define BT_FROM_CALLFRAME       (0x1000ULL)
 #define BT_EFRAME_SEARCH        (0x2000ULL)
 #define BT_SPECULATE            (0x4000ULL)
+#define BT_FRAMESIZE_DISABLE   (BT_SPECULATE)
 #define BT_RESCHEDULE           (0x8000ULL)
 #define BT_SCHEDULE      (BT_RESCHEDULE)
 #define BT_RET_FROM_SMP_FORK   (0x10000ULL)
@@ -3069,6 +3683,8 @@
 #define BT_DUMPFILE_SEARCH (0x800000000ULL)
 #define BT_EFRAME_SEARCH2 (0x1000000000ULL)
 #define BT_START          (0x2000000000ULL)
+#define BT_TEXT_SYMBOLS_ALL  (0x4000000000ULL)     
+#define BT_XEN_STOP_THIS_CPU (0x8000000000ULL)
 
 #define BT_REF_HEXVAL         (0x1)
 #define BT_REF_SYMBOL         (0x2)
@@ -3101,6 +3717,17 @@
 #define TYPE_S390D       (REMOTE_VERBOSE << 6)
 #define TYPE_NETDUMP     (REMOTE_VERBOSE << 7)
 
+ulonglong xen_m2p(ulonglong);
+
+void read_in_kernel_config(int);
+
+#define IKCFG_INIT   (0)
+#define IKCFG_READ   (1)
+
+#define MAGIC_START  "IKCFG_ST"
+#define MAGIC_END    "IKCFG_ED"
+#define MAGIC_SIZE   (sizeof(MAGIC_START) - 1)
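The IKCFG_ST/IKCFG_ED strings are the markers the kernel's CONFIG_IKCONFIG support places around the gzip'd .config image, and read_in_kernel_config() locates the data between them. A minimal sketch of that marker scan over a raw buffer; decompression is omitted, and the function name and buffer handling are illustrative only:

#include <string.h>
#include <stddef.h>

/*
 * Return a pointer to the data between the IKCFG markers, or NULL if the
 * markers are not found; *len_out receives the length of that data.
 */
static const char *find_ikconfig(const char *buf, size_t len, size_t *len_out)
{
	const size_t mlen = sizeof("IKCFG_ST") - 1;
	const char *start = NULL, *end = NULL;
	size_t i;

	for (i = 0; i + mlen <= len; i++) {
		if (!start && memcmp(buf + i, "IKCFG_ST", mlen) == 0)
			start = buf + i + mlen;
		else if (start && memcmp(buf + i, "IKCFG_ED", mlen) == 0) {
			end = buf + i;
			break;
		}
	}
	if (!start || !end)
		return NULL;
	*len_out = (size_t)(end - start);
	return start;
}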
+
 /*
  *  dev.c
  */
@@ -3129,7 +3756,6 @@
 void x86_display_idt_table(void);
 #define display_idt_table() x86_display_idt_table()
 #define KSYMS_START    (0x1)
-#define PAE            (0x2)
 void x86_dump_eframe_common(struct bt_info *bt, ulong *, int);
 char *x86_function_called_by(ulong);
 struct syment *x86_jmp_error_code(ulong);
@@ -3140,6 +3766,8 @@
 	ulong entry_tramp_start;
 	ulong entry_tramp_end;
 	physaddr_t entry_tramp_start_phys;
+	ulonglong last_pmd_read_PAE;
+	ulonglong last_ptbl_read_PAE;
 };
 
 struct syment *x86_is_entry_tramp_address(ulong, ulong *); 
@@ -3194,19 +3822,54 @@
 #define NMI_STACK 2    /* ebase[] offset to NMI exception stack */
 
 struct machine_specific {
+	ulong userspace_top;
+	ulong page_offset;
+	ulong vmalloc_start_addr;
+	ulong vmalloc_end;
+	ulong vmemmap_vaddr;
+	ulong vmemmap_end;
+	ulong modules_vaddr;
+	ulong modules_end;
+	ulong phys_base;
         char *pml4;
+	char *upml;
+	ulong last_upml_read;
+	ulong last_pml4_read;
 	char *irqstack;
+	ulong irq_eframe_link;
 	struct x86_64_pt_regs_offsets pto;
 	struct x86_64_stkinfo stkinfo;
 };
 
 #define KSYMS_START    (0x1)
 #define PT_REGS_INIT   (0x2)
+#define VM_ORIG        (0x4)
+#define VM_2_6_11      (0x8)
+#define VM_XEN        (0x10)
+#define NO_TSS        (0x20)
+#define SCHED_TEXT    (0x40)
+#define PHYS_BASE     (0x80)
+#define VM_XEN_RHEL4 (0x100)
+#define VMEMMAP      (0x200)
+
+#define VM_FLAGS (VM_ORIG|VM_2_6_11|VM_XEN|VM_XEN_RHEL4)
 
 #define _2MB_PAGE_MASK (~((MEGABYTES(2))-1))
+
+#endif
+
+#if defined(X86) || defined(X86_64)
+
+/*
+ *  unwind_x86_32_64.c
+ */
+void init_unwind_table(void);
+int dwarf_backtrace(struct bt_info *, int, ulong);
+void dwarf_debug(struct bt_info *);
+int dwarf_print_stack_entry(struct bt_info *, int);
+
 #endif
 
-void x86_64_backtrace_notice(ulong);
 
 /*
  * ppc64.c
@@ -3240,13 +3903,42 @@
         ulong hwintrstack[NR_CPUS];
         char *hwstackbuf;
         uint hwstacksize;
-};
+        char *level4;
+        ulong last_level4_read;
+
+	uint l4_index_size;
+	uint l3_index_size;
+	uint l2_index_size;
+	uint l1_index_size;
+
+	uint ptrs_per_l3;
+	uint ptrs_per_l2;
+	uint ptrs_per_l1;
+
+	uint l4_shift;
+	uint l3_shift;
+	uint l2_shift;
+	uint l1_shift;
+
+	uint pte_shift;
+	uint l2_masked_bits;
+};
+
+#define IS_LAST_L4_READ(l4)   ((ulong)(l4) == machdep->machspec->last_level4_read)
+
+#define FILL_L4(L4, TYPE, SIZE) 						\
+    if (!IS_LAST_L4_READ(L4)) {							\
+            readmem((ulonglong)((ulong)(L4)), TYPE, machdep->machspec->level4,	\
+                    SIZE, "level4 page", FAULT_ON_ERROR);			\
+            machdep->machspec->last_level4_read = (ulong)(L4);			\
+    }								            
 
 void ppc64_init(int);
 void ppc64_dump_machdep_table(ulong);
 #define display_idt_table() \
         error(FATAL, "-d option is not applicable to PowerPC architecture\n")
 #define KSYMS_START (0x1)
+#define VM_ORIG     (0x2)
 #endif
 
 /*
@@ -3258,15 +3950,27 @@
 #define display_idt_table() \
         error(FATAL, "-d option is not applicable to PowerPC architecture\n")
 #define KSYMS_START (0x1)
+/* This should match PPC_FEATURE_BOOKE from include/asm-powerpc/cputable.h */
+#define CPU_BOOKE (0x00008000)
 #endif
 
 /*
  *  lkcd_fix_mem.c
  */
 
+struct _dump_header_asm_s;
+struct _dump_header_s;
 ulong get_lkcd_switch_stack(ulong);
-int fix_addr_v8(int);
+int fix_addr_v8(struct _dump_header_asm_s *);
+int lkcd_dump_init_v8_arch(struct _dump_header_s *dh);
 int fix_addr_v7(int);
+int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp);
+int lkcd_get_kernel_start_v8(ulong *addr);
+
+/*
+ * lkcd_v8.c
+ */
+int get_lkcd_regs_for_cpu_v8(struct bt_info *bt, ulong *eip, ulong *esp);
 
 /*
  *  ia64.c
@@ -3283,6 +3987,8 @@
 #define display_idt_table() \
 	error(FATAL, "-d option TBD on ia64 architecture\n");
 int ia64_in_init_stack(ulong addr);
+int ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt);
+physaddr_t ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo);
 
 #define OLD_UNWIND       (0x1)   /* CONFIG_IA64_NEW_UNWIND not turned on */
 #define NEW_UNWIND       (0x2)   /* CONFIG_IA64_NEW_UNWIND turned on */
@@ -3396,10 +4102,30 @@
 int netdump_init(char *, FILE *);
 ulong get_netdump_panic_task(void);
 ulong get_netdump_switch_stack(ulong);
-int netdump_memory_dump(FILE *);
 FILE *set_netdump_fp(FILE *);
+int netdump_memory_dump(FILE *);
 void get_netdump_regs(struct bt_info *, ulong *, ulong *);
 int is_partial_netdump(void);
+void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *);
+void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *);
+struct vmcore_data;
+struct vmcore_data *get_kdump_vmcore_data(void);
+int read_kdump(int, void *, int, ulong, physaddr_t);
+int write_kdump(int, void *, int, ulong, physaddr_t);
+int is_kdump(char *, ulong);
+int kdump_init(char *, FILE *);
+ulong get_kdump_panic_task(void);
+uint kdump_page_size(void);
+int kdump_free_memory(void);
+int kdump_memory_used(void);
+int kdump_memory_dump(FILE *);
+void get_kdump_regs(struct bt_info *, ulong *, ulong *);
+void xen_kdump_p2m_mfn(char *);
+int is_sadump_xen(void);
+void set_xen_phys_start(char *);
+ulong xen_phys_start(void);
+int xen_major_version(void);
+int xen_minor_version(void);
 
 /*
  *  diskdump.c
@@ -3416,6 +4142,28 @@
 int diskdump_memory_dump(FILE *);
 FILE *set_diskdump_fp(FILE *);
 void get_diskdump_regs(struct bt_info *, ulong *, ulong *);
+int diskdump_phys_base(unsigned long *);
+ulong *diskdump_flags;
+int is_partial_diskdump(void);
+
+/*
+ * xendump.c
+ */
+int is_xendump(char *);
+int read_xendump(int, void *, int, ulong, physaddr_t);
+int write_xendump(int, void *, int, ulong, physaddr_t);
+uint xendump_page_size(void);
+int xendump_free_memory(void);
+int xendump_memory_used(void);
+int xendump_init(char *, FILE *);
+int xendump_memory_dump(FILE *);
+ulong get_xendump_panic_task(void);
+void get_xendump_regs(struct bt_info *, ulong *, ulong *);
+char *xc_core_mfn_to_page(ulong, char *);
+int xc_core_mfn_to_page_index(ulong);
+void xendump_panic_hook(char *);
+int read_xendump_hyper(int, void *, int, ulong, physaddr_t);
+struct xendump_data *get_xendump_data(void);
 
 /*
  *  net.c
@@ -3493,6 +4241,8 @@
 void lkcd_dumpfile_complaint(uint32_t, uint32_t, int);
 int set_mb_benchmark(ulong);
 ulonglong fix_lkcd_address(ulonglong);
+int lkcd_get_kernel_start(ulong *addr);
+int get_lkcd_regs_for_cpu(struct bt_info *bt, ulong *eip, ulong *esp);
 
 /*
  * lkcd_v1.c
@@ -3560,6 +4310,7 @@
 #define LKCD_DUMP_V7                  (0x7)  /* DUMP_VERSION_NUMBER */
 #define LKCD_DUMP_V8                  (0x8)  /* DUMP_VERSION_NUMBER */
 #define LKCD_DUMP_V9                  (0x9)  /* DUMP_VERSION_NUMBER */
+#define LKCD_DUMP_V10                 (0xa)  /* DUMP_VERSION_NUMBER */
 
 #define LKCD_DUMP_VERSION_NUMBER_MASK (0xf)
 #define LKCD_DUMP_RAW                 (0x1)   /* DUMP_[DH_]RAW */ 
@@ -3764,8 +4515,8 @@
 extern int prettyprint_structs;
 extern int prettyprint_arrays;
 extern int repeat_count_threshold;
-extern int repeat_count_threshold;
 extern unsigned int print_max;
+extern int stop_print_at_null;
 
 /*
  *  gdb/utils.c
@@ -3814,4 +4565,8 @@
 extern int have_partial_symbols(void); 
 extern int have_full_symbols(void);
 
+#if defined(X86) || defined(X86_64) || defined(IA64)
+#define XEN_HYPERVISOR_ARCH 
+#endif
+
 #endif /* !GDB_COMMON */
--- crash/netdump.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/netdump.c	2009-01-15 16:17:17.000000000 -0500
@@ -1,7 +1,7 @@
 /* netdump.c 
  *
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved.
  *
  * This software may be freely redistributed under the terms of the
  * GNU General Public License.
@@ -13,38 +13,14 @@
  * Author: David Anderson
  */
 
+#define _LARGEFILE64_SOURCE 1  /* stat64() */
+
 #include "defs.h"
 #include "netdump.h"
 
-struct pt_load_segment {
-	off_t file_offset;
-	physaddr_t phys_start;
-	physaddr_t phys_end;
-};
-
-struct netdump_data {
-	ulong flags;
-	int ndfd;
-	FILE *ofp;
-	uint header_size;
-	char *netdump_header;
-	uint num_pt_load_segments;
-	struct pt_load_segment *pt_load_segments;
-        Elf32_Ehdr *elf32;
-        Elf32_Phdr *notes32;
-        Elf32_Phdr *load32;
-        Elf64_Ehdr *elf64;
-        Elf64_Phdr *notes64;
-        Elf64_Phdr *load64;
-        void *nt_prstatus;
-        void *nt_prpsinfo;
-        void *nt_taskstruct;
-	ulong task_struct;
-	ulong switch_stack;
-};
-
-static struct netdump_data netdump_data = { 0 };
-static struct netdump_data *nd = &netdump_data;
+static struct vmcore_data vmcore_data = { 0 };
+static struct vmcore_data *nd = &vmcore_data;
+static struct xen_kdump_data xen_kdump_data = { 0 };
 static void netdump_print(char *, ...);
 static void dump_Elf32_Ehdr(Elf32_Ehdr *);
 static void dump_Elf32_Phdr(Elf32_Phdr *, int);
@@ -52,31 +28,41 @@
 static void dump_Elf64_Ehdr(Elf64_Ehdr *);
 static void dump_Elf64_Phdr(Elf64_Phdr *, int);
 static size_t dump_Elf64_Nhdr(Elf64_Off offset, int);
-static void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *);
-static void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *);
 static void get_netdump_regs_ppc64(struct bt_info *, ulong *, ulong *);
+static physaddr_t xen_kdump_p2m(physaddr_t);
+static void check_dumpfile_size(char *);
 
 #define ELFSTORE 1
 #define ELFREAD  0
-	
+
+#define MIN_PAGE_SIZE (4096)
+
+/* 
+ * PPC64 and IA64 architectures have configurable page sizes,
+ * which can differ from the host machine's page size.
+ */
+#define READ_PAGESIZE_FROM_VMCOREINFO() \
+	(machine_type("IA64") || machine_type("PPC64"))
+
 /*
- *  Determine whether a file is a netdump creation, and if TRUE, 
- *  initialize the netdump_data structure.
+ *  Determine whether a file is a netdump/diskdump/kdump creation, 
+ *  and if TRUE, initialize the vmcore_data structure.
  */
 int 
-is_netdump(char *file, ulong source) 
+is_netdump(char *file, ulong source_query) 
 {
-        int i;
-	int fd;
+        int i, fd, swap;
 	Elf32_Ehdr *elf32;
 	Elf32_Phdr *load32;
 	Elf64_Ehdr *elf64;
 	Elf64_Phdr *load64;
-	char header[MIN_NETDUMP_ELF_HEADER_SIZE];
+	char eheader[MIN_NETDUMP_ELF_HEADER_SIZE];
 	char buf[BUFSIZE];
 	size_t size, len, tot;
         Elf32_Off offset32;
         Elf64_Off offset64;
+	ulong tmp_flags;
+	char *tmp_elf_header;
 
 	if ((fd = open(file, O_RDWR)) < 0) {
         	if ((fd = open(file, O_RDONLY)) < 0) {
@@ -87,7 +73,7 @@
 	}
 
 	size = MIN_NETDUMP_ELF_HEADER_SIZE;
-        if (read(fd, header, size) != size) {
+        if (read(fd, eheader, size) != size) {
                 sprintf(buf, "%s: read", file);
                 perror(buf);
 		goto bailout;
@@ -99,89 +85,163 @@
                 goto bailout;
 	}
 
-	elf32 = (Elf32_Ehdr *)&header[0];
-	elf64 = (Elf64_Ehdr *)&header[0];
+	tmp_flags = 0;
+	elf32 = (Elf32_Ehdr *)&eheader[0];
+	elf64 = (Elf64_Ehdr *)&eheader[0];
 
   	/* 
-	 *  Verify the ELF header 
+	 *  Verify the ELF header, and determine the dumpfile format.
+	 * 
+	 *  For now, kdump vmcores differ from netdump/diskdump like so:
+	 *
+ 	 *   1. The first kdump PT_LOAD segment is packed just after
+	 *      the ELF header, whereas netdump/diskdump page-align 
+	 *      the first PT_LOAD segment.
+	 *   2. Each kdump PT_LOAD segment has a p_align field of zero,
+	 *      whereas netdump/diskdump have their p_align fields set
+	 *      to the system page-size. 
+	 *
+	 *  If either kdump difference is seen, presume kdump -- this
+	 *  is obviously subject to change.
 	 */
-        if (STRNEQ(elf32->e_ident, ELFMAG) && 
-	    (elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
-  	    (elf32->e_ident[EI_DATA] == ELFDATA2LSB) &&
-    	    (elf32->e_ident[EI_VERSION] == EV_CURRENT) &&
-	    (elf32->e_type == ET_CORE) &&
-	    (elf32->e_version == EV_CURRENT) &&
-	    (elf32->e_phnum >= 2)) {
-		switch (elf32->e_machine)
+
+	if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT)
+		goto bailout;
+
+	swap = (((eheader[EI_DATA] == ELFDATA2LSB) && 
+	     (__BYTE_ORDER == __BIG_ENDIAN)) ||
+	    ((eheader[EI_DATA] == ELFDATA2MSB) && 
+	     (__BYTE_ORDER == __LITTLE_ENDIAN)));
+
+        if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
+	    (swap16(elf32->e_type, swap) == ET_CORE) &&
+	    (swap32(elf32->e_version, swap) == EV_CURRENT) &&
+	    (swap16(elf32->e_phnum, swap) >= 2)) {
+		switch (swap16(elf32->e_machine, swap))
 		{
 		case EM_386:
-			if (machine_type("X86"))
-				break;
+			if (machine_type_mismatch(file, "X86", NULL, 
+			    source_query))
+				goto bailout;
+			break;
+
 		default:
-                	goto bailout;
+			if (machine_type_mismatch(file, "(unknown)", NULL,
+			    source_query))
+				goto bailout;
 		}
-                nd->flags |= NETDUMP_ELF32;
+
+		if (endian_mismatch(file, elf32->e_ident[EI_DATA], 
+		    source_query))
+			goto bailout;
+
                 load32 = (Elf32_Phdr *)
-                        &header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)];
+                        &eheader[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)];
                 size = (size_t)load32->p_offset;
-	} else if (STRNEQ(elf64->e_ident, ELFMAG) &&
-	    (elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
-	    (elf64->e_ident[EI_VERSION] == EV_CURRENT) &&
-	    (elf64->e_type == ET_CORE) &&
-	    (elf64->e_version == EV_CURRENT) &&
-	    (elf64->e_phnum >= 2)) { 
-		switch (elf64->e_machine)
+
+		if ((load32->p_offset & (MIN_PAGE_SIZE-1)) &&
+		    (load32->p_align == 0))
+                	tmp_flags |= KDUMP_ELF32;
+		else
+                	tmp_flags |= NETDUMP_ELF32;
+	} else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
+	    (swap16(elf64->e_type, swap) == ET_CORE) &&
+	    (swap32(elf64->e_version, swap) == EV_CURRENT) &&
+	    (swap16(elf64->e_phnum, swap) >= 2)) { 
+		switch (swap16(elf64->e_machine, swap))
 		{
 		case EM_IA_64:
-			if ((elf64->e_ident[EI_DATA] == ELFDATA2LSB) &&
-				machine_type("IA64"))
-				break;
-			else
+			if (machine_type_mismatch(file, "IA64", NULL, 
+			    source_query))
 				goto bailout;
+			break;
 
 		case EM_PPC64:
-			if ((elf64->e_ident[EI_DATA] == ELFDATA2MSB) &&
-				machine_type("PPC64"))
-				break;
-			else
+			if (machine_type_mismatch(file, "PPC64", NULL, 
+			    source_query))
 				goto bailout;
+			break;
 
 		case EM_X86_64:
-			if ((elf64->e_ident[EI_DATA] == ELFDATA2LSB) &&
-				machine_type("X86_64"))
-				break;
-			else
+			if (machine_type_mismatch(file, "X86_64", NULL,
+			    source_query))
 				goto bailout;
+			break;
+
+		case EM_386:
+			if (machine_type_mismatch(file, "X86", NULL,
+			    source_query))
+				goto bailout;
+			break;
 
 		default:
-			goto bailout;
+			if (machine_type_mismatch(file, "(unknown)", NULL,
+			    source_query))
+				goto bailout;
 		}
-                nd->flags |= NETDUMP_ELF64;
+
+		if (endian_mismatch(file, elf64->e_ident[EI_DATA], 
+		    source_query))
+			goto bailout;
+
                 load64 = (Elf64_Phdr *)
-                        &header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)];
+                        &eheader[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)];
                 size = (size_t)load64->p_offset;
-	} else
+		if ((load64->p_offset & (MIN_PAGE_SIZE-1)) &&
+		    (load64->p_align == 0))
+                	tmp_flags |= KDUMP_ELF64;
+		else
+                	tmp_flags |= NETDUMP_ELF64;
+	} else {
+		if (CRASHDEBUG(2))
+			error(INFO, "%s: not a %s ELF dumpfile\n",
+				file, source_query == NETDUMP_LOCAL ?
+				"netdump" : "kdump");
+			
+			
 		goto bailout;
+	}
+
+	switch (DUMPFILE_FORMAT(tmp_flags))
+	{
+	case NETDUMP_ELF32:
+	case NETDUMP_ELF64:
+		if (source_query & (NETDUMP_LOCAL|NETDUMP_REMOTE))
+			break;
+		else
+			goto bailout;
+
+	case KDUMP_ELF32:
+	case KDUMP_ELF64:
+		if (source_query & KDUMP_LOCAL)
+			break;
+		else
+			goto bailout;
+	}
 
-	if ((nd->netdump_header = (char *)malloc(size)) == NULL) {
-		fprintf(stderr, "cannot malloc netdump header buffer\n");
+	if ((tmp_elf_header = (char *)malloc(size)) == NULL) {
+		fprintf(stderr, "cannot malloc ELF header buffer\n");
 		clean_exit(1);
 	}
 
-        if (read(fd, nd->netdump_header, size) != size) {
+        if (read(fd, tmp_elf_header, size) != size) {
                 sprintf(buf, "%s: read", file);
                 perror(buf);
+		free(tmp_elf_header);
                 goto bailout;
         }
 
 	nd->ndfd = fd;
-	nd->flags |= source;
+	nd->elf_header = tmp_elf_header;
+	nd->flags = tmp_flags;
+	nd->flags |= source_query;
 
-	switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64))
+	switch (DUMPFILE_FORMAT(nd->flags))
 	{
 	case NETDUMP_ELF32:
+	case KDUMP_ELF32:
 		nd->header_size = load32->p_offset;
-        	nd->elf32 = (Elf32_Ehdr *)&nd->netdump_header[0];
+        	nd->elf32 = (Elf32_Ehdr *)&nd->elf_header[0];
 		nd->num_pt_load_segments = nd->elf32->e_phnum - 1;
 		if ((nd->pt_load_segments = (struct pt_load_segment *)
 		    malloc(sizeof(struct pt_load_segment) *
@@ -190,9 +250,11 @@
 			clean_exit(1);
 		}
         	nd->notes32 = (Elf32_Phdr *)
-		    &nd->netdump_header[sizeof(Elf32_Ehdr)];
+		    &nd->elf_header[sizeof(Elf32_Ehdr)];
         	nd->load32 = (Elf32_Phdr *)
-		    &nd->netdump_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)];
+		    &nd->elf_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)];
+		if (DUMPFILE_FORMAT(nd->flags) == NETDUMP_ELF32)
+			nd->page_size = (uint)nd->load32->p_align;
                 dump_Elf32_Ehdr(nd->elf32);
                 dump_Elf32_Phdr(nd->notes32, ELFREAD);
 		for (i = 0; i < nd->num_pt_load_segments; i++) 
@@ -205,8 +267,9 @@
 		break;
 
 	case NETDUMP_ELF64:
+	case KDUMP_ELF64:
                 nd->header_size = load64->p_offset;
-                nd->elf64 = (Elf64_Ehdr *)&nd->netdump_header[0];
+                nd->elf64 = (Elf64_Ehdr *)&nd->elf_header[0];
 		nd->num_pt_load_segments = nd->elf64->e_phnum - 1;
                 if ((nd->pt_load_segments = (struct pt_load_segment *)
                     malloc(sizeof(struct pt_load_segment) *
@@ -215,9 +278,11 @@
                         clean_exit(1);
                 }
                 nd->notes64 = (Elf64_Phdr *)
-                    &nd->netdump_header[sizeof(Elf64_Ehdr)];
+                    &nd->elf_header[sizeof(Elf64_Ehdr)];
                 nd->load64 = (Elf64_Phdr *)
-                    &nd->netdump_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)];
+                    &nd->elf_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)];
+		if (DUMPFILE_FORMAT(nd->flags) == NETDUMP_ELF64)
+			nd->page_size = (uint)nd->load64->p_align;
                 dump_Elf64_Ehdr(nd->elf64);
                 dump_Elf64_Phdr(nd->notes64, ELFREAD);
 		for (i = 0; i < nd->num_pt_load_segments; i++)
@@ -230,6 +295,9 @@
 		break;
 	}
 
+	if (CRASHDEBUG(1))
+		netdump_memory_dump(fp);
+
 	return nd->header_size;
 
 bailout:
@@ -238,15 +306,97 @@
 }
 
 /*
+ *  Return the e_version number of an ELF file
+ *  (or -1 if it's not a readable ELF file)
+ */
+int
+file_elf_version(char *file)
+{
+	int fd, size;
+	Elf32_Ehdr *elf32;
+	Elf64_Ehdr *elf64;
+	char header[MIN_NETDUMP_ELF_HEADER_SIZE];
+	char buf[BUFSIZE];
+
+	if ((fd = open(file, O_RDONLY)) < 0) {
+		sprintf(buf, "%s: open", file);
+		perror(buf);
+		return -1;
+	}
+
+	size = MIN_NETDUMP_ELF_HEADER_SIZE;
+        if (read(fd, header, size) != size) {
+                sprintf(buf, "%s: read", file);
+                perror(buf);
+		close(fd);
+		return -1;
+	}
+	close(fd);
+
+	elf32 = (Elf32_Ehdr *)&header[0];
+	elf64 = (Elf64_Ehdr *)&header[0];
+
+        if (STRNEQ(elf32->e_ident, ELFMAG) &&
+	    (elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
+  	    (elf32->e_ident[EI_DATA] == ELFDATA2LSB) &&
+    	    (elf32->e_ident[EI_VERSION] == EV_CURRENT)) {
+		return (elf32->e_version);
+	} else if (STRNEQ(elf64->e_ident, ELFMAG) &&
+	    (elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
+	    (elf64->e_ident[EI_VERSION] == EV_CURRENT)) {
+		return (elf64->e_version);
+	} 
+	
+	return -1;
+}
+
+/* 
+ *  Check whether any PT_LOAD segment goes beyond the file size.
+ */
+static void
+check_dumpfile_size(char *file)
+{
+	int i;
+	struct stat64 stat;
+	struct pt_load_segment *pls;
+	uint64_t segment_end;
+
+	if (stat64(file, &stat) < 0)
+		return;
+
+	for (i = 0; i < nd->num_pt_load_segments; i++) {
+		pls = &nd->pt_load_segments[i];
+
+		segment_end = pls->file_offset + 
+			(pls->phys_end - pls->phys_start);
+
+		if (segment_end > stat.st_size) {
+			error(WARNING, "%s: may be truncated or incomplete\n"
+				"         PT_LOAD p_offset: %lld\n"
+				"                 p_filesz: %lld\n"
+				"           bytes required: %lld\n"
+				"            dumpfile size: %lld\n\n",
+				file, pls->file_offset, 
+				pls->phys_end - pls->phys_start,  
+				segment_end, stat.st_size);
+			return;
+		}
+	}
+}
+
+/*
  *  Perform any post-dumpfile determination stuff here.
  */
 int
 netdump_init(char *unused, FILE *fptr)
 {
-	if (!NETDUMP_VALID())
+	if (!VMCORE_VALID())
 		return FALSE;
 
 	nd->ofp = fptr;
+
+	check_dumpfile_size(pc->dumpfile);
+
         return TRUE;
 }
 
@@ -263,19 +413,19 @@
 	/*
 	 *  The Elf32_Phdr has 32-bit fields for p_paddr, p_filesz and
 	 *  p_memsz, so for now, multiple PT_LOAD segment support is
-	 *  restricted to 64-bit machines.  Until a "standard" becomes 
-	 *  available in the future that deals with physical memory 
-	 *  segments that start at greater then 4GB, or memory segments
-	 *  sizes that are greater than 4GB (kexec?), then this feature
-	 *  is restricted to 64-bit machines.
+	 *  restricted to 64-bit machines for netdump/diskdump vmcores.
+	 *  However, kexec/kdump has introduced the optional use of a
+         *  64-bit ELF header for 32-bit processors.
 	 */ 
-        switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64))
+        switch (DUMPFILE_FORMAT(nd->flags))
 	{
 	case NETDUMP_ELF32:
 		offset = (off_t)paddr + (off_t)nd->header_size;
 		break;
 
 	case NETDUMP_ELF64:
+	case KDUMP_ELF32:
+	case KDUMP_ELF64:
 		if (nd->num_pt_load_segments == 1) {
 			offset = (off_t)paddr + (off_t)nd->header_size;
 			break;
@@ -289,6 +439,11 @@
 					pls->file_offset;
 				break;
 			}
+			if (pls->zero_fill && (paddr >= pls->phys_end) &&
+			    (paddr < pls->zero_fill)) {
+				memset(bufptr, 0, cnt);
+                		return cnt;
+			}
 		}
 	
 		if (!offset) 
@@ -302,24 +457,57 @@
 
         if (read(nd->ndfd, bufptr, cnt) != cnt)
                 return READ_ERROR;
+
         return cnt;
 }
 
 /*
- *  Write to a netdump-created dumpfile.
+ *  Write to a netdump-created dumpfile.  Note that cmd_wr() does not
+ *  allow writes to dumpfiles, so you can't get here from there.
+ *  But, if it would ever be helpful, here it is...
  */
 int
 write_netdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
 {
 	off_t offset;
+	struct pt_load_segment *pls;
+	int i;
 
-        offset = (off_t)paddr + (off_t)nd->header_size;
+        switch (DUMPFILE_FORMAT(nd->flags))
+	{
+	case NETDUMP_ELF32:
+		offset = (off_t)paddr + (off_t)nd->header_size;
+		break;
+
+	case NETDUMP_ELF64:
+	case KDUMP_ELF32:
+	case KDUMP_ELF64:
+		if (nd->num_pt_load_segments == 1) {
+			offset = (off_t)paddr + (off_t)nd->header_size;
+			break;
+		}
+
+		for (i = offset = 0; i < nd->num_pt_load_segments; i++) {
+			pls = &nd->pt_load_segments[i];
+			if ((paddr >= pls->phys_start) &&
+			    (paddr < pls->phys_end)) {
+				offset = (off_t)(paddr - pls->phys_start) +
+					pls->file_offset;
+				break;
+			}
+		}
+	
+		if (!offset) 
+	                return READ_ERROR;
+		
+		break;
+	}	
 
-        if (lseek(nd->ndfd, offset, SEEK_SET) != offset)
+        if (lseek(nd->ndfd, offset, SEEK_SET) == -1)
                 return SEEK_ERROR;
 
         if (write(nd->ndfd, bufptr, cnt) != cnt)
-                return WRITE_ERROR;
+                return READ_ERROR;
 
         return cnt;
 }
@@ -330,7 +518,7 @@
 FILE *
 set_netdump_fp(FILE *fp)
 {
-	if (!NETDUMP_VALID())
+	if (!VMCORE_VALID())
 		return NULL;
 
 	nd->ofp = fp;
@@ -346,7 +534,7 @@
         char buf[BUFSIZE];
         va_list ap;
 
-        if (!fmt || !strlen(fmt) || !NETDUMP_VALID())
+        if (!fmt || !strlen(fmt) || !VMCORE_VALID())
                 return;
 
         va_start(ap, fmt);
@@ -362,33 +550,21 @@
 uint 
 netdump_page_size(void)
 {
-	uint pagesz;
-
-	if (!NETDUMP_VALID())
+	if (!VMCORE_VALID())
 		return 0;
 
-	switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64))
-	{
-	case NETDUMP_ELF32:
-		pagesz = (uint)nd->load32->p_align;
-		break;
-	case NETDUMP_ELF64:
-		pagesz = (uint)nd->load64->p_align;
-		break;
-	}
-
-	return pagesz;
+	return nd->page_size;
 }
 
 int 
 netdump_free_memory(void)
 {
-	return (NETDUMP_VALID() ? 0 : 0);
+	return (VMCORE_VALID() ? 0 : 0);
 }
 
 int netdump_memory_used(void)
 {
-	return (NETDUMP_VALID() ? 0 : 0);
+	return (VMCORE_VALID() ? 0 : 0);
 }
 
 /*
@@ -414,21 +590,57 @@
 #ifdef DAEMON
 	return nd->task_struct;
 #else
-	int i;
+	int i, crashing_cpu;
         size_t len;
 	char *user_regs;
 	ulong ebp, esp, task;
 
-	if (!NETDUMP_VALID() || !get_active_set())
-		return NO_TASK;
+	if (!VMCORE_VALID() || !get_active_set())
+		goto panic_task_undetermined;
 
-	if (nd->task_struct)
+	if (nd->task_struct) {
+		if (CRASHDEBUG(1))
+			error(INFO, 
+			    "get_netdump_panic_task: NT_TASKSTRUCT: %lx\n", 
+				nd->task_struct);
 		return nd->task_struct;
+	}
+
+        switch (DUMPFILE_FORMAT(nd->flags))
+        {
+        case NETDUMP_ELF32:
+        case NETDUMP_ELF64:
+		crashing_cpu = -1;
+		break;
+
+        case KDUMP_ELF32:
+        case KDUMP_ELF64:
+		crashing_cpu = -1;
+		if (symbol_exists("crashing_cpu")) {
+			get_symbol_data("crashing_cpu", sizeof(int), &i);
+			if ((i >= 0) && (i < nd->num_prstatus_notes)) {
+				crashing_cpu = i;
+				if (CRASHDEBUG(1))
+					error(INFO, 
+				   "get_netdump_panic_task: crashing_cpu: %d\n",
+						crashing_cpu);
+			}
+		}
+
+		if ((nd->num_prstatus_notes > 1) && (crashing_cpu == -1))
+			goto panic_task_undetermined;
+		break;
+	}
+
+        if (nd->elf32 && (nd->elf32->e_machine == EM_386)) {
+		Elf32_Nhdr *note32;
+
+                if ((nd->num_prstatus_notes > 1) && (crashing_cpu != -1))
+                        note32 = (Elf32_Nhdr *)
+                                nd->nt_prstatus_percpu[crashing_cpu];
+                else
+                        note32 = (Elf32_Nhdr *)nd->nt_prstatus;
 
-        if (nd->elf32 && nd->elf32->e_machine == EM_386) {
-	        Elf32_Nhdr *note32 = (Elf32_Nhdr *)
-			((char *)nd->elf32 + nd->notes32->p_offset);
-		
 	        len = sizeof(Elf32_Nhdr);
 	        len = roundup(len + note32->n_namesz, 4);
 	        len = roundup(len + note32->n_descsz, 4);
@@ -437,14 +649,15 @@
 			- SIZE(user_regs_struct) - sizeof(int);
 		ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp));
 		esp = ULONG(user_regs + OFFSET(user_regs_struct_esp));
+check_ebp_esp:
 		if (CRASHDEBUG(1)) 
-			fprintf(fp, 
-			    "get_netdump_panic_task: esp: %lx ebp: %lx\n",
+			error(INFO, 
+			    "get_netdump_panic_task: NT_PRSTATUS esp: %lx ebp: %lx\n",
 				esp, ebp);
 		if (IS_KVADDR(esp)) {
 			task = stkptr_to_task(esp);
 			if (CRASHDEBUG(1))
-				fprintf(fp, 
+				error(INFO, 
 			    "get_netdump_panic_task: esp: %lx -> task: %lx\n",
 					esp, task);
 			for (i = 0; task && (i < NR_CPUS); i++) {
@@ -455,7 +668,7 @@
                 if (IS_KVADDR(ebp)) {
                         task = stkptr_to_task(ebp);
 			if (CRASHDEBUG(1))
-				fprintf(fp, 
+				error(INFO, 
 			    "get_netdump_panic_task: ebp: %lx -> task: %lx\n",
 					ebp, task);
                         for (i = 0; task && (i < NR_CPUS); i++) {
@@ -464,25 +677,37 @@
                         }
                 }
 	} else if (nd->elf64) {
-	        Elf64_Nhdr *note64 = (Elf64_Nhdr *)
-			((char *)nd->elf64 + nd->notes64->p_offset);
-		
+		Elf64_Nhdr *note64;
+
+                if ((nd->num_prstatus_notes > 1) && (crashing_cpu != -1))
+                        note64 = (Elf64_Nhdr *)
+                                nd->nt_prstatus_percpu[crashing_cpu];
+                else
+                        note64 = (Elf64_Nhdr *)nd->nt_prstatus;
+
 	        len = sizeof(Elf64_Nhdr);
 	        len = roundup(len + note64->n_namesz, 4);
 		user_regs = (char *)((char *)note64 + len +
 			MEMBER_OFFSET("elf_prstatus", "pr_reg"));
+
+		if (nd->elf64->e_machine == EM_386) {
+                	ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp));
+                	esp = ULONG(user_regs + OFFSET(user_regs_struct_esp));
+			goto check_ebp_esp;
+		}
+
 		if (nd->elf64->e_machine == EM_PPC64) {
 			/*
 			 * Get the GPR1 register value.
 			 */
 			esp = *(ulong *)((char *)user_regs + 8);
 			if (CRASHDEBUG(1)) 
-				fprintf(fp, 
-			    	"get_netdump_panic_task: esp: %lx\n", esp);
+				error(INFO, 
+			    	"get_netdump_panic_task: NT_PRSTATUS esp: %lx\n", esp);
 			if (IS_KVADDR(esp)) {
 				task = stkptr_to_task(esp);
 				if (CRASHDEBUG(1))
-					fprintf(fp, 
+					error(INFO, 
 			    		"get_netdump_panic_task: esp: %lx -> task: %lx\n",
 						esp, task);
 				for (i = 0; task && (i < NR_CPUS); i++) {
@@ -493,8 +718,10 @@
 		}
 	} 
 
+panic_task_undetermined:
+
 	if (CRASHDEBUG(1))
-		fprintf(fp, "get_netdump_panic_task: returning NO_TASK\n");
+		error(INFO, "get_netdump_panic_task: failed\n");
 
 	return NO_TASK;
 #endif
@@ -512,7 +739,7 @@
 		return nd->switch_stack;
 	return 0;
 #else
-	if (!NETDUMP_VALID() || !get_active_set())
+	if (!VMCORE_VALID() || !get_active_set())
 		return 0;
 
 	if (nd->task_struct == task)
@@ -525,33 +752,75 @@
 int
 netdump_memory_dump(FILE *fp)
 {
-	int i, others;
+	int i, others, wrap, flen;
 	size_t len, tot;
 	FILE *fpsave;
 	Elf32_Off offset32;
 	Elf32_Off offset64;
 	struct pt_load_segment *pls;
 
-	if (!NETDUMP_VALID())
+	if (!VMCORE_VALID())
 		return FALSE;
 
 	fpsave = nd->ofp;
 	nd->ofp = fp;
 
-	netdump_print("netdump_data: \n");
+	netdump_print("vmcore_data: \n");
 	netdump_print("                  flags: %lx (", nd->flags);
 	others = 0;
 	if (nd->flags & NETDUMP_LOCAL)
 		netdump_print("%sNETDUMP_LOCAL", others++ ? "|" : "");
+	if (nd->flags & KDUMP_LOCAL)
+		netdump_print("%sKDUMP_LOCAL", others++ ? "|" : "");
 	if (nd->flags & NETDUMP_REMOTE)
 		netdump_print("%sNETDUMP_REMOTE", others++ ? "|" : "");
 	if (nd->flags & NETDUMP_ELF32)
 		netdump_print("%sNETDUMP_ELF32", others++ ? "|" : "");
 	if (nd->flags & NETDUMP_ELF64)
 		netdump_print("%sNETDUMP_ELF64", others++ ? "|" : "");
+	if (nd->flags & KDUMP_ELF32)
+		netdump_print("%sKDUMP_ELF32", others++ ? "|" : "");
+	if (nd->flags & KDUMP_ELF64)
+		netdump_print("%sKDUMP_ELF64", others++ ? "|" : "");
 	if (nd->flags & PARTIAL_DUMP)
 		netdump_print("%sPARTIAL_DUMP", others++ ? "|" : "");
 	netdump_print(")\n");
+	if ((pc->flags & RUNTIME) && symbol_exists("dump_level")) {
+		int dump_level;
+                if (readmem(symbol_value("dump_level"), KVADDR, &dump_level,
+                    sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR)) {
+			netdump_print("             dump_level: %d (0x%x) %s", 
+				dump_level, dump_level, 
+				dump_level > 0 ? "(" : "");
+
+#define DUMP_EXCLUDE_CACHE 0x00000001   /* Exclude LRU & SwapCache pages*/
+#define DUMP_EXCLUDE_CLEAN 0x00000002   /* Exclude all-zero pages */
+#define DUMP_EXCLUDE_FREE  0x00000004   /* Exclude free pages */
+#define DUMP_EXCLUDE_ANON  0x00000008   /* Exclude Anon pages */
+#define DUMP_SAVE_PRIVATE  0x00000010   /* Save private pages */
+
+		        others = 0;
+        		if (dump_level & DUMP_EXCLUDE_CACHE)
+                		netdump_print("%sDUMP_EXCLUDE_CACHE", 
+					others++ ? "|" : "");
+        		if (dump_level & DUMP_EXCLUDE_CLEAN)
+                		netdump_print("%sDUMP_EXCLUDE_CLEAN", 
+					others++ ? "|" : "");
+        		if (dump_level & DUMP_EXCLUDE_FREE)
+                		netdump_print("%sDUMP_EXCLUDE_FREE", 
+					others++ ? "|" : "");
+        		if (dump_level & DUMP_EXCLUDE_ANON)
+                		netdump_print("%sDUMP_EXCLUDE_ANON", 
+					others++ ? "|" : "");
+        		if (dump_level & DUMP_SAVE_PRIVATE)
+                		netdump_print("%sDUMP_SAVE_PRIVATE", 
+					others++ ? "|" : "");
+			netdump_print("%s\n", dump_level > 0 ? ")" : "");
+		} else
+			netdump_print("             dump_level: (unknown)\n");
+	} else if (!(pc->flags & RUNTIME) && symbol_exists("dump_level"))
+		netdump_print("             dump_level: (undetermined)\n");
+
 	netdump_print("                   ndfd: %d\n", nd->ndfd);
 	netdump_print("                    ofp: %lx\n", nd->ofp);
 	netdump_print("            header_size: %d\n", nd->header_size);
@@ -565,8 +834,10 @@
 			pls->phys_start);
 		netdump_print("               phys_end: %llx\n", 
 			pls->phys_end);
+		netdump_print("              zero_fill: %llx\n", 
+			pls->zero_fill);
 	}
-	netdump_print("         netdump_header: %lx\n", nd->netdump_header);
+	netdump_print("             elf_header: %lx\n", nd->elf_header);
 	netdump_print("                  elf32: %lx\n", nd->elf32);
 	netdump_print("                notes32: %lx\n", nd->notes32);
 	netdump_print("                 load32: %lx\n", nd->load32);
@@ -577,11 +848,74 @@
 	netdump_print("            nt_prpsinfo: %lx\n", nd->nt_prpsinfo);
 	netdump_print("          nt_taskstruct: %lx\n", nd->nt_taskstruct);
 	netdump_print("            task_struct: %lx\n", nd->task_struct);
-	netdump_print("           switch_stack: %lx\n\n", nd->switch_stack);
+	netdump_print("              page_size: %d\n", nd->page_size);
+	netdump_print("           switch_stack: %lx\n", nd->switch_stack);
+	netdump_print("         xen_kdump_data: %s\n",
+		XEN_CORE_DUMPFILE() ? " " : "(unused)");
+	if (XEN_CORE_DUMPFILE()) {
+		netdump_print("                    flags: %lx (", nd->xen_kdump_data->flags);
+		others = 0;
+        	if (nd->xen_kdump_data->flags & KDUMP_P2M_INIT)
+                	netdump_print("%sKDUMP_P2M_INIT", others++ ? "|" : "");
+        	if (nd->xen_kdump_data->flags & KDUMP_CR3)
+                	netdump_print("%sKDUMP_CR3", others++ ? "|" : "");
+        	if (nd->xen_kdump_data->flags & KDUMP_MFN_LIST)
+                	netdump_print("%sKDUMP_MFN_LIST", others++ ? "|" : "");
+		netdump_print(")\n");
+		netdump_print("                  p2m_mfn: %lx\n", 
+			nd->xen_kdump_data->p2m_mfn);
+		netdump_print("                      cr3: %lx\n", 
+			nd->xen_kdump_data->cr3);
+		netdump_print("            last_mfn_read: %lx\n", 
+			nd->xen_kdump_data->last_mfn_read);
+		netdump_print("            last_pmd_read: %lx\n", 
+			nd->xen_kdump_data->last_pmd_read);
+		netdump_print("                     page: %lx\n", 
+			nd->xen_kdump_data->page);
+		netdump_print("                 accesses: %ld\n", 
+			nd->xen_kdump_data->accesses);
+		netdump_print("               cache_hits: %ld ", 
+			nd->xen_kdump_data->cache_hits);
+      		if (nd->xen_kdump_data->accesses)
+                	netdump_print("(%ld%%)", 
+			    nd->xen_kdump_data->cache_hits * 100 / nd->xen_kdump_data->accesses);
+		netdump_print("\n               p2m_frames: %d\n", 
+			nd->xen_kdump_data->p2m_frames);
+		netdump_print("           xen_phys_start: %lx\n", 
+			nd->xen_kdump_data->xen_phys_start);
+		netdump_print("        xen_major_version: %d\n", 
+			nd->xen_kdump_data->xen_major_version);
+		netdump_print("        xen_minor_version: %d\n", 
+			nd->xen_kdump_data->xen_minor_version);
+		netdump_print("       p2m_mfn_frame_list: %lx\n", 
+			nd->xen_kdump_data->p2m_mfn_frame_list);
+		for (i = 0; i < nd->xen_kdump_data->p2m_frames; i++)
+			netdump_print("%lx ", 
+				nd->xen_kdump_data->p2m_mfn_frame_list[i]);
+		if (i) netdump_print("\n");
+	}
+	netdump_print("       num_prstatus_notes: %d\n", nd->num_prstatus_notes);	
+	netdump_print("               vmcoreinfo: %lx\n", (ulong)nd->vmcoreinfo);
+	netdump_print("          size_vmcoreinfo: %d\n", nd->size_vmcoreinfo);
+	netdump_print("       nt_prstatus_percpu: ");
+        wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4;
+        flen = sizeof(void *) == SIZEOF_32BIT ? 8 : 16;
+	if (nd->num_prstatus_notes == 1)
+                netdump_print("%.*lx\n", flen, nd->nt_prstatus_percpu[0]);
+	else {
+        	for (i = 0; i < nd->num_prstatus_notes; i++) {
+                	if ((i % wrap) == 0)
+                        	netdump_print("\n        ");
+                	netdump_print("%.*lx ", flen, 
+				nd->nt_prstatus_percpu[i]);
+        	}
+	}
+	netdump_print("\n\n");
 
-        switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64))
+        switch (DUMPFILE_FORMAT(nd->flags))
 	{
 	case NETDUMP_ELF32:
+	case KDUMP_ELF32:
 		dump_Elf32_Ehdr(nd->elf32);
 		dump_Elf32_Phdr(nd->notes32, ELFREAD);
                 for (i = 0; i < nd->num_pt_load_segments; i++) 
@@ -594,6 +928,7 @@
 		break;
 
 	case NETDUMP_ELF64:
+	case KDUMP_ELF64:
 		dump_Elf64_Ehdr(nd->elf64);
 		dump_Elf64_Phdr(nd->notes64, ELFREAD);
                 for (i = 0; i < nd->num_pt_load_segments; i++)
@@ -865,6 +1200,9 @@
         netdump_print("              e_machine: %d ", elf->e_machine);
         switch (elf->e_machine)
         {
+	case EM_386:
+		netdump_print("(EM_386)\n");
+		break;
         case EM_IA_64:
                 netdump_print("(EM_IA_64)\n");
                 break;
@@ -961,8 +1299,11 @@
 		pls->phys_start = prog->p_paddr; 
 	netdump_print("               p_filesz: %lu (%lx)\n", prog->p_filesz, 
 		prog->p_filesz);
-	if (store_pt_load_data)
+	if (store_pt_load_data) {
 		pls->phys_end = pls->phys_start + prog->p_filesz;
+		pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? 
+			0 : pls->phys_start + prog->p_memsz;
+	}
 	netdump_print("                p_memsz: %lu (%lx)\n", prog->p_memsz,
 		prog->p_memsz);
 	netdump_print("                p_flags: %lx (", prog->p_flags);
@@ -1030,19 +1371,22 @@
 		netdump_print("(?)\n");
 	}
 
-	netdump_print("               p_offset: %ld (%lx)\n", prog->p_offset, 
+	netdump_print("               p_offset: %lld (%llx)\n", prog->p_offset, 
 		prog->p_offset);
 	if (store_pt_load_data)
 		pls->file_offset = prog->p_offset;
-	netdump_print("                p_vaddr: %lx\n", prog->p_vaddr);
-	netdump_print("                p_paddr: %lx\n", prog->p_paddr);
+	netdump_print("                p_vaddr: %llx\n", prog->p_vaddr);
+	netdump_print("                p_paddr: %llx\n", prog->p_paddr);
 	if (store_pt_load_data)
 		pls->phys_start = prog->p_paddr; 
-	netdump_print("               p_filesz: %lu (%lx)\n", prog->p_filesz, 
+	netdump_print("               p_filesz: %llu (%llx)\n", prog->p_filesz, 
 		prog->p_filesz);
-	if (store_pt_load_data)
+	if (store_pt_load_data) {
 		pls->phys_end = pls->phys_start + prog->p_filesz;
-	netdump_print("                p_memsz: %lu (%lx)\n", prog->p_memsz,
+		pls->zero_fill = (prog->p_filesz == prog->p_memsz) ?
+			0 : pls->phys_start + prog->p_memsz;
+	}
+	netdump_print("                p_memsz: %llu (%llx)\n", prog->p_memsz,
 		prog->p_memsz);
 	netdump_print("                p_flags: %lx (", prog->p_flags);
 	others = 0;
@@ -1053,7 +1397,97 @@
 	if (prog->p_flags & PF_R)
 		netdump_print("%sPF_R", others++ ? "|" : "");
 	netdump_print(")\n");
-	netdump_print("                p_align: %ld\n", prog->p_align);
+	netdump_print("                p_align: %lld\n", prog->p_align);
+}
+
+/*
+ * VMCOREINFO
+ *
+ * This is an ELF note intended for makedumpfile.  It is exported by the
+ * crashing kernel and presented as an ELF note in the /proc/vmcore
+ * of the panic (capture) kernel.
+ */
+
+#define VMCOREINFO_NOTE_NAME        "VMCOREINFO"
+#define VMCOREINFO_NOTE_NAME_BYTES  (sizeof(VMCOREINFO_NOTE_NAME))
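
For reference, the payload of this note is a flat, newline-separated list of
key=value pairs; the parser below relies only on that layout.  A hypothetical
excerpt (the values are illustrative, not taken from any particular dump):

    OSRELEASE=2.6.18-92.el5
    PAGESIZE=4096
    SYMBOL(init_uts_ns)=ffffffff80462680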
+
+/*
+ * Reads a string value from VMCOREINFO.
+ *
+ * Returns a string (which must be freed by the caller) containing the
+ * value for the key, or NULL if the key was not found.
+ */
+static char *
+vmcoreinfo_read_string(const char *key)
+{
+	int i, j, end;
+	size_t value_length;
+	size_t key_length = strlen(key);
+	char *vmcoreinfo = (char *)nd->vmcoreinfo;
+	char *value = NULL;
+
+	if (!nd->vmcoreinfo)
+		return NULL;
+
+	/* the '+ 1' is the equal sign */
+	for (i = 0; i < (nd->size_vmcoreinfo - key_length + 1); i++) {
+		/*
+		 * We must also check that we're either at the beginning of
+		 * VMCOREINFO or preceded by the separating newline, and that
+		 * an equal sign follows the key.
+		 */
+		if ((strncmp(vmcoreinfo+i, key, key_length) == 0) &&
+		    (i == 0 || vmcoreinfo[i-1] == '\n') &&
+		    (vmcoreinfo[i+key_length] == '=')) {
+
+			end = -1;
+
+			/* Found -- search for the next newline. */
+			for (j = i + key_length + 1; 
+			     j < nd->size_vmcoreinfo; j++) {
+				if (vmcoreinfo[j] == '\n') {
+					end = j;
+					break;
+				}
+			}
+
+			/* 
+			 * If no terminating newline was found, assume the value
+			 * runs to the end of the VMCOREINFO data.
+			 */
+			if (end == -1) {
+				/* Point after the end. */
+				end = nd->size_vmcoreinfo + 1;
+			}
+
+			value_length = end - (1 + i + key_length);
+			/* allocate one extra byte for the terminating NUL */
+			value = malloc(value_length + 1);
+			if (value) {
+				strncpy(value, vmcoreinfo + i + key_length + 1,
+					value_length);
+				value[value_length] = '\0';
+			}
+			break;
+		}
+	}
+
+	return value;
+}
+
+/*
+ * Reads an integer value from VMCOREINFO.
+ */
+static long
+vmcoreinfo_read_integer(const char *key, long default_value)
+{
+	char *string;
+	long retval = default_value;
+
+	string = vmcoreinfo_read_string(key);
+	if (string) {
+		retval = atol(string);
+		free(string);
+	}
+
+	return retval;
 }
 
 /*
@@ -1061,20 +1495,22 @@
  */
 
 static size_t 
-dump_Elf32_Nhdr(Elf32_Off offset, int store_addresses)
+dump_Elf32_Nhdr(Elf32_Off offset, int store)
 {
-	int i, lf;
+	int i, lf, words;
 	Elf32_Nhdr *note;
 	size_t len;
 	char buf[BUFSIZE];
 	char *ptr;
 	ulong *uptr;
+	int xen_core, vmcoreinfo;
 
 	note = (Elf32_Nhdr *)((char *)nd->elf32 + offset);
 
         netdump_print("Elf32_Nhdr:\n");
         netdump_print("               n_namesz: %ld ", note->n_namesz);
         BZERO(buf, BUFSIZE);
+	xen_core = vmcoreinfo = FALSE;
         ptr = (char *)note + sizeof(Elf32_Nhdr);
         BCOPY(ptr, buf, note->n_namesz);
         netdump_print("(\"%s\")\n", buf);
@@ -1085,17 +1521,26 @@
 	{
 	case NT_PRSTATUS:
 		netdump_print("(NT_PRSTATUS)\n");
-		if (store_addresses)
-			nd->nt_prstatus = (void *)note;
+		if (store) { 
+			if (!nd->nt_prstatus)
+				nd->nt_prstatus = (void *)note;
+			for (i = 0; i < NR_CPUS; i++) {
+				if (!nd->nt_prstatus_percpu[i]) {
+					nd->nt_prstatus_percpu[i] = (void *)note;
+					nd->num_prstatus_notes++;
+					break;
+				}
+			}
+		}
 		break;
 	case NT_PRPSINFO:
 		netdump_print("(NT_PRPSINFO)\n");
-		if (store_addresses)
+		if (store)
 			nd->nt_prpsinfo = (void *)note;
 		break;
 	case NT_TASKSTRUCT:
 		netdump_print("(NT_TASKSTRUCT)\n");
-		if (store_addresses) {
+		if (store) {
 			nd->nt_taskstruct = (void *)note;
 			nd->task_struct = *((ulong *)(ptr + note->n_namesz));
 			nd->switch_stack = *((ulong *)
@@ -1105,25 +1550,132 @@
         case NT_DISKDUMP:
                 netdump_print("(NT_DISKDUMP)\n");
 		uptr = (ulong *)(ptr + note->n_namesz);
-		if (*uptr)
+		if (*uptr && store)
 			nd->flags |= PARTIAL_DUMP;
 		break;
+#ifdef NOTDEF
+	/*
+	 *  Note: Based upon the original, abandoned, proposal for
+	 *  its contents -- keep around for potential future use.
+	 */
+	case NT_KDUMPINFO:
+		netdump_print("(NT_KDUMPINFO)\n");
+		if (store) {
+			uptr = (note->n_namesz == 5) ?
+				(ulong *)(ptr + ((note->n_namesz + 3) & ~3)) :
+				(ulong *)(ptr + note->n_namesz);
+			nd->page_size = (uint)(1 << *uptr);
+			uptr++;
+			nd->task_struct = *uptr;
+		}
+		break;
+#endif
 	default:
-		netdump_print("(?)\n");
+		xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen");
+		vmcoreinfo = STRNEQ(buf, "VMCOREINFO");
+		if (xen_core) {
+			netdump_print("(unknown Xen n_type)\n"); 
+			if (store)
+				error(WARNING, "unknown Xen n_type: %lx\n\n", 
+					note->n_type);
+		} else if (vmcoreinfo)
+			netdump_print("(unused)\n");
+		else
+			netdump_print("(?)\n");
+		break;
+
+	case NT_XEN_KDUMP_CR3: 
+                netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n");
+		if (store)
+			error(WARNING, 
+			    "obsolete Xen n_type: %lx (NT_XEN_KDUMP_CR3)\n\n", 
+				note->n_type);
+		/* FALL THROUGH */
+
+	case XEN_ELFNOTE_CRASH_INFO:
+		/*
+		 *  x86 and x86_64: p2m mfn appended to crash_xen_info_t structure
+		 */
+		if (note->n_type == XEN_ELFNOTE_CRASH_INFO)
+                	netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n");
+		xen_core = TRUE;
+		if (store) { 
+			pc->flags |= XEN_CORE;
+			nd->xen_kdump_data = &xen_kdump_data;
+			nd->xen_kdump_data->last_mfn_read = UNINITIALIZED;
+			nd->xen_kdump_data->last_pmd_read = UNINITIALIZED;
+
+			if ((note->n_type == NT_XEN_KDUMP_CR3) &&
+			    ((note->n_descsz/sizeof(ulong)) == 1)) {
+				nd->xen_kdump_data->flags |= KDUMP_CR3;
+				/*
+				 *  Use the first cr3 found.
+				 */
+				if (!nd->xen_kdump_data->cr3) {
+					uptr = (ulong *)(ptr + note->n_namesz);
+					uptr = (ulong *)roundup((ulong)uptr, 4);
+					nd->xen_kdump_data->cr3 = *uptr;
+				}
+			} else {
+				nd->xen_kdump_data->flags |= KDUMP_MFN_LIST;
+				uptr = (ulong *)(ptr + note->n_namesz);
+				uptr = (ulong *)roundup((ulong)uptr, 4);
+				words = note->n_descsz/sizeof(ulong);
+				/*
+				 *  If already set, it was overridden with --p2m_mfn.
+				 */
+				if (!nd->xen_kdump_data->p2m_mfn)
+					nd->xen_kdump_data->p2m_mfn = *(uptr+(words-1));
+				if (words > 9 && !nd->xen_kdump_data->xen_phys_start)
+					nd->xen_kdump_data->xen_phys_start = *(uptr+(words-2));
+				nd->xen_kdump_data->xen_major_version = *uptr;
+				nd->xen_kdump_data->xen_minor_version = *(uptr+1);
+			}
+		}
+		break;
+
+	case XEN_ELFNOTE_CRASH_REGS:
+      		/* 
+		 *  x86 and x86_64: cr0, cr2, cr3, cr4 
+		 */
+		xen_core = TRUE;	
+               	netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n");
+		break;
 	}
 
 	uptr = (ulong *)(ptr + note->n_namesz);
-	for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) {
-		if (((i%4)==0)) {
-			netdump_print("%s                         ", 
-				i ? "\n" : "");
-			lf++;
-		} else
-			lf = 0;
-		netdump_print("%08lx ", *uptr++);
+
+	/*
+	 * kdump notes use an n_namesz of 5 ("CORE" plus the NUL), so the
+	 * descriptor actually starts at the next 4-byte boundary, not at
+	 * ptr + n_namesz.
+	 */
+	if ((nd->flags & KDUMP_ELF32) && (note->n_namesz == 5))
+		uptr = (ulong *)(ptr + ((note->n_namesz + 3) & ~3));
+
+	if (xen_core)
+		uptr = (ulong *)roundup((ulong)uptr, 4);
+
+	if (vmcoreinfo) {
+                netdump_print("                         ");
+                ptr += note->n_namesz + 1;
+                for (i = 0; i < note->n_descsz; i++, ptr++) {
+                        netdump_print("%c", *ptr);
+                        if (*ptr == '\n')
+                                netdump_print("                         ");
+                }
+                lf = 0;
+	} else {
+		for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) {
+			if (((i%4)==0)) {
+				netdump_print("%s                         ", 
+					i ? "\n" : "");
+				lf++;
+			} else
+				lf = 0;
+			netdump_print("%08lx ", *uptr++);
+		}
 	}
 	if (!lf || (note->n_type == NT_TASKSTRUCT) ||
-	    (note->n_type == NT_DISKDUMP))
+	    (note->n_type == NT_DISKDUMP) || xen_core)
 		netdump_print("\n");
 
   	len = sizeof(Elf32_Nhdr);
@@ -1135,15 +1687,17 @@
 
 
 static size_t 
-dump_Elf64_Nhdr(Elf64_Off offset, int store_addresses)
+dump_Elf64_Nhdr(Elf64_Off offset, int store)
 {
-	int i, lf;
+	int i, lf, words;
 	Elf64_Nhdr *note;
 	size_t len;
 	char buf[BUFSIZE];
 	char *ptr;
 	ulonglong *uptr;
 	int *iptr;
+	ulong *up;
+	int xen_core, vmcoreinfo;
 
 	note = (Elf64_Nhdr *)((char *)nd->elf64 + offset);
 
@@ -1151,6 +1705,7 @@
         netdump_print("               n_namesz: %ld ", note->n_namesz);
         BZERO(buf, BUFSIZE);
         ptr = (char *)note + sizeof(Elf64_Nhdr);
+	xen_core = vmcoreinfo = FALSE;
         BCOPY(ptr, buf, note->n_namesz);
         netdump_print("(\"%s\")\n", buf);
 
@@ -1160,17 +1715,26 @@
 	{
 	case NT_PRSTATUS:
 		netdump_print("(NT_PRSTATUS)\n");
-		if (store_addresses)
-			nd->nt_prstatus = (void *)note;
+		if (store) {
+			if (!nd->nt_prstatus)
+				nd->nt_prstatus = (void *)note;
+			for (i = 0; i < NR_CPUS; i++) {
+				if (!nd->nt_prstatus_percpu[i]) {
+					nd->nt_prstatus_percpu[i] = (void *)note;
+					nd->num_prstatus_notes++;
+					break;
+				}
+			}
+		}
 		break;
 	case NT_PRPSINFO:
 		netdump_print("(NT_PRPSINFO)\n");
-		if (store_addresses)
+		if (store)
 			nd->nt_prpsinfo = (void *)note;
 		break;
 	case NT_TASKSTRUCT:
 		netdump_print("(NT_TASKSTRUCT)\n");
-		if (store_addresses) {
+		if (store) {
 			nd->nt_taskstruct = (void *)note;
 			nd->task_struct = *((ulong *)(ptr + note->n_namesz));
                         nd->switch_stack = *((ulong *)
@@ -1180,24 +1744,162 @@
         case NT_DISKDUMP:
                 netdump_print("(NT_DISKDUMP)\n");
 		iptr = (int *)(ptr + note->n_namesz);
-		if (*iptr)
+		if (*iptr && store)
 			nd->flags |= PARTIAL_DUMP;
 		if (note->n_descsz < sizeof(ulonglong))
 			netdump_print("                         %08x", *iptr);
 		break;
+#ifdef NOTDEF
+	/*
+	 *  Note: Based upon the original, abandoned, proposal for
+	 *  its contents -- keep around for potential future use.
+	 */
+        case NT_KDUMPINFO:
+                netdump_print("(NT_KDUMPINFO)\n");
+		if (store) {
+			uint32_t *u32ptr;
+
+			if (nd->elf64->e_machine == EM_386) {
+				u32ptr = (note->n_namesz == 5) ?
+				    (uint *)(ptr + ((note->n_namesz + 3) & ~3)) :
+	                            (uint *)(ptr + note->n_namesz);
+				nd->page_size = 1 << *u32ptr;
+				u32ptr++;
+				nd->task_struct = *u32ptr;
+			} else {
+	                       	uptr = (note->n_namesz == 5) ?
+				    (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)) :
+	                            (ulonglong *)(ptr + note->n_namesz);
+				nd->page_size = (uint)(1 << *uptr);
+				uptr++;
+				nd->task_struct = *uptr;
+			}
+		}
+                break;
+#endif
 	default:
-		netdump_print("(?)\n");
+		xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen");
+		vmcoreinfo = STRNEQ(buf, "VMCOREINFO");
+                if (xen_core) {
+                        netdump_print("(unknown Xen n_type)\n");
+			if (store)
+                        	error(WARNING, 
+				    "unknown Xen n_type: %lx\n\n", note->n_type);
+		} else if (vmcoreinfo) {
+                        netdump_print("(unused)\n");
+
+			if (READ_PAGESIZE_FROM_VMCOREINFO() && store) {
+				nd->vmcoreinfo = (char *)nd->elf64 + offset +
+					(sizeof(Elf64_Nhdr) +
+					((note->n_namesz + 3) & ~3));
+				nd->size_vmcoreinfo = note->n_descsz;
+				nd->page_size = (uint)
+					vmcoreinfo_read_integer("PAGESIZE", 0);
+			}
+                } else
+                        netdump_print("(?)\n");
+                break;
+
+	case NT_XEN_KDUMP_CR3: 
+                netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n");
+               	if (store)
+                	error(WARNING,
+                            "obsolete Xen n_type: %lx (NT_XEN_KDUMP_CR3)\n\n",
+                                note->n_type);
+		/* FALL THROUGH */
+
+	case XEN_ELFNOTE_CRASH_INFO:
+		/*
+		 *  x86 and x86_64: p2m mfn appended to crash_xen_info_t structure
+		 */
+		if (note->n_type == XEN_ELFNOTE_CRASH_INFO)
+                	netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n");
+		xen_core = TRUE;
+		if (store) {
+			pc->flags |= XEN_CORE;
+			nd->xen_kdump_data = &xen_kdump_data;
+			nd->xen_kdump_data->last_mfn_read = UNINITIALIZED;
+			nd->xen_kdump_data->last_pmd_read = UNINITIALIZED;
+
+			if ((note->n_type == NT_XEN_KDUMP_CR3) &&
+			    ((note->n_descsz/sizeof(ulong)) == 1)) {
+				nd->xen_kdump_data->flags |= KDUMP_CR3;
+	                        /*
+	                         *  Use the first cr3 found.
+	                         */
+	                        if (!nd->xen_kdump_data->cr3) {
+					up = (ulong *)(ptr + note->n_namesz);
+	                                up = (ulong *)roundup((ulong)up, 4);
+	                                nd->xen_kdump_data->cr3 = *up;
+	                        }
+			} else {
+				nd->xen_kdump_data->flags |= KDUMP_MFN_LIST;
+				up = (ulong *)(ptr + note->n_namesz);
+	                        up = (ulong *)roundup((ulong)up, 4);
+				words = note->n_descsz/sizeof(ulong);
+				/*
+				 *  If already set, it was overridden with --p2m_mfn.
+				 */
+	                        if (!nd->xen_kdump_data->p2m_mfn)
+	                        	nd->xen_kdump_data->p2m_mfn = *(up+(words-1));
+				if (words > 9 && !nd->xen_kdump_data->xen_phys_start)
+					nd->xen_kdump_data->xen_phys_start = *(up+(words-2));
+				nd->xen_kdump_data->xen_major_version = *up;
+				nd->xen_kdump_data->xen_minor_version = *(up+1);
+			}
+		}
+                break;
+
+        case XEN_ELFNOTE_CRASH_REGS:
+      		/* 
+		 *  x86 and x86_64: cr0, cr2, cr3, cr4 
+		 */
+                xen_core = TRUE;
+                netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n");
+                break;
 	}
 
 	uptr = (ulonglong *)(ptr + note->n_namesz);
-	for (i = lf = 0; i < note->n_descsz/sizeof(ulonglong); i++) {
-		if (((i%2)==0)) {
-			netdump_print("%s                         ", 
-				i ? "\n" : "");
-			lf++;
-		} else
-			lf = 0;
-		netdump_print("%016llx ", *uptr++);
+
+        /*
+         * kdump notes use an n_namesz of 5 ("CORE" plus the NUL), so the
+         * descriptor actually starts at the next 4-byte boundary, not at
+         * ptr + n_namesz.
+         */
+        if ((nd->flags & KDUMP_ELF64) && (note->n_namesz == 5))
+                uptr = (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3));
+
+        if (xen_core)
+                uptr = (ulonglong *)roundup((ulong)uptr, 4);
+
+	if (BITS32() && (xen_core || (note->n_type == NT_PRSTATUS))) {
+		iptr = (int *)uptr;
+		for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) {
+			if (((i%4)==0)) {
+				netdump_print("%s                         ", 
+					i ? "\n" : "");
+				lf++;
+			} else
+				lf = 0;
+			netdump_print("%08lx ", *iptr++);
+		}
+	} else if (vmcoreinfo) {
+		netdump_print("                         ");
+		ptr += note->n_namesz + 1;
+		for (i = 0; i < note->n_descsz; i++, ptr++) {
+			netdump_print("%c", *ptr);
+			if (*ptr == '\n')
+				netdump_print("                         ");
+		}
+		lf = 0;
+	} else {
+		for (i = lf = 0; i < note->n_descsz/sizeof(ulonglong); i++) {
+			if (((i%2)==0)) {
+				netdump_print("%s                         ", 
+					i ? "\n" : "");
+				lf++;
+			} else
+				lf = 0;
+			netdump_print("%016llx ", *uptr++);
+		}
 	}
 	if (!lf)
 		netdump_print("\n");
@@ -1251,39 +1953,70 @@
 
 	default:
 		error(FATAL, 
-		   "netdump support for ELF machine type %d not available\n",
+		   "support for ELF machine type %d not available\n",
 			e_machine);  
 	}
 }
 
-static void 
+struct x86_64_user_regs_struct {
+        unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10;
+        unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax;
+        unsigned long rip,cs,eflags;
+        unsigned long rsp,ss;
+        unsigned long fs_base, gs_base;
+        unsigned long ds,es,fs,gs;
+};
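
When the kernel's debuginfo does not supply user_regs_struct, the function
below falls back to the hard-coded layout above and computes the register
offsets with offsetof().  A quick sketch of what that yields (illustration
only; the macro names are hypothetical and assume 8-byte fields):

    #include <stddef.h>

    /* Offsets into the fallback x86_64_user_regs_struct layout above. */
    #define FALLBACK_RIP_OFFSET  offsetof(struct x86_64_user_regs_struct, rip)  /* 16 * 8 = 128 */
    #define FALLBACK_RSP_OFFSET  offsetof(struct x86_64_user_regs_struct, rsp)  /* 19 * 8 = 152 */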
+
+void 
 get_netdump_regs_x86_64(struct bt_info *bt, ulong *ripp, ulong *rspp)
 {
         Elf64_Nhdr *note;
         size_t len;
         char *user_regs;
-        ulong rsp, rip;
+	ulong regs_size, rsp_offset, rip_offset;
 
         if (is_task_active(bt->task)) 
                 bt->flags |= BT_DUMPFILE_SEARCH;
 
-	if (VALID_STRUCT(user_regs_struct) && (bt->task == tt->panic_task)) {
-                note = (Elf64_Nhdr *)nd->nt_prstatus;
+	if (((NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) &&
+   	      VALID_STRUCT(user_regs_struct) && (bt->task == tt->panic_task)) ||
+	      (KDUMP_DUMPFILE() && (kt->flags & DWARF_UNWIND) && 
+	      (bt->flags & BT_DUMPFILE_SEARCH))) {
+		if (nd->num_prstatus_notes > 1)
+                	note = (Elf64_Nhdr *)
+				nd->nt_prstatus_percpu[bt->tc->processor];
+		else
+                	note = (Elf64_Nhdr *)nd->nt_prstatus;
 
                 len = sizeof(Elf64_Nhdr);
                 len = roundup(len + note->n_namesz, 4);
                 len = roundup(len + note->n_descsz, 4);
 
-                user_regs = ((char *)note + len)
-                        - SIZE(user_regs_struct) - sizeof(long);
+		regs_size = VALID_STRUCT(user_regs_struct) ?
+			SIZE(user_regs_struct) : 
+			sizeof(struct x86_64_user_regs_struct);
+		rsp_offset = VALID_MEMBER(user_regs_struct_rsp) ?
+			OFFSET(user_regs_struct_rsp) : 
+			offsetof(struct x86_64_user_regs_struct, rsp);
+		rip_offset = VALID_MEMBER(user_regs_struct_rip) ?
+			OFFSET(user_regs_struct_rip) :
+                        offsetof(struct x86_64_user_regs_struct, rip);
 
-		if (CRASHDEBUG(1)) {
-                	rsp = ULONG(user_regs + OFFSET(user_regs_struct_rsp));
-                	rip = ULONG(user_regs + OFFSET(user_regs_struct_rip));
+                user_regs = ((char *)note + len) - regs_size - sizeof(long);
+
+		if (CRASHDEBUG(1))
 			netdump_print("ELF prstatus rsp: %lx rip: %lx\n", 
-				rsp, rip);
-		}
+                		ULONG(user_regs + rsp_offset),
+                		ULONG(user_regs + rip_offset));
+
+		if (KDUMP_DUMPFILE()) {
+			*rspp = ULONG(user_regs + rsp_offset);
+			*ripp = ULONG(user_regs + rip_offset);
 
+			if (*ripp && *rspp)
+				return;
+		}
+			
 		bt->machdep = (void *)user_regs;
 	}
 
@@ -1295,13 +2028,14 @@
  *  the raw stack for some reasonable hooks.
  */
 
-static void
+void
 get_netdump_regs_x86(struct bt_info *bt, ulong *eip, ulong *esp)
 {
-	int i, search, panic;
+	int i, search, panic, panic_task;
 	char *sym;
 	ulong *up;
 	ulong ipintr_eip, ipintr_esp, ipintr_func;
+	ulong halt_eip, halt_esp;
 	int check_hardirq, check_softirq;
 
 	if (!is_task_active(bt->task)) {
@@ -1309,17 +2043,31 @@
 		return;
 	}
 
+	panic_task = tt->panic_task == bt->task ? TRUE : FALSE;
+
 	ipintr_eip = ipintr_esp = ipintr_func = panic = 0;
+	halt_eip = halt_esp = 0;
 	check_hardirq = check_softirq = tt->flags & IRQSTACKS ? TRUE : FALSE;
 	search = ((bt->flags & BT_TEXT_SYMBOLS) && (tt->flags & TASK_INIT_DONE))
 		|| (machdep->flags & OMIT_FRAME_PTR);
-
 retry:
 	for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++){
 		sym = closest_symbol(*up);
-		if (STREQ(sym, "netconsole_netdump") || 
+
+		if (XEN_CORE_DUMPFILE()) {
+			if (STREQ(sym, "xen_machine_kexec")) {
+				*eip = *up;
+				*esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf);
+				return;
+			}
+			if (STREQ(sym, "crash_kexec")) {
+                        	halt_eip = *up;
+				halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf);
+			}
+		} else if (STREQ(sym, "netconsole_netdump") || 
 		    STREQ(sym, "netpoll_start_netdump") ||
 		    STREQ(sym, "start_disk_dump") ||
+		    STREQ(sym, "crash_kexec") ||
 		    STREQ(sym, "disk_dump")) {
 			*eip = *up;
 			*esp = search ?
@@ -1354,7 +2102,7 @@
 next_sysrq:
                         *eip = *up;
 			*esp = bt->stackbase + ((char *)(up+4) - bt->stackbuf);
-			machdep->flags |= SYSRQ;
+			pc->flags |= SYSRQ;
 			for (i++, up++; i < LONGS_PER_STACK; i++, up++) {
 				sym = closest_symbol(*up);
                 		if (STREQ(sym, "sysrq_handle_crash")) 
@@ -1371,7 +2119,15 @@
                         *esp = search ?
                             bt->stackbase + ((char *)(up+1) - bt->stackbuf) :
                                 *(up-1);
-                        machdep->flags |= SYSRQ;
+                        pc->flags |= SYSRQ;
+                        return;
+                }
+
+                if (STREQ(sym, "crash_nmi_callback")) {
+                        *eip = *up;
+                        *esp = search ?
+                            bt->stackbase + ((char *)(up+1) - bt->stackbuf) :
+                                *(up-1);
                         return;
                 }
 
@@ -1385,6 +2141,18 @@
 			    bt->stackbase + ((char *)(up-1) - bt->stackbuf);
 			ipintr_func = *(up - 2);
                 }
+
+                if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) &&
+                    STREQ(sym, "safe_halt")) {
+                        halt_eip = *up;
+			halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf);
+                }
+
+                if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) &&
+                    !halt_eip && STREQ(sym, "xen_idle")) {
+                        halt_eip = *up;
+			halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf);
+                }
 	}
 
 	if (ipintr_eip) {
@@ -1393,6 +2161,12 @@
 		return;
 	}
 
+	if (halt_eip && halt_esp) {
+        	*eip = halt_eip;
+        	*esp = halt_esp;
+		return;
+	}
+
 	if (panic)
 		return;
 
@@ -1418,7 +2192,9 @@
                 goto retry;
         }
 
-	console("get_netdump_regs_x86: cannot find anything useful\n");
+	if (CRASHDEBUG(1))
+		error(INFO, 
+    "get_netdump_regs_x86: cannot find anything useful (task: %lx)\n", bt->task);
  
 	machdep->get_stack_frame(bt, eip, esp);
 }
@@ -1429,8 +2205,24 @@
 	Elf64_Nhdr *note;
 	size_t len;
 
-	if (bt->task == tt->panic_task) {
-		note = (Elf64_Nhdr *)nd->nt_prstatus;
+	if ((bt->task == tt->panic_task) ||
+		(is_task_active(bt->task) && nd->num_prstatus_notes > 1)) {
+		/*
+		 * Registers are saved during the dump process only for the
+		 * panic task, whereas kdump captures registers for every
+		 * CPU that responded to the IPI.
+		 */
+                if (nd->num_prstatus_notes > 1) {
+			if (bt->tc->processor >= nd->num_prstatus_notes)
+				error(FATAL, 
+		          	    "cannot determine NT_PRSTATUS ELF note "
+				    "for %s task: %lx\n", 
+					(bt->task == tt->panic_task) ?
+					"panic" : "active", bt->task);	
+                        note = (Elf64_Nhdr *)
+                                nd->nt_prstatus_percpu[bt->tc->processor];
+		} else
+			note = (Elf64_Nhdr *)nd->nt_prstatus;
 
 		len = sizeof(Elf64_Nhdr);
 		len = roundup(len + note->n_namesz, 4);
@@ -1446,3 +2238,236 @@
 {
 	return (nd->flags & PARTIAL_DUMP ? TRUE : FALSE);
 }
+
+
+/*
+ *  kexec/kdump generated vmcore files are similar enough in
+ *  nature to netdump/diskdump such that most vmcore access
+ *  functionality may be borrowed from the equivalent netdump
+ *  function.  If not, re-work them here.
+ */
+int
+is_kdump(char *file, ulong source_query)
+{
+        return is_netdump(file, source_query);
+}
+
+int
+kdump_init(char *unused, FILE *fptr)
+{
+	return netdump_init(unused, fptr);
+}
+
+ulong 
+get_kdump_panic_task(void)
+{
+	return get_netdump_panic_task();
+}
+
+int
+read_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
+{
+	if (XEN_CORE_DUMPFILE() && !XEN_HYPER_MODE()) {
+	    	if (!(nd->xen_kdump_data->flags & KDUMP_P2M_INIT)) {
+        		if (!machdep->xen_kdump_p2m_create)
+                		error(FATAL,
+                            "xen kdump dumpfiles not supported on this architecture\n");
+
+			if ((nd->xen_kdump_data->page = 
+			    (char *)malloc(PAGESIZE())) == NULL)
+				error(FATAL,
+				    "cannot malloc xen kdump data page\n");
+
+			if (!machdep->xen_kdump_p2m_create(nd->xen_kdump_data))
+                		error(FATAL,
+                    	    "cannot create xen kdump pfn-to-mfn mapping\n");
+
+        		nd->xen_kdump_data->flags |= KDUMP_P2M_INIT;
+		}
+
+		if ((paddr = xen_kdump_p2m(paddr)) == P2M_FAILURE)
+			return READ_ERROR;
+	}
+
+	return read_netdump(fd, bufptr, cnt, addr, paddr);
+}
+
+int
+write_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
+{
+	return write_netdump(fd, bufptr, cnt, addr, paddr);
+}
+
+void
+get_kdump_regs(struct bt_info *bt, ulong *eip, ulong *esp)
+{
+	get_netdump_regs(bt, eip, esp);
+}
+
+uint
+kdump_page_size(void)
+{
+        uint pagesz;
+
+        if (!VMCORE_VALID())
+                return 0;
+
+	if (!(pagesz = nd->page_size))
+                pagesz = (uint)getpagesize();
+
+        return pagesz;
+}
+
+int 
+kdump_free_memory(void)
+{
+	return netdump_free_memory();
+}
+
+int 
+kdump_memory_used(void)
+{
+	return netdump_memory_used();
+}
+
+int 
+kdump_memory_dump(FILE *fp)
+{
+	return netdump_memory_dump(fp);
+}
+
+/*
+ *  Translate a xen domain's pseudo-physical address into the
+ *  xen machine address.  Since there's no compression involved,
+ *  just the last phys_to_machine_mapping[] page read is cached,
+ *  which holds PAGESIZE()/sizeof(ulong) p2m translations (1024 on
+ *  32-bit, 512 on 64-bit, with 4KB pages).
+ */
+static physaddr_t 
+xen_kdump_p2m(physaddr_t pseudo)
+{
+	ulong pfn, mfn_frame; 
+	ulong *mfnptr;
+	ulong mfn_idx, frame_idx;
+	physaddr_t paddr;
+	struct xen_kdump_data *xkd = nd->xen_kdump_data;
+
+	if (pc->curcmd_flags & XEN_MACHINE_ADDR)
+		return pseudo;
+
+#ifdef IA64
+	return ia64_xen_kdump_p2m(xkd, pseudo);
+#endif
+
+	xkd->accesses++;
+
+	pfn = (ulong)BTOP(pseudo);
+	mfn_idx = pfn / (PAGESIZE()/sizeof(ulong));
+	frame_idx = pfn % (PAGESIZE()/sizeof(ulong));
+	if (mfn_idx >= xkd->p2m_frames)
+		return P2M_FAILURE;
+	mfn_frame = xkd->p2m_mfn_frame_list[mfn_idx];
+
+	if (mfn_frame == xkd->last_mfn_read)
+		xkd->cache_hits++;
+	else if (read_netdump(0, xkd->page, PAGESIZE(), 0, 
+	    	(physaddr_t)PTOB(mfn_frame)) != PAGESIZE())
+		return P2M_FAILURE;
+
+	xkd->last_mfn_read = mfn_frame;
+
+	mfnptr = ((ulong *)(xkd->page)) + frame_idx;
+	paddr = (physaddr_t)PTOB((ulonglong)(*mfnptr));  
+	paddr |= PAGEOFFSET(pseudo);
+
+	if (CRASHDEBUG(7))
+		fprintf(fp, 
+		    "xen_dump_p2m(%llx): mfn_idx: %ld frame_idx: %ld"
+		    " mfn_frame: %lx mfn: %lx => %llx\n",
+			(ulonglong)pseudo, mfn_idx, frame_idx, 
+			mfn_frame, *mfnptr, (ulonglong)paddr);
+	
+	return paddr;
+}
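
A worked example of the index arithmetic above, assuming 4KB pages and
8-byte ulongs (i.e. 512 p2m entries per page); the address is made up
purely for illustration:

    /*
     *   pseudo    = 0x12345678
     *   pfn       = BTOP(pseudo)  = 0x12345
     *   mfn_idx   = pfn / 512     = 0x91   -> index into p2m_mfn_frame_list[]
     *   frame_idx = pfn % 512     = 0x145  -> ulong slot within that mfn page
     *   paddr     = PTOB(mfn) | PAGEOFFSET(pseudo)   ( ... | 0x678 )
     */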
+
+struct vmcore_data *
+get_kdump_vmcore_data(void)
+{
+	if (!VMCORE_VALID() || !KDUMP_DUMPFILE())
+		return NULL;
+
+	return &vmcore_data;
+}
+
+/*
+ *  Override the dom0 p2m mfn in the XEN_ELFNOTE_CRASH_INFO note
+ *  in order to initiate a crash session of a guest kernel.
+ */
+void
+xen_kdump_p2m_mfn(char *arg)
+{
+	ulong value;
+	int errflag;
+
+	errflag = 0;
+	value = htol(arg, RETURN_ON_ERROR|QUIET, &errflag);
+	if (!errflag) {
+		xen_kdump_data.p2m_mfn = value;
+		if (CRASHDEBUG(1))
+			error(INFO, 
+			    "xen_kdump_data.p2m_mfn override: %lx\n",  
+				value); 
+	} else 
+		error(WARNING, "invalid p2m_mfn argument: %s\n", arg);
+}
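
The argument is parsed with htol(), i.e. as a hexadecimal mfn, and arrives
here from the --p2m_mfn long option registered in main.c later in this
patch.  A hypothetical invocation (the mfn value and file names are
placeholders only):

    crash --p2m_mfn 3fd1e vmlinux vmcore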
+
+/*
+ *  Fujitsu dom0/HV sadump-generated dumpfile, which requires
+ *  the --p2m_mfn command line argument.
+ */
+int
+is_sadump_xen(void)
+{
+	if (xen_kdump_data.p2m_mfn) {
+		if (!XEN_CORE_DUMPFILE()) {
+			pc->flags |= XEN_CORE;
+			nd->xen_kdump_data = &xen_kdump_data;
+			nd->xen_kdump_data->last_mfn_read = UNINITIALIZED;
+			nd->xen_kdump_data->last_pmd_read = UNINITIALIZED;
+			nd->xen_kdump_data->flags |= KDUMP_MFN_LIST;
+		}
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+void
+set_xen_phys_start(char *arg)
+{
+	ulong value;
+	int errflag = 0;
+
+	value = htol(arg, RETURN_ON_ERROR|QUIET, &errflag);
+	if (!errflag)
+		xen_kdump_data.xen_phys_start = value;
+	else 
+		error(WARNING, "invalid xen_phys_start argument: %s\n", arg);
+}
+
+ulong
+xen_phys_start(void)
+{
+	return nd->xen_kdump_data->xen_phys_start;
+}
+
+int
+xen_major_version(void)
+{
+	return nd->xen_kdump_data->xen_major_version;
+}
+
+int
+xen_minor_version(void)
+{
+	return nd->xen_kdump_data->xen_minor_version;
+}
--- crash/lkcd_dump_v5.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_dump_v5.h	2006-10-11 09:14:35.000000000 -0400
@@ -1,8 +1,8 @@
 /* lkcd_dump_v5.h - core analysis suite
  *
  * Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -35,7 +35,7 @@
 #ifndef _DUMP_H
 #define _DUMP_H
 
-#include <linux/list.h>
+//#include <linux/list.h>
 
 /* define TRUE and FALSE for use in our dump modules */
 #ifndef FALSE
--- crash/main.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/main.c	2009-01-23 14:52:16.000000000 -0500
@@ -1,8 +1,8 @@
 /* main.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,6 +16,7 @@
  */
 
 #include "defs.h"
+#include "xen_hyper_defs.h"
 #include <curses.h>
 #include <getopt.h>
 
@@ -23,23 +24,41 @@
 static int is_external_command(void);
 static int is_builtin_command(void);
 static int is_input_file(void);
+static void check_xen_hyper(void);
+static void show_untrusted_files(void);
 
 static struct option long_options[] = {
-        {"memory_module", 1, 0, 0},
-        {"memory_device", 1, 0, 0},
+        {"memory_module", required_argument, 0, 0},
+        {"memory_device", required_argument, 0, 0},
         {"no_kallsyms", 0, 0, 0},
         {"no_modules", 0, 0, 0},
         {"no_namelist_gzip", 0, 0, 0},
-        {"help", 0, 0, 0},
+        {"help", optional_argument, 0, 'h'},
 	{"data_debug", 0, 0, 0},
 	{"no_data_debug", 0, 0, 0},
 	{"no_crashrc", 0, 0, 0},
 	{"no_kmem_cache", 0, 0, 0},
+	{"kmem_cache_delay", 0, 0, 0},
 	{"readnow", 0, 0, 0},
 	{"smp", 0, 0, 0},
-	{"machdep", 1, 0, 0},
+	{"machdep", required_argument, 0, 0},
 	{"version", 0, 0, 0},
 	{"buildinfo", 0, 0, 0},
+	{"shadow_page_tables", 0, 0, 0},
+        {"cpus", required_argument, 0, 0},
+        {"no_ikconfig", 0, 0, 0},
+        {"hyper", 0, 0, 0},
+	{"p2m_mfn", required_argument, 0, 0},
+	{"xen_phys_start", required_argument, 0, 0},
+	{"zero_excluded", 0, 0, 0},
+	{"no_panic", 0, 0, 0},
+        {"more", 0, 0, 0},
+        {"less", 0, 0, 0},
+        {"CRASHPAGER", 0, 0, 0},
+        {"no_scroll", 0, 0, 0},
+        {"reloc", required_argument, 0, 0},
+	{"active", 0, 0, 0},
+	{"minimal", 0, 0, 0},
         {0, 0, 0, 0}
 };
 
@@ -55,7 +74,7 @@
 	 */
 	opterr = 0;
 	optind = 0;
-	while((c = getopt_long(argc, argv, "LgH:h:e:i:sSvc:d:tf",
+	while((c = getopt_long(argc, argv, "Lkgh::e:i:sSvc:d:tfp:m:",
        		long_options, &option_index)) != -1) {
 		switch (c)
 		{
@@ -64,52 +83,55 @@
 			    "memory_module")) 
 				pc->memory_module = optarg;
 
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
 			    "memory_device")) 
 				pc->memory_device = optarg;
 
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
 			    "no_kallsyms")) 
 				kt->flags |= NO_KALLSYMS;
 
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
 			    "no_modules")) 
 				kt->flags |= NO_MODULE_ACCESS;
 
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
+			    "no_ikconfig")) 
+				kt->flags |= NO_IKCONFIG;
+
+		        else if (STREQ(long_options[option_index].name, 
 			    "no_namelist_gzip")) 
 				pc->flags |= NAMELIST_NO_GZIP;
 
-		        if (STREQ(long_options[option_index].name, "help")) {
-				program_usage(LONG_FORM);
-				clean_exit(0);
-			}
-
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
 			    "data_debug")) 
 				pc->flags |= DATADEBUG;
 
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
 			    "no_data_debug")) 
 				pc->flags &= ~DATADEBUG;
 
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
 			    "no_kmem_cache")) 
 				vt->flags |= KMEM_CACHE_UNAVAIL;
 
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
+			    "kmem_cache_delay")) 
+				vt->flags |= KMEM_CACHE_DELAY;
+
+		        else if (STREQ(long_options[option_index].name, 
 			    "readnow")) 
 				pc->flags |= READNOW;
 
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
 			    "smp")) 
 				kt->flags |= SMP;
 
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
 			    "machdep")) 
 				machdep->cmdline_arg = optarg;
 
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
 			    "version")) { 
 				pc->flags |= VERSION_QUERY;
                         	display_version();
@@ -117,12 +139,78 @@
                         	clean_exit(0);
 			}
 
-		        if (STREQ(long_options[option_index].name, 
+		        else if (STREQ(long_options[option_index].name, 
 			    "buildinfo")) {
 				dump_build_data();
 				clean_exit(0);
 			}
 
+		        else if (STREQ(long_options[option_index].name, 
+			    "shadow_page_tables")) 
+				kt->xen_flags |= SHADOW_PAGE_TABLES;
+
+		        else if (STREQ(long_options[option_index].name, "cpus")) 
+				kt->cpus_override = optarg;
+
+			else if (STREQ(long_options[option_index].name, "hyper"))
+				pc->flags |= XEN_HYPER;
+
+		        else if (STREQ(long_options[option_index].name, "p2m_mfn")) 
+				xen_kdump_p2m_mfn(optarg);
+
+		        else if (STREQ(long_options[option_index].name, "xen_phys_start")) 
+				set_xen_phys_start(optarg);
+
+		        else if (STREQ(long_options[option_index].name, "zero_excluded")) 
+				*diskdump_flags |= ZERO_EXCLUDED;
+
+		        else if (STREQ(long_options[option_index].name, "no_panic")) 
+				tt->flags |= PANIC_TASK_NOT_FOUND;
+
+		        else if (STREQ(long_options[option_index].name, "more")) {
+				if ((pc->scroll_command != SCROLL_NONE) &&
+				    file_exists("/bin/more", NULL))
+					pc->scroll_command = SCROLL_MORE;
+			}
+
+		        else if (STREQ(long_options[option_index].name, "less")) {
+				if ((pc->scroll_command != SCROLL_NONE) &&
+				    file_exists("/usr/bin/less", NULL))
+					pc->scroll_command = SCROLL_LESS;
+			}
+
+		        else if (STREQ(long_options[option_index].name, "CRASHPAGER")) {
+				if ((pc->scroll_command != SCROLL_NONE) && 
+				    CRASHPAGER_valid())
+					pc->scroll_command = SCROLL_CRASHPAGER;
+			}
+
+		        else if (STREQ(long_options[option_index].name, "no_scroll"))
+				 pc->flags &= ~SCROLL;
+
+		        else if (STREQ(long_options[option_index].name, "no_crashrc"))
+				pc->flags |= NOCRASHRC;
+
+		        else if (STREQ(long_options[option_index].name, "active"))
+				tt->flags |= ACTIVE_ONLY;
+
+		        else if (STREQ(long_options[option_index].name, "reloc")) {
+				if (!calculate(optarg, &kt->relocate, NULL, 0)) {
+					error(INFO, "invalid --reloc argument: %s\n",
+						optarg);
+					program_usage(SHORT_FORM);
+				} 
+				kt->flags |= RELOC_SET;
+			}
+
+			else if (STREQ(long_options[option_index].name, "minimal")) 
+				pc->flags |= MINIMAL_MODE;
+
+			else {
+				error(INFO, "internal error: option %s unhandled\n",
+					long_options[option_index].name);
+				program_usage(SHORT_FORM);
+			}
 			break;
 
 		case 'f':
@@ -133,14 +221,25 @@
 			pc->flags |= KERNEL_DEBUG_QUERY;
 			break;
 
-		case 'H':
-			cmd_usage(optarg, COMPLETE_HELP);
-			clean_exit(0);
-
 		case 'h':
-			cmd_usage(optarg, COMPLETE_HELP|PIPE_TO_LESS);
+			/* note: getopt_long's handling of optional arguments is weak.
+			 * To it, an optional argument must be part of the same argument
+			 * as the flag itself (e.g. --help=commands or -hcommands).
+			 * We want to accept "--help commands" or "-h commands".
+			 * So we must do that part ourselves.
+			 */
+			if (optarg != NULL)
+				cmd_usage(optarg, COMPLETE_HELP|PIPE_TO_SCROLL|MUST_HELP);
+			else if (argv[optind] != NULL && argv[optind][0] != '-')
+				cmd_usage(argv[optind++], COMPLETE_HELP|PIPE_TO_SCROLL|MUST_HELP);
+			else
+				program_usage(LONG_FORM);
 			clean_exit(0);
 			
+		case 'k':
+			pc->flags |= KERNTYPES;
+			break;
+
 		case 'e':
 			if (STREQ(optarg, "vi"))
 				pc->editing_mode = "vi";
@@ -168,7 +267,7 @@
 		case 's':
 			pc->flags |= SILENT;
 			pc->flags &= ~SCROLL;
-			pc->scroll_command = SCROLL_NONE;
+//   			pc->scroll_command = SCROLL_NONE;   (why?)
 			break;
 
 		case 'L':
@@ -193,14 +292,18 @@
 			set_vas_debug(pc->debug);
 			break;
 
+		case 'p':
+			force_page_size(optarg);
+			break;
+
+		case 'm':
+			machdep->cmdline_arg = optarg;
+			break;
+
 		default:
-			if (STREQ(argv[optind-1], "-h"))
-				program_usage(LONG_FORM);
-			else {
-				error(INFO, "invalid option: %s\n",
-					argv[optind-1]);
-				program_usage(SHORT_FORM);
-			}
+			error(INFO, "invalid option: %s\n",
+				argv[optind-1]);
+			program_usage(SHORT_FORM);
 		}
 	}
 	opterr = 1;
@@ -229,7 +332,7 @@
         	} else if (!is_readable(argv[optind])) 
 			program_usage(SHORT_FORM);
 
-		if (is_elf_file(argv[optind])) {
+		if (is_kernel(argv[optind])) {
 			if (pc->namelist || pc->server_namelist) {
 				if (!select_namelist(argv[optind])) {
                                		error(INFO, 
@@ -261,8 +364,36 @@
                                 }
                                 pc->flags |= NETDUMP;
                                 pc->dumpfile = argv[optind];
-                                pc->readmem = read_netdump;
-                                pc->writemem = write_netdump;
+
+				if (is_sadump_xen()) {
+					pc->readmem = read_kdump;
+					pc->writemem = write_kdump;
+				} else {
+					pc->readmem = read_netdump;
+					pc->writemem = write_netdump;
+				}
+
+                        } else if (is_kdump(argv[optind], KDUMP_LOCAL)) {
+                                if (pc->flags & MEMORY_SOURCES) {
+                                        error(INFO,
+                                            "too many dumpfile arguments\n");
+                                        program_usage(SHORT_FORM);
+                                }
+                                pc->flags |= KDUMP;
+                                pc->dumpfile = argv[optind];
+                                pc->readmem = read_kdump;
+                                pc->writemem = write_kdump;
+
+                        } else if (is_xendump(argv[optind])) {
+                                if (pc->flags & MEMORY_SOURCES) {
+                                        error(INFO,
+                                            "too many dumpfile arguments\n");
+                                        program_usage(SHORT_FORM);
+                                }
+                                pc->flags |= XENDUMP;
+                                pc->dumpfile = argv[optind];
+                                pc->readmem = read_xendump;
+                                pc->writemem = write_xendump;
 
 			} else if (is_diskdump(argv[optind])) {
                                 if (pc->flags & MEMORY_SOURCES) {
@@ -322,6 +453,8 @@
 		optind++;
 	}
 	
+	check_xen_hyper();
+
         if (setjmp(pc->main_loop_env))
                 clean_exit(1);
 
@@ -332,11 +465,11 @@
 	buf_init();
         cmdline_init();
         mem_init();
+       	hq_init();
 	machdep_init(PRE_SYMTAB);
         symtab_init();
+	paravirt_init();
 	machdep_init(PRE_GDB);
-	kernel_init(PRE_GDB);
-	verify_version();
         datatype_init();
 
 	/*
@@ -361,17 +494,29 @@
 {
         if (!(pc->flags & GDB_INIT)) {
 		gdb_session_init();
-		kernel_init(POST_GDB);
-		machdep_init(POST_GDB);
-        	vm_init();
-        	hq_init();
-        	module_init();
-        	help_init();
-        	task_init();
-        	vfs_init();
-		net_init();
-		dev_init();
-		machdep_init(POST_INIT);
+		show_untrusted_files();
+		if (XEN_HYPER_MODE()) {
+#ifdef XEN_HYPERVISOR_ARCH
+			machdep_init(POST_GDB);
+			xen_hyper_init();
+			machdep_init(POST_INIT);
+#else
+        		error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
+#endif
+		} else if (!(pc->flags & MINIMAL_MODE)) {
+			read_in_kernel_config(IKCFG_INIT);
+			kernel_init();
+			machdep_init(POST_GDB);
+        		vm_init();
+			machdep_init(POST_VM);
+        		module_init();
+        		help_init();
+        		task_init();
+        		vfs_init();
+			net_init();
+			dev_init();
+			machdep_init(POST_INIT);
+		}
 	} else
 		SIGACTION(SIGINT, restart, &pc->sigaction, NULL);
 
@@ -379,11 +524,25 @@
          *  Display system statistics and current context.
          */
         if (!(pc->flags & SILENT) && !(pc->flags & RUNTIME)) {
-                display_sys_stats();
-                show_context(CURRENT_CONTEXT());
-                fprintf(fp, "\n");
+		if (XEN_HYPER_MODE()) {
+#ifdef XEN_HYPERVISOR_ARCH
+			xen_hyper_display_sys_stats();
+			xen_hyper_show_vcpu_context(XEN_HYPER_VCPU_LAST_CONTEXT());
+                	fprintf(fp, "\n");
+#else
+        		error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
+#endif
+		} else if (!(pc->flags & MINIMAL_MODE)) {
+			display_sys_stats();
+			show_context(CURRENT_CONTEXT());
+                	fprintf(fp, "\n");
+		}
         }
 
+	if (pc->flags & MINIMAL_MODE)
+            error(NOTE, 
+		"minimal mode commands: log, dis, rd, sym, eval and exit\n\n");
+
         pc->flags |= RUNTIME;
 
 	/*
@@ -426,8 +585,17 @@
 
 	if ((ct = get_command_table_entry(args[0]))) {
                 if (ct->flags & REFRESH_TASK_TABLE) {
-                        tt->refresh_task_table();
-			sort_context_array();
+			if (XEN_HYPER_MODE()) {
+#ifdef XEN_HYPERVISOR_ARCH
+				xen_hyper_refresh_domain_context_space();
+				xen_hyper_refresh_vcpu_context_space();
+#else
+        			error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
+#endif
+			} else {
+				tt->refresh_task_table();
+				sort_context_array();
+			}
 		}
                 if (!STREQ(pc->curcmd, pc->program_name))
                         pc->lastcmd = pc->curcmd;
@@ -451,14 +619,25 @@
         if (is_datatype_command()) 
                 goto reattempt;
 
-	if (is_gdb_command(TRUE, FAULT_ON_ERROR)) 
+	if (!(pc->flags & MINIMAL_MODE) &&
+	    is_gdb_command(TRUE, FAULT_ON_ERROR)) 
 		goto reattempt;
 
 	if (REMOTE() && remote_execute())
 		return;
 
 	pc->curcmd = pc->program_name;
-	error(INFO, "command not found: %s\n", args[0]);
+
+	if (pc->flags & MINIMAL_MODE)
+		error(INFO, 
+		    "%s: command not available in minimal mode\n"
+		    "NOTE: minimal mode commands: log, dis, rd, sym, eval and exit\n",
+			args[0]);
+	else
+		error(INFO, "command not found: %s\n", args[0]);
+
+	if (pc->curcmd_flags & REPEAT)
+		pc->curcmd_flags &= ~REPEAT;
 }
 
 
@@ -470,8 +649,11 @@
 {       
         struct command_table_entry *cp;
         struct extension_table *ext;
+	
+	if ((pc->flags & MINIMAL_MODE) && !minimal_functions(name)) 
+		return NULL;
   
-        for (cp = &base_command_table[0]; cp->name; cp++) {
+	for (cp = pc->cmd_table; cp->name; cp++) {
                 if (STREQ(cp->name, name))
                         return cp;
         }
@@ -591,6 +773,8 @@
 	int i;
 	char *p1;
 	char buf[BUFSIZE];
+	char homerc[BUFSIZE];
+	char localrc[BUFSIZE];
 	FILE *afp;
 	char *program;
 
@@ -625,7 +809,8 @@
 	machdep->verify_paddr = generic_verify_paddr;
 	pc->redhat_debug_loc = DEFAULT_REDHAT_DEBUG_LOCATION;
 	pc->cmdgencur = 0;
-	pc->cmdgenspec = ~pc->cmdgencur;
+	pc->cmd_table = linux_command_table;
+	kt->BUG_bytes = -1;
 
 	/*
 	 *  Get gdb version before initializing it since this might be one 
@@ -637,7 +822,10 @@
 	 *  Set up the default scrolling behavior for terminal output.
 	 */
 	if (isatty(fileno(stdout))) {
-        	if (file_exists("/usr/bin/less", NULL)) {
+		if (CRASHPAGER_valid()) {
+			pc->flags |= SCROLL;
+			pc->scroll_command = SCROLL_CRASHPAGER;
+		} else if (file_exists("/usr/bin/less", NULL)) {
 			pc->flags |= SCROLL;
 			pc->scroll_command = SCROLL_LESS;
 		} else if (file_exists("/bin/more", NULL)) {
@@ -685,11 +873,13 @@
 			pc->home = "(unknown)";
 		} else
 			strcpy(pc->home, p1);
-	        sprintf(buf, "%s/.%src", pc->home, pc->program_name);
-	        if (!(pc->flags & NOCRASHRC) && file_exists(buf, NULL)) {
-	                if ((afp = fopen(buf, "r")) == NULL)
+	        sprintf(homerc, "%s/.%src", pc->home, pc->program_name);
+	        if (!(pc->flags & NOCRASHRC) && file_exists(homerc, NULL)) {
+	                if ((afp = fopen(homerc, "r")) == NULL)
 	                        error(INFO, "cannot open %s: %s\n",
-	                                buf, strerror(errno));
+	                                homerc, strerror(errno));
+			else if (untrusted_file(afp, homerc))
+				fclose(afp);
 	                else {
 	                        while (fgets(buf, BUFSIZE, afp))
 	                                resolve_rc_cmd(buf, ALIAS_RCHOME);
@@ -698,11 +888,14 @@
 	        }
 	}
 
-        sprintf(buf, ".%src", pc->program_name);
-	if (!(pc->flags & NOCRASHRC) && file_exists(buf, NULL)) {
-		if ((afp = fopen(buf, "r")) == NULL)
+        sprintf(localrc, ".%src", pc->program_name);
+	if (!same_file(homerc, localrc) && 
+	    !(pc->flags & NOCRASHRC) && file_exists(localrc, NULL)) {
+		if ((afp = fopen(localrc, "r")) == NULL)
                         error(INFO, "cannot open %s: %s\n",
-				buf, strerror(errno));
+				localrc, strerror(errno));
+		else if (untrusted_file(afp, localrc))
+			fclose(afp);
 		else {
 			while (fgets(buf, BUFSIZE, afp)) 
 				resolve_rc_cmd(buf, ALIAS_RCLOCAL);
@@ -712,6 +905,8 @@
 
 	if (STREQ(pc->editing_mode, "no_mode"))
 		pc->editing_mode = "vi";
+
+	machdep_init(SETUP_ENV);
 }
 
 
@@ -840,13 +1035,22 @@
         if (pc->flags & REM_S390D)
                 sprintf(&buf[strlen(buf)],
                         "%sREM_S390D", others++ ? "|" : "");
-       if (pc->flags & NETDUMP)
+        if (pc->flags & NETDUMP)
                 sprintf(&buf[strlen(buf)],
                         "%sNETDUMP", others++ ? "|" : "");
+        if (pc->flags & XENDUMP)
+                sprintf(&buf[strlen(buf)],
+                        "%sXENDUMP", others++ ? "|" : "");
+        if (pc->flags & KDUMP)
+                sprintf(&buf[strlen(buf)],
+                        "%sKDUMP", others++ ? "|" : "");
+        if (pc->flags & SYSRQ)
+                sprintf(&buf[strlen(buf)],
+                        "%sSYSRQ", others++ ? "|" : "");
         if (pc->flags & REM_NETDUMP)
                 sprintf(&buf[strlen(buf)],
                         "%sREM_NETDUMP", others++ ? "|" : "");
-       if (pc->flags & DISKDUMP)
+        if (pc->flags & DISKDUMP)
                 sprintf(&buf[strlen(buf)],
                         "%sDISKDUMP", others++ ? "|" : "");
         if (pc->flags & SYSMAP)
@@ -855,21 +1059,42 @@
         if (pc->flags & SYSMAP_ARG)
                 sprintf(&buf[strlen(buf)],
                         "%sSYSMAP_ARG", others++ ? "|" : "");
-       if (pc->flags & DATADEBUG)
+        if (pc->flags & DATADEBUG)
                 sprintf(&buf[strlen(buf)],
                         "%sDATADEBUG", others++ ? "|" : "");
-       if (pc->flags & FINDKERNEL)
+	if (pc->flags & FINDKERNEL)
                 sprintf(&buf[strlen(buf)],
                         "%sFINDKERNEL", others++ ? "|" : "");
-       if (pc->flags & VERSION_QUERY)
+        if (pc->flags & VERSION_QUERY)
                 sprintf(&buf[strlen(buf)],
                         "%sVERSION_QUERY", others++ ? "|" : "");
-       if (pc->flags & READNOW)
+        if (pc->flags & READNOW)
                 sprintf(&buf[strlen(buf)],
                         "%sREADNOW", others++ ? "|" : "");
-       if (pc->flags & NOCRASHRC)
+        if (pc->flags & NOCRASHRC)
                 sprintf(&buf[strlen(buf)],
                         "%sNOCRASHRC", others++ ? "|" : "");
+        if (pc->flags & INIT_IFILE)
+                sprintf(&buf[strlen(buf)],
+                        "%sINIT_IFILE", others++ ? "|" : "");
+        if (pc->flags & XEN_HYPER)
+                sprintf(&buf[strlen(buf)],
+                        "%sXEN_HYPER", others++ ? "|" : "");
+        if (pc->flags & XEN_CORE)
+                sprintf(&buf[strlen(buf)],
+                        "%sXEN_CORE", others++ ? "|" : "");
+        if (pc->flags & PLEASE_WAIT)
+                sprintf(&buf[strlen(buf)],
+                        "%sPLEASE_WAIT", others++ ? "|" : "");
+        if (pc->flags & IFILE_ERROR)
+                sprintf(&buf[strlen(buf)],
+                        "%sIFILE_ERROR", others++ ? "|" : "");
+        if (pc->flags & MINIMAL_MODE)
+                sprintf(&buf[strlen(buf)],
+                        "%sMINIMAL_MODE", others++ ? "|" : "");
+        if (pc->flags & CRASHBUILTIN)
+                sprintf(&buf[strlen(buf)], 
+			"%sCRASHBUILTIN", others++ ? "|" : "");
 
 	if (pc->flags)
 		strcat(buf, ")");
@@ -933,10 +1158,36 @@
 	fprintf(fp, "       ifile_pipe: %lx\n", (ulong)pc->ifile_pipe);
 	fprintf(fp, "      ifile_ofile: %lx\n", (ulong)pc->ifile_ofile);
 	fprintf(fp, "       input_file: %s\n", pc->input_file);
-	fprintf(fp, "   scroll_command: %s\n", 
-		pc->scroll_command == SCROLL_NONE ? "(none)" :
-		    pc->scroll_command == SCROLL_LESS ? 
-			"/usr/bin/less" : "/bin/more");
+	fprintf(fp, "ifile_in_progress: %lx (", pc->ifile_in_progress);
+	others = 0;
+	if (pc->ifile_in_progress & RCHOME_IFILE)
+		fprintf(fp, "%sRCHOME_IFILE", others++ ? "|" : "");
+	if (pc->ifile_in_progress & RCLOCAL_IFILE)
+		fprintf(fp, "%sRCLOCAL_IFILE", others++ ? "|" : "");
+	if (pc->ifile_in_progress & CMDLINE_IFILE)
+		fprintf(fp, "%sCMDLINE_IFILE", others++ ? "|" : "");
+	if (pc->ifile_in_progress & RUNTIME_IFILE)
+		fprintf(fp, "%sRUNTIME_IFILE", others++ ? "|" : "");
+	fprintf(fp, ")\n");
+	fprintf(fp, "     ifile_offset: %lld\n", (ulonglong)pc->ifile_offset);
+	fprintf(fp, "runtime_ifile_cmd: %s\n", pc->runtime_ifile_cmd ?
+                pc->runtime_ifile_cmd : "(unused)");
+	fprintf(fp, "   scroll_command: ");
+	switch (pc->scroll_command) 
+	{
+	case SCROLL_NONE:
+		fprintf(fp, "SCROLL_NONE\n");
+		break;
+	case SCROLL_LESS:
+		fprintf(fp, "SCROLL_LESS\n");
+		break;
+	case SCROLL_MORE:
+		fprintf(fp, "SCROLL_MORE\n");
+		break;
+	case SCROLL_CRASHPAGER:
+		fprintf(fp, "SCROLL_CRASHPAGER (%s)\n", getenv("CRASHPAGER"));
+		break;
+	}
 
 	buf[0] = NULLCHAR;
 	fprintf(fp, "         redirect: %lx ", pc->redirect);
@@ -1008,6 +1259,8 @@
 	fprintf(fp, "           tmp_fp: %lx\n", (ulong)pc->tmp_fp);
 	fprintf(fp, "         tmpfile2: %lx\n", (ulong)pc->tmpfile2);
 
+	fprintf(fp, "        cmd_table: %s\n", XEN_HYPER_MODE() ?
+		"xen_hyper_command_table" : "linux_command_table");
 	fprintf(fp, "           curcmd: %s\n", pc->curcmd);
 	fprintf(fp, "          lastcmd: %s\n", pc->lastcmd);
 	fprintf(fp, "      cur_gdb_cmd: %d  %s\n", pc->cur_gdb_cmd,
@@ -1016,7 +1269,30 @@
 		gdb_command_string(pc->last_gdb_cmd, buf, FALSE));
 	fprintf(fp, "          cur_req: %lx\n", (ulong)pc->cur_req);
 	fprintf(fp, "        cmdgencur: %ld\n", pc->cmdgencur); 
-	fprintf(fp, "       cmdgenspec: %ld\n", pc->cmdgenspec); 
+	fprintf(fp, "     curcmd_flags: %lx (", pc->curcmd_flags);
+	others = 0;
+        if (pc->curcmd_flags & XEN_MACHINE_ADDR)
+		fprintf(fp, "%sXEN_MACHINE_ADDR", others ? "|" : "");
+        if (pc->curcmd_flags & REPEAT)
+		fprintf(fp, "%sREPEAT", others ? "|" : "");
+        if (pc->curcmd_flags & IDLE_TASK_SHOWN)
+		fprintf(fp, "%sIDLE_TASK_SHOWN", others ? "|" : "");
+        if (pc->curcmd_flags & TASK_SPECIFIED)
+		fprintf(fp, "%sTASK_SPECIFIED", others ? "|" : "");
+        if (pc->curcmd_flags & MEMTYPE_UVADDR)
+		fprintf(fp, "%sMEMTYPE_UVADDR", others ? "|" : "");
+        if (pc->curcmd_flags & MEMTYPE_FILEADDR)
+		fprintf(fp, "%sMEMTYPE_FILEADDR", others ? "|" : "");
+        if (pc->curcmd_flags & HEADER_PRINTED)
+		fprintf(fp, "%sHEADER_PRINTED", others ? "|" : "");
+        if (pc->curcmd_flags & BAD_INSTRUCTION)
+		fprintf(fp, "%sBAD_INSTRUCTION", others ? "|" : "");
+        if (pc->curcmd_flags & UD2A_INSTRUCTION)
+		fprintf(fp, "%sUD2A_INSTRUCTION", others ? "|" : "");
+        if (pc->curcmd_flags & IRQ_IN_USE)
+		fprintf(fp, "%sIRQ_IN_USE", others ? "|" : "");
+	fprintf(fp, ")\n");
+	fprintf(fp, "   curcmd_private: %llx\n", pc->curcmd_private); 
 	fprintf(fp, "       sigint_cnt: %d\n", pc->sigint_cnt);
 	fprintf(fp, "        sigaction: %lx\n", (ulong)&pc->sigaction);
 	fprintf(fp, "    gdb_sigaction: %lx\n", (ulong)&pc->gdb_sigaction);
@@ -1051,8 +1327,16 @@
 		fprintf(fp, "          readmem: read_daemon()\n");
 	else if (pc->readmem == read_netdump)
 		fprintf(fp, "          readmem: read_netdump()\n");
+	else if (pc->readmem == read_xendump)
+		fprintf(fp, "          readmem: read_xendump()\n");
+	else if (pc->readmem == read_kdump)
+		fprintf(fp, "          readmem: read_kdump()\n");
 	else if (pc->readmem == read_memory_device)
 		fprintf(fp, "          readmem: read_memory_device()\n");
+	else if (pc->readmem == read_xendump_hyper)
+		fprintf(fp, "          readmem: read_xendump_hyper()\n");
+	else if (pc->readmem == read_diskdump)
+		fprintf(fp, "          readmem: read_diskdump()\n");
 	else
 		fprintf(fp, "          readmem: %lx\n", (ulong)pc->readmem);
         if (pc->writemem == write_dev_mem)
@@ -1065,8 +1349,14 @@
                 fprintf(fp, "         writemem: write_daemon()\n");
         else if (pc->writemem == write_netdump)
                 fprintf(fp, "         writemem: write_netdump()\n");
+        else if (pc->writemem == write_xendump)
+                fprintf(fp, "         writemem: write_xendump()\n");
+        else if (pc->writemem == write_kdump)
+                fprintf(fp, "         writemem: write_kdump()\n");
         else if (pc->writemem == write_memory_device)
                 fprintf(fp, "         writemem: write_memory_device()\n");
+        else if (pc->writemem == write_diskdump)
+                fprintf(fp, "         writemem: write_diskdump()\n");
         else
                 fprintf(fp, "         writemem: %lx\n", (ulong)pc->writemem);
 
@@ -1100,3 +1390,71 @@
 
 	exit(status);
 }
+
+/*
+ *  Check whether this session is for xen hypervisor analysis.
+ */
+static void
+check_xen_hyper(void)
+{
+	if (!pc->namelist)
+		return;
+
+	if (!XEN_HYPER_MODE()) {
+		if (STRNEQ(basename(pc->namelist), "xen-syms"))
+			pc->flags |= XEN_HYPER;
+		else
+			return;
+	}
+
+#ifdef XEN_HYPERVISOR_ARCH
+	pc->cmd_table = xen_hyper_command_table;
+	if (pc->flags & XENDUMP)
+		pc->readmem = read_xendump_hyper;
+#else
+	error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
+#endif
+}
+
+/*
+ *  Reject untrusted .crashrc, $HOME/.crashrc, 
+ *  .gdbinit, and $HOME/.gdbinit files.
+ */
+static char *untrusted_file_list[4] = { 0 };
+
+int
+untrusted_file(FILE *filep, char *filename)
+{
+	struct stat sbuf;
+	int i;
+
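+	/*
+	 * A file is trusted only if it is owned by the invoking user and
+	 * is not world-writable; anything else is recorded in
+	 * untrusted_file_list[] and reported later by show_untrusted_files().
+	 */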
+	if (filep && (fstat(fileno(filep), &sbuf) == 0) &&
+	    (sbuf.st_uid == getuid()) && !(sbuf.st_mode & S_IWOTH))
+		return FALSE;
+	
+	for (i = 0; i < 4; i++) {
+		if (!untrusted_file_list[i]) {
+			untrusted_file_list[i] = strdup(filename);
+			break;
+		}
+	}
+
+	return TRUE;
+}
+
+static void
+show_untrusted_files(void)
+{
+	int i, cnt;
+
+	for (i = cnt = 0; i < 4; i++) {
+		if (untrusted_file_list[i]) {
+			error(WARNING, "not using untrusted file: \"%s\"\n", 
+				untrusted_file_list[i]);
+			free(untrusted_file_list[i]);
+			cnt++;
+		}
+	}
+	if (cnt)
+		fprintf(fp, "\n");
+}
--- crash/xen_hyper.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/xen_hyper.c	2008-12-03 12:03:24.000000000 -0500
@@ -0,0 +1,2019 @@
+/*
+ *  xen_hyper.c
+ *
+ *  Portions Copyright (C) 2006-2007 Fujitsu Limited
+ *  Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K.
+ *
+ *  Authors: Itsuro Oda <oda@valinux.co.jp>
+ *           Fumihiko Kakuma <kakuma@valinux.co.jp>
+ *
+ *  This file is part of Xencrash.
+ *
+ *  Xencrash is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation (version 2 of the License).
+ *
+ *  Xencrash is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with Xencrash; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
+ */
+
+#include "defs.h"
+
+#ifdef XEN_HYPERVISOR_ARCH
+#include "xen_hyper_defs.h"
+
+static void xen_hyper_schedule_init(void);
+
+/*
+ * Do initialization for Xen Hyper system here.
+ */
+void
+xen_hyper_init(void)
+{
+	char *buf;
+#if defined(X86) || defined(X86_64)
+	long member_offset;
+#endif
+
+	if (machine_type("X86_64") &&
+	    symbol_exists("xen_phys_start") && !xen_phys_start())
+		error(WARNING, 
+	 	    "This hypervisor is relocatable; if initialization fails below, try\n"
+                    "         using the \"--xen_phys_start <address>\" command line option.\n\n");
+
+	if (symbol_exists("crashing_cpu")) {
+		get_symbol_data("crashing_cpu", sizeof(xht->crashing_cpu),
+			&xht->crashing_cpu);
+	} else {
+		xht->crashing_cpu = XEN_HYPER_PCPU_ID_INVALID;
+	}
+	machdep->get_smp_cpus();
+	machdep->memory_size();
+
+#ifdef IA64
+	if (symbol_exists("__per_cpu_offset")) {
+		xht->flags |= XEN_HYPER_SMP;
+		if((xht->__per_cpu_offset = malloc(sizeof(ulong) * XEN_HYPER_MAX_CPUS())) == NULL) {
+			error(FATAL, "cannot malloc __per_cpu_offset space.\n");
+		}
+		if (!readmem(symbol_value("__per_cpu_offset"), KVADDR,
+		xht->__per_cpu_offset, sizeof(ulong) * XEN_HYPER_MAX_CPUS(),
+		"__per_cpu_offset", RETURN_ON_ERROR)) {
+			error(FATAL, "cannot read __per_cpu_offset.\n");
+		}
+	}
+#endif
+
+#if defined(X86) || defined(X86_64)
+	if (symbol_exists("__per_cpu_shift")) {
+		xht->percpu_shift = (int)symbol_value("__per_cpu_shift");
+	} else if (xen_major_version() >= 3 && xen_minor_version() >= 3) {
+		xht->percpu_shift = 13;
+	} else {
+		xht->percpu_shift = 12;
+	}
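+	/*
+	 * percpu_shift is presumably consumed by the xen_hyper_per_cpu()
+	 * helper to locate a cpu's copy of a per-cpu symbol, typically as
+	 * symbol address + (cpu << percpu_shift).
+	 */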
+	member_offset = MEMBER_OFFSET("cpuinfo_x86", "x86_model_id");
+	buf = GETBUF(XEN_HYPER_SIZE(cpuinfo_x86));	
+	if (xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) {
+		xen_hyper_x86_fill_cpu_data(XEN_HYPER_CRASHING_CPU(), buf);
+	} else {
+		xen_hyper_x86_fill_cpu_data(xht->cpu_idxs[0], buf);
+	}
+	strncpy(xht->utsname.machine, (char *)(buf + member_offset),
+		sizeof(xht->utsname.machine)-1);
+	FREEBUF(buf);
+#elif defined(IA64)
+	buf = GETBUF(XEN_HYPER_SIZE(cpuinfo_ia64));
+	if (xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) {
+		xen_hyper_ia64_fill_cpu_data(XEN_HYPER_CRASHING_CPU(), buf);
+	} else {
+		xen_hyper_ia64_fill_cpu_data(xht->cpu_idxs[0], buf);
+	}
+	strncpy(xht->utsname.machine, (char *)(buf + XEN_HYPER_OFFSET(cpuinfo_ia64_vendor)),
+		sizeof(xht->utsname.machine)-1);
+	FREEBUF(buf);
+#endif
+
+#ifndef IA64
+	XEN_HYPER_STRUCT_SIZE_INIT(note_buf_t, "note_buf_t");
+	XEN_HYPER_STRUCT_SIZE_INIT(crash_note_t, "crash_note_t");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_core, "crash_note_t", "core");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen, "crash_note_t", "xen");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen_regs, "crash_note_t", "xen_regs");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen_info, "crash_note_t", "xen_info");
+
+	XEN_HYPER_STRUCT_SIZE_INIT(crash_note_core_t, "crash_note_core_t");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_core_t_note, "crash_note_core_t", "note");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_core_t_desc, "crash_note_core_t", "desc");
+
+	XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_t, "crash_note_xen_t");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_t_note, "crash_note_xen_t", "note");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_t_desc, "crash_note_xen_t", "desc");
+	XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_core_t, "crash_note_xen_core_t");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_core_t_note, "crash_note_xen_core_t", "note");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_core_t_desc, "crash_note_xen_core_t", "desc");
+	XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_info_t, "crash_note_xen_info_t");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_info_t_note, "crash_note_xen_info_t", "note");
+	XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_info_t_desc, "crash_note_xen_info_t", "desc");
+	XEN_HYPER_STRUCT_SIZE_INIT(crash_xen_core_t, "crash_xen_core_t");
+	XEN_HYPER_STRUCT_SIZE_INIT(crash_xen_info_t, "crash_xen_info_t");
+	XEN_HYPER_STRUCT_SIZE_INIT(xen_crash_xen_regs_t, "xen_crash_xen_regs_t");
+
+	XEN_HYPER_STRUCT_SIZE_INIT(ELF_Prstatus,"ELF_Prstatus");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_info, "ELF_Prstatus", "pr_info");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cursig, "ELF_Prstatus", "pr_cursig");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sigpend, "ELF_Prstatus", "pr_sigpend");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sighold, "ELF_Prstatus", "pr_sighold");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_pid, "ELF_Prstatus", "pr_pid");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_ppid, "ELF_Prstatus", "pr_ppid");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_pgrp, "ELF_Prstatus", "pr_pgrp");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sid, "ELF_Prstatus", "pr_sid");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_utime, "ELF_Prstatus", "pr_utime");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_stime, "ELF_Prstatus", "pr_stime");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cutime, "ELF_Prstatus", "pr_cutime");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cstime, "ELF_Prstatus", "pr_cstime");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_reg, "ELF_Prstatus", "pr_reg");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_fpvalid, "ELF_Prstatus", "pr_fpvalid");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Timeval_tv_sec, "ELF_Timeval", "tv_sec");
+	XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Timeval_tv_usec, "ELF_Timeval", "tv_usec");
+	XEN_HYPER_STRUCT_SIZE_INIT(ELF_Signifo,"ELF_Signifo");
+	XEN_HYPER_STRUCT_SIZE_INIT(ELF_Gregset,"ELF_Gregset");
+	XEN_HYPER_STRUCT_SIZE_INIT(ELF_Timeval,"ELF_Timeval");
+#endif
+	XEN_HYPER_STRUCT_SIZE_INIT(domain, "domain");
+	XEN_HYPER_STRUCT_SIZE_INIT(vcpu, "vcpu");
+#ifndef IA64
+	XEN_HYPER_STRUCT_SIZE_INIT(cpu_info, "cpu_info");
+#endif
+	XEN_HYPER_STRUCT_SIZE_INIT(cpu_user_regs, "cpu_user_regs");
+
+	xht->idle_vcpu_size = get_array_length("idle_vcpu", NULL, 0);
+	xht->idle_vcpu_array = (ulong *)malloc(xht->idle_vcpu_size * sizeof(ulong));
+	if (xht->idle_vcpu_array == NULL) {
+		error(FATAL, "cannot malloc idle_vcpu_array space.\n");
+	}
+	if (!readmem(symbol_value("idle_vcpu"), KVADDR, xht->idle_vcpu_array,
+		xht->idle_vcpu_size * sizeof(ulong), "idle_vcpu_array",
+		RETURN_ON_ERROR)) {
+		error(FATAL, "cannot read idle_vcpu array.\n");
+	}
+
+	/*
+	 * Do some initialization.
+	 */
+#ifndef IA64
+	xen_hyper_dumpinfo_init();
+#endif
+	xhmachdep->pcpu_init();
+	xen_hyper_domain_init();
+	xen_hyper_vcpu_init();
+	xen_hyper_misc_init();
+	/*
+	 * xen_hyper_post_init() has to be called after all of the
+	 * initialization functions above have finished.
+	 */
+	xen_hyper_post_init();
+}
+
+/*
+ * Do initialization for Domain of Xen Hyper system here.
+ */
+void
+xen_hyper_domain_init(void)
+{
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_domain_id, "domain", "domain_id");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_tot_pages, "domain", "tot_pages");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_max_pages, "domain", "max_pages");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_xenheap_pages, "domain", "xenheap_pages");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_shared_info, "domain", "shared_info");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_sched_priv, "domain", "sched_priv");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_next_in_list, "domain", "next_in_list");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_domain_flags, "domain", "domain_flags");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_evtchn, "domain", "evtchn");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_hvm, "domain", "is_hvm");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_privileged, "domain", "is_privileged");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_debugger_attached, "domain", "debugger_attached");
+
+	/*
+	 * Will be removed in Xen 4.4 (hg ae9b223a675d),
+	 * need to check that with XEN_HYPER_VALID_MEMBER() before using
+	 */
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_polling, "domain", "is_polling");
+
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_dying, "domain", "is_dying");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_paused_by_controller, "domain", "is_paused_by_controller");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_shutting_down, "domain", "is_shutting_down");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_shut_down, "domain", "is_shut_down");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_vcpu, "domain", "vcpu");
+	XEN_HYPER_MEMBER_OFFSET_INIT(domain_arch, "domain", "arch");
+
+	XEN_HYPER_STRUCT_SIZE_INIT(arch_shared_info, "arch_shared_info");
+	XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_max_pfn, "arch_shared_info", "max_pfn");
+	XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_pfn_to_mfn_frame_list_list, "arch_shared_info", "pfn_to_mfn_frame_list_list");
+	XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_nmi_reason, "arch_shared_info", "nmi_reason");
+
+	XEN_HYPER_STRUCT_SIZE_INIT(shared_info, "shared_info");
+	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_vcpu_info, "shared_info", "vcpu_info");
+	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_evtchn_pending, "shared_info", "evtchn_pending");
+	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_evtchn_mask, "shared_info", "evtchn_mask");
+	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_arch, "shared_info", "arch");
+
+	XEN_HYPER_STRUCT_SIZE_INIT(arch_domain, "arch_domain");
+#ifdef IA64
+	XEN_HYPER_MEMBER_OFFSET_INIT(arch_domain_mm, "arch_domain", "mm");
+
+	XEN_HYPER_STRUCT_SIZE_INIT(mm_struct, "mm_struct");
+	XEN_HYPER_MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd");
+#endif
+
+	if((xhdt->domain_struct = malloc(XEN_HYPER_SIZE(domain))) == NULL) {
+		error(FATAL, "cannot malloc domain struct space.\n");
+	}
+	if((xhdt->domain_struct_verify = malloc(XEN_HYPER_SIZE(domain))) == NULL) {
+		error(FATAL, "cannot malloc domain struct space for verification.\n");
+	}
+	xen_hyper_refresh_domain_context_space();
+	xhdt->flags |= XEN_HYPER_DOMAIN_F_INIT;
+}
+
+/*
+ * Do initialization for vcpu of Xen Hyper system here.
+ */
+void
+xen_hyper_vcpu_init(void)
+{
+	XEN_HYPER_STRUCT_SIZE_INIT(timer, "timer");
+	XEN_HYPER_MEMBER_OFFSET_INIT(timer_expires, "timer", "expires");
+	XEN_HYPER_MEMBER_OFFSET_INIT(timer_cpu, "timer", "cpu");
+	XEN_HYPER_MEMBER_OFFSET_INIT(timer_function, "timer", "function");
+	XEN_HYPER_MEMBER_OFFSET_INIT(timer_data, "timer", "data");
+	XEN_HYPER_MEMBER_OFFSET_INIT(timer_heap_offset, "timer", "heap_offset");
+	XEN_HYPER_MEMBER_OFFSET_INIT(timer_killed, "timer", "killed");
+
+	XEN_HYPER_STRUCT_SIZE_INIT(vcpu_runstate_info, "vcpu_runstate_info");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_state, "vcpu_runstate_info", "state");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_state_entry_time, "vcpu_runstate_info", "state_entry_time");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_time, "vcpu_runstate_info", "time");
+
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_id, "vcpu", "vcpu_id");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_processor, "vcpu", "processor");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_info, "vcpu", "vcpu_info");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_domain, "vcpu", "domain");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_next_in_list, "vcpu", "next_in_list");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_timer, "vcpu", "timer");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_sleep_tick, "vcpu", "sleep_tick");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_poll_timer, "vcpu", "poll_timer");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_sched_priv, "vcpu", "sched_priv");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate, "vcpu", "runstate");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_guest, "vcpu", "runstate_guest");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_flags, "vcpu", "vcpu_flags");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_pause_count, "vcpu", "pause_count");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_virq_to_evtchn, "vcpu", "virq_to_evtchn");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_cpu_affinity, "vcpu", "cpu_affinity");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_nmi_addr, "vcpu", "nmi_addr");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_dirty_cpumask, "vcpu", "vcpu_dirty_cpumask");
+	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_arch, "vcpu", "arch");
+
+#ifdef IA64
+	XEN_HYPER_ASSIGN_OFFSET(vcpu_thread_ksp) =
+		MEMBER_OFFSET("vcpu", "arch") + MEMBER_OFFSET("arch_vcpu", "_thread") +
+		MEMBER_OFFSET("thread_struct", "ksp");
+#endif
+
+	if((xhvct->vcpu_struct = malloc(XEN_HYPER_SIZE(vcpu))) == NULL) {
+		error(FATAL, "cannot malloc vcpu struct space.\n");
+	}
+	if((xhvct->vcpu_struct_verify = malloc(XEN_HYPER_SIZE(vcpu))) == NULL) {
+		error(FATAL, "cannot malloc vcpu struct space for verification.\n");
+	}
+
+	xen_hyper_refresh_vcpu_context_space();
+	xhvct->flags |= XEN_HYPER_VCPU_F_INIT;
+	xhvct->idle_vcpu = symbol_value("idle_vcpu");
+}
+
+/*
+ * Do initialization for pcpu of Xen Hyper system here.
+ */
+#if defined(X86) || defined(X86_64)
+void
+xen_hyper_x86_pcpu_init(void)
+{
+	ulong cpu_info;
+	ulong init_tss_base, init_tss;
+	ulong sp;
+	struct xen_hyper_pcpu_context *pcc;
+	char *buf, *bp;
+	int i, cpuid;
+
+	XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_guest_cpu_user_regs, "cpu_info", "guest_cpu_user_regs");
+	XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_processor_id, "cpu_info", "processor_id");
+	XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_current_vcpu, "cpu_info", "current_vcpu");
+
+	if((xhpct->pcpu_struct = malloc(XEN_HYPER_SIZE(cpu_info))) == NULL) {
+		error(FATAL, "cannot malloc pcpu struct space.\n");
+	}
+
+	/* get physical cpu context */
+	xen_hyper_alloc_pcpu_context_space(XEN_HYPER_MAX_CPUS());
+	init_tss_base = symbol_value("init_tss");
+	buf = GETBUF(XEN_HYPER_SIZE(tss_struct));	
+	for_cpu_indexes(i, cpuid)
+	{
+		init_tss = init_tss_base + XEN_HYPER_SIZE(tss_struct) * cpuid;
+		if (!readmem(init_tss, KVADDR, buf,
+			XEN_HYPER_SIZE(tss_struct), "init_tss", RETURN_ON_ERROR)) {
+			error(FATAL, "cannot read init_tss.\n");
+		}
+		if (machine_type("X86")) {
+			sp = ULONG(buf + XEN_HYPER_OFFSET(tss_struct_esp0));
+		} else if (machine_type("X86_64")) {
+			sp = ULONG(buf + XEN_HYPER_OFFSET(tss_struct_rsp0));
+		}
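+		/*
+		 * The cpu_info structure sits at the top of each physical
+		 * cpu's hypervisor stack, so XEN_HYPER_GET_CPU_INFO()
+		 * presumably derives its address from the TSS stack pointer
+		 * read above.
+		 */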
+		cpu_info = XEN_HYPER_GET_CPU_INFO(sp);
+		if (CRASHDEBUG(1)) {
+			fprintf(fp, "sp=%lx, cpu_info=%lx\n", sp, cpu_info);
+		}
+		if(!(bp = xen_hyper_read_pcpu(cpu_info))) {
+			error(FATAL, "cannot read cpu_info.\n");
+		}
+		pcc = &xhpct->context_array[cpuid];
+		xen_hyper_store_pcpu_context(pcc, cpu_info, bp);
+		xen_hyper_store_pcpu_context_tss(pcc, init_tss, buf);
+	}
+	FREEBUF(buf);
+}
+
+#elif defined(IA64)
+void
+xen_hyper_ia64_pcpu_init(void)
+{
+	struct xen_hyper_pcpu_context *pcc;
+	int i, cpuid;
+
+	/* get physical cpu context */
+	xen_hyper_alloc_pcpu_context_space(XEN_HYPER_MAX_CPUS());
+	for_cpu_indexes(i, cpuid)
+	{
+		pcc = &xhpct->context_array[cpuid];
+		pcc->processor_id = cpuid;
+	}
+}
+#endif
+
+/*
+ * Do initialization for some miscellaneous thing
+ * of Xen Hyper system here.
+ */
+void
+xen_hyper_misc_init(void)
+{
+	XEN_HYPER_STRUCT_SIZE_INIT(schedule_data, "schedule_data");
+	XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "schedule_data", "schedule_lock");
+	XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "schedule_data", "curr");
+	XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_idle, "schedule_data", "idle");
+	XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "schedule_data", "sched_priv");
+	XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "schedule_data", "s_timer");
+	XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_tick, "schedule_data", "tick");
+
+	XEN_HYPER_STRUCT_SIZE_INIT(scheduler, "scheduler");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_name, "scheduler", "name");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_opt_name, "scheduler", "opt_name");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_sched_id, "scheduler", "sched_id");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_init, "scheduler", "init");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_tick, "scheduler", "tick");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_init_vcpu, "scheduler", "init_vcpu");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_destroy_domain, "scheduler", "destroy_domain");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_sleep, "scheduler", "sleep");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_wake, "scheduler", "wake");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_set_affinity, "scheduler", "set_affinity");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_do_schedule, "scheduler", "do_schedule");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_adjust, "scheduler", "adjust");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_dump_settings, "scheduler", "dump_settings");
+	XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_dump_cpu_state, "scheduler", "dump_cpu_state");
+
+	xen_hyper_schedule_init();
+}
+
+/*
+ * Do initialization for scheduler of Xen Hyper system here.
+ */
+#define XEN_HYPER_SCHEDULERS_ARRAY_CNT 10
+#define XEN_HYPER_SCHEDULER_NAME 1024
+
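+/*
+ * The active scheduler is located by walking the schedulers[] array of
+ * scheduler pointers and comparing each entry's opt_name string against
+ * the opt_sched boot parameter read from the hypervisor.
+ */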
+static void
+xen_hyper_schedule_init(void)
+{
+	ulong addr, opt_sched, schedulers, opt_name;
+	long scheduler_opt_name;
+	long schedulers_buf[XEN_HYPER_SCHEDULERS_ARRAY_CNT];
+	struct xen_hyper_sched_context *schc;
+	char *buf;
+	char opt_name_buf[XEN_HYPER_OPT_SCHED_SIZE];
+	int i, cpuid, flag;
+
+	/* get scheduler information */
+	if((xhscht->scheduler_struct =
+	malloc(XEN_HYPER_SIZE(scheduler))) == NULL) {
+		error(FATAL, "cannot malloc scheduler struct space.\n");
+	}
+	buf = GETBUF(XEN_HYPER_SCHEDULER_NAME);	
+	opt_sched = symbol_value("opt_sched");
+	if (!readmem(opt_sched, KVADDR, xhscht->opt_sched,
+	XEN_HYPER_OPT_SCHED_SIZE, "opt_sched", RETURN_ON_ERROR)) {
+		error(FATAL, "cannot read opt_sched.\n");
+	}
+	schedulers = symbol_value("schedulers");
+	scheduler_opt_name = XEN_HYPER_OFFSET(scheduler_opt_name);
+	addr = schedulers;
+	while (xhscht->name == NULL) {
+		if (!readmem(addr, KVADDR, schedulers_buf,
+		sizeof(long) * XEN_HYPER_SCHEDULERS_ARRAY_CNT,
+		"schedulers", RETURN_ON_ERROR)) {
+			error(FATAL, "cannot read schedulers.\n");
+		}
+		for (i = 0; i < XEN_HYPER_SCHEDULERS_ARRAY_CNT; i++) {
+			if (schedulers_buf[i] == 0) {
+				error(FATAL, "schedule data not found.\n");
+			}
+			if (!readmem(schedulers_buf[i], KVADDR,
+			xhscht->scheduler_struct, XEN_HYPER_SIZE(scheduler),
+			"scheduler", RETURN_ON_ERROR)) {
+				error(FATAL, "cannot read scheduler.\n");
+			}
+			opt_name = ULONG(xhscht->scheduler_struct +
+				scheduler_opt_name);
+			if (!readmem(opt_name, KVADDR, opt_name_buf,
+			XEN_HYPER_OPT_SCHED_SIZE, "opt_name", RETURN_ON_ERROR)) {
+				error(FATAL, "cannot read opt_name.\n");
+			}
+			if (strncmp(xhscht->opt_sched, opt_name_buf,
+			XEN_HYPER_OPT_SCHED_SIZE))
+				continue;
+			xhscht->scheduler = schedulers_buf[i];
+			xhscht->sched_id = INT(xhscht->scheduler_struct +
+				XEN_HYPER_OFFSET(scheduler_sched_id));
+			addr = ULONG(xhscht->scheduler_struct +
+				XEN_HYPER_OFFSET(scheduler_name));
+			if (!readmem(addr, KVADDR, buf, XEN_HYPER_SCHEDULER_NAME,
+			"scheduler_name", RETURN_ON_ERROR)) {
+				error(FATAL, "cannot read scheduler_name.\n");
+			}
+			if (strlen(buf) >= XEN_HYPER_SCHEDULER_NAME) {
+				error(FATAL, "cannot read scheduler_name.\n");
+			}
+			if((xhscht->name = malloc(strlen(buf) + 1)) == NULL) {
+				error(FATAL, "cannot malloc scheduler_name space.\n");
+			}
+			BZERO(xhscht->name, strlen(buf) + 1);
+			strncpy(xhscht->name, buf, strlen(buf));
+			break;
+		}
+		addr += sizeof(long) * XEN_HYPER_SCHEDULERS_ARRAY_CNT;
+	}
+	FREEBUF(buf);
+
+	/* get schedule_data information */
+	if((xhscht->sched_context_array =
+	malloc(sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS())) == NULL) {
+		error(FATAL, "cannot malloc xen_hyper_sched_context struct space.\n");
+	}
+	BZERO(xhscht->sched_context_array,
+		sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS());
+	buf = GETBUF(XEN_HYPER_SIZE(schedule_data));	
+	if (symbol_exists("per_cpu__schedule_data")) {
+		addr = symbol_value("per_cpu__schedule_data");
+		flag = TRUE;
+	} else {
+		addr = symbol_value("schedule_data");
+		flag = FALSE;
+	}
+	for_cpu_indexes(i, cpuid)
+	{
+		schc = &xhscht->sched_context_array[cpuid];
+		if (flag) {
+			schc->schedule_data =
+				xen_hyper_per_cpu(addr, i);
+		} else {
+			schc->schedule_data = addr +
+				XEN_HYPER_SIZE(schedule_data) * i;
+		}
+		if (!readmem(schc->schedule_data,
+			KVADDR, buf, XEN_HYPER_SIZE(schedule_data),
+		"schedule_data", RETURN_ON_ERROR)) {
+			error(FATAL, "cannot read schedule_data.\n");
+		}
+		schc->cpu_id = cpuid;
+		schc->curr = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_curr));
+		schc->idle = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_idle));
+		schc->sched_priv =
+			ULONG(buf + XEN_HYPER_OFFSET(schedule_data_sched_priv));
+		if (XEN_HYPER_VALID_MEMBER(schedule_data_tick))
+			schc->tick = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_tick));
+	}
+	FREEBUF(buf);
+}
+
+/*
+ * This should be called after all of the initialization above has finished.
+ */
+void
+xen_hyper_post_init(void)
+{
+	struct xen_hyper_pcpu_context *pcc;
+	int i, cpuid;
+
+	/* set current vcpu to pcpu context */
+	for_cpu_indexes(i, cpuid)
+	{
+		pcc = &xhpct->context_array[cpuid];
+		if (!pcc->current_vcpu) {
+			pcc->current_vcpu =
+				xen_hyper_get_active_vcpu_from_pcpuid(cpuid);
+		}
+	}
+
+	/* set pcpu last */
+	if (!(xhpct->last =
+		xen_hyper_id_to_pcpu_context(XEN_HYPER_CRASHING_CPU()))) {
+		xhpct->last = &xhpct->context_array[xht->cpu_idxs[0]];
+	}
+
+	/* set vcpu last */
+	if (xhpct->last) {
+		xhvct->last =
+			xen_hyper_vcpu_to_vcpu_context(xhpct->last->current_vcpu);
+		/* set crashing vcpu */
+		xht->crashing_vcc = xhvct->last;
+	}
+	if (!xhvct->last) {
+		xhvct->last = xhvct->vcpu_context_arrays->context_array;
+	}
+
+	/* set domain last */
+	if (xhvct->last) {
+		xhdt->last =
+			xen_hyper_domain_to_domain_context(xhvct->last->domain);
+	}
+	if (!xhdt->last) {
+		xhdt->last = xhdt->context_array;
+	}
+}
+
+/*
+ * Do initialization for dump information here.
+ */
+void
+xen_hyper_dumpinfo_init(void)
+{
+	Elf32_Nhdr *note;
+	char *buf, *bp, *np, *upp;
+	char *nccp, *xccp;
+	ulong addr;
+	long size;
+	int i, cpuid, samp_cpuid;
+
+	/*
+	 * NOTE kakuma: It is not clear which ELF note format each Xen
+	 * version uses, so the format is determined by checking which
+	 * symbols exist.
+	 */
+	if (STRUCT_EXISTS("note_buf_t"))
+		xhdit->note_ver = XEN_HYPER_ELF_NOTE_V1;
+	else if (STRUCT_EXISTS("crash_note_xen_t"))
+		xhdit->note_ver = XEN_HYPER_ELF_NOTE_V2;
+	else if (STRUCT_EXISTS("crash_xen_core_t")) {
+		if (STRUCT_EXISTS("crash_note_xen_core_t"))
+			xhdit->note_ver = XEN_HYPER_ELF_NOTE_V3;
+		else
+			xhdit->note_ver = XEN_HYPER_ELF_NOTE_V4;
+	} else {
+		error(WARNING, "found unsupported ELF note format while checking xen dumpinfo.\n");
+		return;
+	}
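+	/*
+	 * The note layout differs per version: V1 is a bare note_buf_t,
+	 * V2 and V3 wrap the notes in crash_note_t aggregates, and V4 is
+	 * a chain of raw ELF notes whose sizes are measured below.
+	 */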
+	if (!xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) {
+		error(WARNING, "crashing_cpu not found.\n");
+		return;
+	}
+
+	/* allocate a context area */
+	size = sizeof(struct xen_hyper_dumpinfo_context) * XEN_HYPER_MAX_CPUS();
+	if((xhdit->context_array = malloc(size)) == NULL) {
+		error(FATAL, "cannot malloc dumpinfo table context space.\n");
+	}
+	BZERO(xhdit->context_array, size);
+	size = sizeof(struct xen_hyper_dumpinfo_context_xen_core) * XEN_HYPER_MAX_CPUS();
+	if((xhdit->context_xen_core_array = malloc(size)) == NULL) {
+		error(FATAL, "cannot malloc dumpinfo table context_xen_core_array space.\n");
+	}
+	BZERO(xhdit->context_xen_core_array, size);
+	addr = symbol_value("per_cpu__crash_notes");
+	for (i = 0; i < XEN_HYPER_MAX_CPUS(); i++) {
+		ulong addr_notes;
+
+		addr_notes = xen_hyper_per_cpu(addr, i);
+		if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) {
+			if (!readmem(addr_notes, KVADDR, &(xhdit->context_array[i].note),
+			sizeof(ulong), "per_cpu__crash_notes", RETURN_ON_ERROR)) {
+				error(WARNING, "cannot read per_cpu__crash_notes.\n");
+				return;
+			}
+		} else {
+			xhdit->context_array[i].note = addr_notes;
+		}
+	}
+
+	if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V1) {
+		xhdit->note_size = XEN_HYPER_SIZE(note_buf_t);
+	} else if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) {
+		xhdit->note_size = XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE;
+	} else {
+		xhdit->note_size = XEN_HYPER_SIZE(crash_note_t);
+	}
+
+	/* read a sample note */
+	buf = GETBUF(xhdit->note_size);
+	if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4)
+		samp_cpuid = xht->cpu_idxs[0];
+	else
+		samp_cpuid = XEN_HYPER_CRASHING_CPU();
+	xhdit->xen_info_cpu = samp_cpuid;
+	if (!xen_hyper_fill_elf_notes(xhdit->context_array[samp_cpuid].note,
+	buf, XEN_HYPER_ELF_NOTE_FILL_T_NOTE)) {
+		error(FATAL, "cannot read per_cpu__crash_notes.\n");
+	}
+	bp = buf;
+
+	/* Get elf format information for each version. */
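+	/*
+	 * Each ELF note is an Elf32_Nhdr followed by the name and the
+	 * descriptor, each padded to a 4-byte boundary, which is what the
+	 * roundup() walks below rely on.
+	 */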
+	switch (xhdit->note_ver) {
+	case XEN_HYPER_ELF_NOTE_V1:
+		/* core data */
+		note = (Elf32_Nhdr *)bp;
+		np = bp + sizeof(Elf32_Nhdr);
+		upp = np + note->n_namesz;
+		upp = (char *)roundup((ulong)upp, 4);
+		xhdit->core_offset = (Elf_Word)((ulong)upp - (ulong)note);
+		note = (Elf32_Nhdr *)(upp + note->n_descsz);
+		/* cr3 data */
+		np = (char *)note + sizeof(Elf32_Nhdr);
+		upp = np + note->n_namesz;
+		upp = (char *)roundup((ulong)upp, 4);
+		upp = upp + note->n_descsz;
+		xhdit->core_size = upp - bp;
+		break;
+	case XEN_HYPER_ELF_NOTE_V2:
+		/* core data */
+		xhdit->core_offset = XEN_HYPER_OFFSET(crash_note_core_t_desc);
+		xhdit->core_size = XEN_HYPER_SIZE(crash_note_core_t);
+		/* xen core */
+		xhdit->xen_info_offset = XEN_HYPER_OFFSET(crash_note_xen_t_desc);
+		xhdit->xen_info_size = XEN_HYPER_SIZE(crash_note_xen_t);
+		break;
+	case XEN_HYPER_ELF_NOTE_V3:
+		/* core data */
+		xhdit->core_offset = XEN_HYPER_OFFSET(crash_note_core_t_desc);
+		xhdit->core_size = XEN_HYPER_SIZE(crash_note_core_t);
+		/* xen core */
+		xhdit->xen_core_offset = XEN_HYPER_OFFSET(crash_note_xen_core_t_desc);
+		xhdit->xen_core_size = XEN_HYPER_SIZE(crash_note_xen_core_t);
+		/* xen info */
+		xhdit->xen_info_offset = XEN_HYPER_OFFSET(crash_note_xen_info_t_desc);
+		xhdit->xen_info_size = XEN_HYPER_SIZE(crash_note_xen_info_t);
+		break;
+	case XEN_HYPER_ELF_NOTE_V4:
+		/* core data */
+		note = (Elf32_Nhdr *)bp;
+		np = bp + sizeof(Elf32_Nhdr);
+		upp = np + note->n_namesz;
+		upp = (char *)roundup((ulong)upp, 4);
+		xhdit->core_offset = (Elf_Word)((ulong)upp - (ulong)note);
+		upp = upp + note->n_descsz;
+		xhdit->core_size = (Elf_Word)((ulong)upp - (ulong)note);
+		if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE < xhdit->core_size + 32) {
+			error(WARNING, "note size assumed by crash is incorrect (core data).\n");
+			return;
+		}
+		/* xen core */
+		note = (Elf32_Nhdr *)upp;
+		np = (char *)note + sizeof(Elf32_Nhdr);
+		upp = np + note->n_namesz;
+		upp = (char *)roundup((ulong)upp, 4);
+		xhdit->xen_core_offset = (Elf_Word)((ulong)upp - (ulong)note);
+		upp = upp + note->n_descsz;
+		xhdit->xen_core_size = (Elf_Word)((ulong)upp - (ulong)note);
+		if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE <
+		xhdit->core_size + xhdit->xen_core_size + 32) {
+			error(WARNING, "note size assumed by crash is incorrect (xen core).\n");
+			return;
+		}
+		/* xen info */
+		note = (Elf32_Nhdr *)upp;
+		np = (char *)note + sizeof(Elf32_Nhdr);
+		upp = np + note->n_namesz;
+		upp = (char *)roundup((ulong)upp, 4);
+		xhdit->xen_info_offset = (Elf_Word)((ulong)upp - (ulong)note);
+		upp = upp + note->n_descsz;
+		xhdit->xen_info_size =  (Elf_Word)((ulong)upp - (ulong)note);
+		if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE <
+		xhdit->core_size + xhdit->xen_core_size + xhdit->xen_info_size) {
+			error(WARNING, "note size assumed by crash is incorrect (xen info).\n");
+			return;
+		}
+		xhdit->note_size = xhdit->core_size + xhdit->xen_core_size + xhdit->xen_info_size;
+		break;
+	default:
+		error(FATAL, "logic error occurred while checking the ELF note format.\n");
+	}
+
+	/* fill xen info context. */
+	if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V3) {
+		if((xhdit->crash_note_xen_info_ptr =
+		malloc(xhdit->xen_info_size)) == NULL) {
+			error(FATAL, "cannot malloc dumpinfo table "
+				"crash_note_xen_info_ptr space.\n");
+		}
+		memcpy(xhdit->crash_note_xen_info_ptr,
+			bp + xhdit->core_size + xhdit->xen_core_size,
+			xhdit->xen_info_size);
+		xhdit->context_xen_info.note =
+			xhdit->context_array[samp_cpuid].note +
+			xhdit->core_size + xhdit->xen_core_size;
+		xhdit->context_xen_info.pcpu_id = samp_cpuid;
+		xhdit->context_xen_info.crash_xen_info_ptr =
+			xhdit->crash_note_xen_info_ptr + xhdit->xen_info_offset;
+	}
+		
+	/* allocate note core */
+	size = xhdit->core_size * XEN_HYPER_NR_PCPUS();
+	if(!(xhdit->crash_note_core_array = malloc(size))) {
+		error(FATAL, "cannot malloc crash_note_core_array space.\n");
+	}
+	nccp = xhdit->crash_note_core_array;
+	BZERO(nccp, size);
+
+	/* allocate xen core */
+	if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V2) {
+		size = xhdit->xen_core_size * XEN_HYPER_NR_PCPUS();
+		if(!(xhdit->crash_note_xen_core_array = malloc(size))) {
+			error(FATAL, "cannot malloc dumpinfo table "
+				"crash_note_xen_core_array space.\n");
+		}
+		xccp = xhdit->crash_note_xen_core_array;
+		BZERO(xccp, size);
+	}
+
+	/* fill a context. */
+	for_cpu_indexes(i, cpuid)
+	{
+		/* fill core context. */
+		addr = xhdit->context_array[cpuid].note;
+		if (!xen_hyper_fill_elf_notes(addr, nccp,
+		XEN_HYPER_ELF_NOTE_FILL_T_CORE)) {
+			error(FATAL, "cannot read elf note core.\n");
+		}
+		xhdit->context_array[cpuid].pcpu_id = cpuid;
+		xhdit->context_array[cpuid].ELF_Prstatus_ptr =
+			nccp + xhdit->core_offset;
+		xhdit->context_array[cpuid].pr_reg_ptr =
+			nccp + xhdit->core_offset +
+			XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg);
+
+		/* Is there xen core data? */
+		if (xhdit->note_ver < XEN_HYPER_ELF_NOTE_V2) {
+			nccp += xhdit->core_size;
+			continue;
+		}
+		if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V2 &&
+		cpuid != samp_cpuid) {
+			xccp += xhdit->xen_core_size;
+			nccp += xhdit->core_size;
+			continue;
+		}
+
+		/* fill the xen core context (ELF note V2 or later). */
+		xhdit->context_xen_core_array[cpuid].note =
+			xhdit->context_array[cpuid].note +
+			xhdit->core_size;
+		xhdit->context_xen_core_array[cpuid].pcpu_id = cpuid;
+		xhdit->context_xen_core_array[cpuid].crash_xen_core_ptr =
+			xccp + xhdit->xen_core_offset;
+		if (!xen_hyper_fill_elf_notes(xhdit->context_xen_core_array[cpuid].note,
+		xccp, XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE)) {
+			error(FATAL, "cannot read elf note xen core.\n");
+		}
+		xccp += xhdit->xen_core_size;
+		nccp += xhdit->core_size;
+	}
+
+	FREEBUF(buf);
+}
+
+/*
+ * Get dump information context from physical cpu id.
+ */
+struct xen_hyper_dumpinfo_context *
+xen_hyper_id_to_dumpinfo_context(uint id)
+{
+	if (!xen_hyper_test_pcpu_id(id))
+		return NULL;
+	return &xhdit->context_array[id];
+}
+
+/*
+ * Get dump information context from ELF Note address.
+ */
+struct xen_hyper_dumpinfo_context *
+xen_hyper_note_to_dumpinfo_context(ulong note)
+{
+	int i;
+
+	for (i = 0; i < XEN_HYPER_MAX_CPUS(); i++) {
+		if (note == xhdit->context_array[i].note) {
+			return &xhdit->context_array[i];
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Fill ELF Notes header here.
+ * This assume that variable note has a top address of an area for
+ * specified type.
+ */
+char *
+xen_hyper_fill_elf_notes(ulong note, char *note_buf, int type)
+{
+	long size;
+	ulong rp = note;
+
+	if (type == XEN_HYPER_ELF_NOTE_FILL_T_NOTE)
+		size = xhdit->note_size;
+	else if (type == XEN_HYPER_ELF_NOTE_FILL_T_CORE)
+		size = xhdit->core_size;
+	else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE)
+		size = xhdit->xen_core_size;
+	else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE_M)
+		size = xhdit->core_size + xhdit->xen_core_size;
+	else if (type == XEN_HYPER_ELF_NOTE_FILL_T_PRS)
+		size = XEN_HYPER_SIZE(ELF_Prstatus);
+	else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_REGS)
+		size = XEN_HYPER_SIZE(xen_crash_xen_regs_t);
+	else
+		return NULL;
+
+	if (!readmem(rp, KVADDR, note_buf, size,
+		"note_buf_t or crash_note_t", RETURN_ON_ERROR)) {
+		if (type == XEN_HYPER_ELF_NOTE_FILL_T_NOTE)
+			error(WARNING, "cannot fill note_buf_t or crash_note_t.\n");
+		else if (type == XEN_HYPER_ELF_NOTE_FILL_T_CORE)
+			error(WARNING, "cannot fill note core.\n");
+		else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE)
+			error(WARNING, "cannot fill note xen core.\n");
+		else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE_M)
+			error(WARNING, "cannot fill note core & xen core.\n");
+		else if (type == XEN_HYPER_ELF_NOTE_FILL_T_PRS)
+			error(WARNING, "cannot fill ELF_Prstatus.\n");
+		else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_REGS)
+			error(WARNING, "cannot fill xen_crash_xen_regs_t.\n");
+		return NULL;
+	}
+	return note_buf;
+}
+
+
+
+/*
+ * Get domain status.
+ */
+ulong
+xen_hyper_domain_state(struct xen_hyper_domain_context *dc)
+{
+	if (ACTIVE()) {
+		if (xen_hyper_read_domain_verify(dc->domain) == NULL) {
+			return XEN_HYPER_DOMF_ERROR;
+		}
+	}
+	return dc->domain_flags;
+}
+
+/*
+ * Refresh the domain context space (re-read and store all domain contexts).
+ */
+void
+xen_hyper_refresh_domain_context_space(void)
+{
+	char *domain_struct;
+	ulong domain, next, dom_xen, dom_io, idle_vcpu;
+	struct xen_hyper_domain_context *dc;
+	struct xen_hyper_domain_context *dom0;
+	int i;
+
+	if ((xhdt->flags & XEN_HYPER_DOMAIN_F_INIT) && !ACTIVE()) {
+		return;
+	}
+
+	XEN_HYPER_RUNNING_DOMAINS() = XEN_HYPER_NR_DOMAINS() =
+		xen_hyper_get_domains();
+	xen_hyper_alloc_domain_context_space(XEN_HYPER_NR_DOMAINS());
+
+	dc = xhdt->context_array;
+
+	/* restore a dom_io context. */
+	get_symbol_data("dom_io", sizeof(dom_io), &dom_io);
+	if ((domain_struct = xen_hyper_read_domain(dom_io)) == NULL) {
+		error(FATAL, "cannot read dom_io.\n");
+	}
+	xen_hyper_store_domain_context(dc, dom_io, domain_struct);
+	xhdt->dom_io = dc;
+	dc++;
+
+	/* restore a dom_xen context. */
+	get_symbol_data("dom_xen", sizeof(dom_xen), &dom_xen);
+	if ((domain_struct = xen_hyper_read_domain(dom_xen)) == NULL) {
+		error(FATAL, "cannot read dom_xen.\n");
+	}
+	xen_hyper_store_domain_context(dc, dom_xen, domain_struct);
+	xhdt->dom_xen = dc;
+	dc++;
+
+	/* restore an idle domain context. */
+	for (i = 0; i < xht->idle_vcpu_size; i += XEN_HYPER_MAX_VIRT_CPUS) {
+		idle_vcpu = xht->idle_vcpu_array[i];
+		if (idle_vcpu == 0)
+			break;
+		if (!readmem(idle_vcpu + MEMBER_OFFSET("vcpu", "domain"),
+			KVADDR, &domain, sizeof(domain), "domain", RETURN_ON_ERROR)) {
+			error(FATAL, "cannot read domain member in vcpu.\n");
+		}
+		if (CRASHDEBUG(1)) {
+			fprintf(fp, "idle_vcpu=%lx, domain=%lx\n", idle_vcpu, domain);
+		}
+		if ((domain_struct = xen_hyper_read_domain(domain)) == NULL) {
+			error(FATAL, "cannot read idle domain.\n");
+		}
+		xen_hyper_store_domain_context(dc, domain, domain_struct);
+		if (i == 0)
+			xhdt->idle_domain = dc;
+		dc++;
+	}
+
+	/* restore domain contexts from dom0 symbol. */
+	xen_hyper_get_domain_next(XEN_HYPER_DOMAIN_READ_DOM0, &next);
+	domain = next;
+	dom0 = dc;
+	while((domain_struct =
+	xen_hyper_get_domain_next(XEN_HYPER_DOMAIN_READ_NEXT, &next)) != NULL) {
+		xen_hyper_store_domain_context(dc, domain, domain_struct);
+		domain = next;
+		dc++;
+	}
+	xhdt->dom0 = dom0;
+}
+
+/*
+ * Get the number of domains.
+ */
+int
+xen_hyper_get_domains(void)
+{
+	ulong domain, next_in_list;
+	long domain_next_in_list;
+	int i, j;
+
+	get_symbol_data("dom0", sizeof(void *), &domain);
+	domain_next_in_list = MEMBER_OFFSET("domain", "next_in_list");
+	i = 0;
+	while (domain != 0) {
+		i++;
+		next_in_list = domain + domain_next_in_list;
+		if (!readmem(next_in_list, KVADDR, &domain, sizeof(void *),
+			"domain.next_in_list", RETURN_ON_ERROR)) {
+			error(FATAL, "cannot read domain.next_in_list.\n");
+		}
+	}
+	i += 2;		/* for dom_io, dom_xen */
+	/* for idle domains */
+	for (j = 0; j < xht->idle_vcpu_size; j += XEN_HYPER_MAX_VIRT_CPUS) {
+		if (xht->idle_vcpu_array[j])
+			i++;
+	}
+	return i;
+}
+
+/*
+ * Get next domain struct.
+ * 	mod - XEN_HYPER_DOMAIN_READ_DOM0:start from dom0 symbol
+ * 	    - XEN_HYPER_DOMAIN_READ_INIT:start from xhdt->context_array
+ * 	    - XEN_HYPER_DOMAIN_READ_NEXT:next
+ */
+char *
+xen_hyper_get_domain_next(int mod, ulong *next)
+{
+	static int idx = 0;
+
+	char *domain_struct;
+	struct xen_hyper_domain_context *dc;
+
+	switch (mod) {
+	case XEN_HYPER_DOMAIN_READ_DOM0:
+		/* Case of search from dom0 symbol. */
+		idx = 0;
+		if (xhdt->dom0) {
+			*next = xhdt->dom0->domain;
+		} else {
+			get_symbol_data("dom0", sizeof(void *), next);
+		}
+		return xhdt->domain_struct;
+		break;
+	case XEN_HYPER_DOMAIN_READ_INIT:
+		/* Case of search from context_array. */
+		if (xhdt->context_array && xhdt->context_array->domain) {
+			idx = 1; 		/* this has a next index. */
+			*next = xhdt->context_array->domain;
+		} else {
+			idx = 0;
+			*next = 0;
+			return NULL;
+		}
+		return xhdt->domain_struct;
+		break;
+	case XEN_HYPER_DOMAIN_READ_NEXT:
+		break;
+	default:
+		error(FATAL, "xen_hyper_get_domain_next mod error: %d\n", mod);
+		return NULL;
+	}
+
+	/* Finished search */
+	if (!*next) {
+		return NULL;
+	}
+
+	domain_struct = NULL;
+	/* Is domain context array valid? */
+	if (idx) {
+		if ((domain_struct =
+			xen_hyper_read_domain(*next)) == NULL) {
+			error(FATAL, "cannot get next domain from domain context array.\n");
+		}
+		if (idx > XEN_HYPER_NR_DOMAINS()) {
+			*next = 0;
+		} else {
+			dc = xhdt->context_array;
+			dc += idx;
+			*next = dc->domain;
+			idx++;
+		}
+		return domain_struct;
+	}
+
+	/* Search from dom0 symbol. */
+	if ((domain_struct =
+		xen_hyper_read_domain(*next)) == NULL) {
+		error(FATAL, "cannot get next domain from dom0 symbol.\n");
+	}
+	*next = ULONG(domain_struct + XEN_HYPER_OFFSET(domain_next_in_list));
+	return domain_struct;
+}
+
+/*
+ * Convert a domain address to a domain id.
+ */
+domid_t
+xen_hyper_domain_to_id(ulong domain)
+{
+	struct xen_hyper_domain_context *dc;
+
+	/* Is domain context array valid? */
+	if (xhdt->context_array && xhdt->context_array->domain) {
+		if ((dc = xen_hyper_domain_to_domain_context(domain)) == NULL) {
+			return XEN_HYPER_DOMAIN_ID_INVALID;
+		} else {
+			return dc->domain_id;
+		}
+	} else {
+		return XEN_HYPER_DOMAIN_ID_INVALID;
+	}
+}
+
+/*
+ * Get domain struct from id.
+ */
+char *
+xen_hyper_id_to_domain_struct(domid_t id)
+{
+	char *domain_struct;
+	struct xen_hyper_domain_context *dc;
+
+	domain_struct = NULL;
+
+	/* Is domain context array valid? */
+	if (xhdt->context_array && xhdt->context_array->domain) {
+		if ((dc = xen_hyper_id_to_domain_context(id)) == NULL) {
+			return NULL;
+		} else {
+			if ((domain_struct =
+				xen_hyper_read_domain(dc->domain)) == NULL) {
+				error(FATAL, "cannot get domain from domain context array with id.\n");
+			}
+			return domain_struct;
+		}
+	} else {
+		return NULL;
+	}
+}
+
+/*
+ * Get domain context from domain address.
+ */
+struct xen_hyper_domain_context *
+xen_hyper_domain_to_domain_context(ulong domain)
+{
+	struct xen_hyper_domain_context *dc;
+	int i;
+
+	if (xhdt->context_array == NULL ||
+		xhdt->context_array->domain == 0) {
+		return NULL;
+	}
+	if (!domain) {
+		return NULL;
+	}
+	for (i = 0, dc = xhdt->context_array; i < XEN_HYPER_NR_DOMAINS();
+		i++, dc++) {
+		if (domain == dc->domain) {
+			return dc;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Get domain context from domain id.
+ */
+struct xen_hyper_domain_context *
+xen_hyper_id_to_domain_context(domid_t id)
+{
+	struct xen_hyper_domain_context *dc;
+	int i;
+
+	if (xhdt->context_array == NULL ||
+		xhdt->context_array->domain == 0) {
+		return NULL;
+	}
+	if (id == XEN_HYPER_DOMAIN_ID_INVALID) {
+		return NULL;
+	}
+	for (i = 0, dc = xhdt->context_array; i < XEN_HYPER_NR_DOMAINS();
+		i++, dc++) {
+		if (id == dc->domain_id) {
+			return dc;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Store domain struct contents.
+ */
+struct xen_hyper_domain_context *
+xen_hyper_store_domain_context(struct xen_hyper_domain_context *dc,
+	       ulong domain, char *dp)
+{
+	int i;
+
+	dc->domain = domain;
+	BCOPY((char *)(dp + XEN_HYPER_OFFSET(domain_domain_id)),
+		&dc->domain_id, sizeof(domid_t));
+	dc->tot_pages = UINT(dp + XEN_HYPER_OFFSET(domain_tot_pages));
+	dc->max_pages = UINT(dp + XEN_HYPER_OFFSET(domain_max_pages));
+	dc->xenheap_pages = UINT(dp + XEN_HYPER_OFFSET(domain_xenheap_pages));
+	dc->shared_info = ULONG(dp + XEN_HYPER_OFFSET(domain_shared_info));
+	dc->sched_priv = ULONG(dp + XEN_HYPER_OFFSET(domain_sched_priv));
+	dc->next_in_list = ULONG(dp + XEN_HYPER_OFFSET(domain_next_in_list));
+	if (XEN_HYPER_VALID_MEMBER(domain_domain_flags))
+		dc->domain_flags = ULONG(dp + XEN_HYPER_OFFSET(domain_domain_flags));
+	else if (XEN_HYPER_VALID_MEMBER(domain_is_shut_down)) {
+		dc->domain_flags = 0;
+		if (*(dp + XEN_HYPER_OFFSET(domain_is_hvm))) {
+			dc->domain_flags |= XEN_HYPER_DOMS_HVM;
+		} else if (*(dp + XEN_HYPER_OFFSET(domain_is_privileged))) {
+			dc->domain_flags |= XEN_HYPER_DOMS_privileged;
+		} else if (*(dp + XEN_HYPER_OFFSET(domain_debugger_attached))) {
+			dc->domain_flags |= XEN_HYPER_DOMS_debugging;
+		} else if (XEN_HYPER_VALID_MEMBER(domain_is_polling) &&
+				*(dp + XEN_HYPER_OFFSET(domain_is_polling))) {
+			dc->domain_flags |= XEN_HYPER_DOMS_polling;
+		} else if (*(dp + XEN_HYPER_OFFSET(domain_is_paused_by_controller))) {
+			dc->domain_flags |= XEN_HYPER_DOMS_ctrl_pause;
+		} else if (*(dp + XEN_HYPER_OFFSET(domain_is_dying))) {
+			dc->domain_flags |= XEN_HYPER_DOMS_dying;
+		} else if (*(dp + XEN_HYPER_OFFSET(domain_is_shutting_down))) {
+			dc->domain_flags |= XEN_HYPER_DOMS_shuttingdown;
+		} else if (*(dp + XEN_HYPER_OFFSET(domain_is_shut_down))) {
+			dc->domain_flags |= XEN_HYPER_DOMS_shutdown;
+		}
+	} else {
+		dc->domain_flags = XEN_HYPER_DOMF_ERROR;
+	}
+	dc->evtchn = ULONG(dp + XEN_HYPER_OFFSET(domain_evtchn));
+	for (i = 0; i < XEN_HYPER_MAX_VIRT_CPUS; i++) {
+		dc->vcpu[i] = ULONG(dp + XEN_HYPER_OFFSET(domain_vcpu) + i*sizeof(void *));
+		if (dc->vcpu[i])	XEN_HYPER_NR_VCPUS_IN_DOM(dc)++;
+	}
+
+	return dc;
+}
+
+/*
+ * Read domain struct from domain context.
+ */
+char *
+xen_hyper_read_domain_from_context(struct xen_hyper_domain_context *dc)
+{
+	return xen_hyper_fill_domain_struct(dc->domain, xhdt->domain_struct);
+}
+
+/*
+ * Read domain struct.
+ */
+char *
+xen_hyper_read_domain(ulong domain)
+{
+	return xen_hyper_fill_domain_struct(domain, xhdt->domain_struct);
+}
+
+/*
+ * Read domain struct to verification.
+ */
+char *
+xen_hyper_read_domain_verify(ulong domain)
+{
+	return xen_hyper_fill_domain_struct(domain, xhdt->domain_struct_verify);
+}
+
+/*
+ * Fill domain struct.
+ */
+char *
+xen_hyper_fill_domain_struct(ulong domain, char *domain_struct)
+{
+	if (!readmem(domain, KVADDR, domain_struct,
+		XEN_HYPER_SIZE(domain), "fill_domain_struct",
+	       	ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
+		error(WARNING, "cannot fill domain struct.\n");
+		return NULL;
+	}
+	return domain_struct;
+}
+
+/*
+ * Allocate domain context space.
+ */
+void
+xen_hyper_alloc_domain_context_space(int domains)
+{
+	if (xhdt->context_array == NULL) {
+		if (!(xhdt->context_array =
+			malloc(domains * sizeof(struct xen_hyper_domain_context)))) {
+			error(FATAL, "cannot malloc context array (%d domains).",
+				domains);
+		}
+		xhdt->context_array_cnt = domains;
+	} else if (domains > xhdt->context_array_cnt) {
+		if (!(xhdt->context_array =
+			realloc(xhdt->context_array,
+				domains * sizeof(struct xen_hyper_domain_context)))) {
+			error(FATAL, "cannot realloc context array (%d domains).",
+				domains);
+		}
+		xhdt->context_array_cnt = domains;
+	}
+	BZERO(xhdt->context_array,
+		domains * sizeof(struct xen_hyper_domain_context));
+}
+
+
+
+/*
+ * Get vcpu status.
+ */
+int
+xen_hyper_vcpu_state(struct xen_hyper_vcpu_context *vcc)
+{
+	if (ACTIVE()) {
+		if (xen_hyper_read_vcpu_verify(vcc->vcpu) == NULL) {
+			return XEN_HYPER_RUNSTATE_ERROR;
+		}
+	}
+	return vcc->state;
+}
+
+/*
+ * Refresh the vcpu context space (re-read and store all vcpu contexts).
+ */
+void
+xen_hyper_refresh_vcpu_context_space(void)
+{
+	struct xen_hyper_domain_context *dc;
+	struct xen_hyper_vcpu_context_array *vcca;
+	struct xen_hyper_vcpu_context *vcc;
+	int i, j;
+
+	if ((xhvct->flags & XEN_HYPER_VCPU_F_INIT) && !ACTIVE()) {
+		return;
+	}
+
+	xen_hyper_alloc_vcpu_context_arrays_space(XEN_HYPER_NR_DOMAINS());
+	for (i = 0, xht->vcpus = 0, dc = xhdt->context_array,
+	vcca = xhvct->vcpu_context_arrays;
+	i < XEN_HYPER_NR_DOMAINS(); i++, dc++, vcca++) {
+		dc->vcpu_context_array = vcca;
+		xen_hyper_alloc_vcpu_context_space(vcca,
+			XEN_HYPER_NR_VCPUS_IN_DOM(dc));
+		for (j = 0, vcc = vcca->context_array;
+		j < XEN_HYPER_NR_VCPUS_IN_DOM(dc); j++, vcc++) {
+			xen_hyper_read_vcpu(dc->vcpu[j]);
+			xen_hyper_store_vcpu_context(vcc, dc->vcpu[j],
+				xhvct->vcpu_struct);	
+		}
+		if (dc == xhdt->idle_domain) {
+			xhvct->idle_vcpu_context_array = vcca;
+		}
+		xht->vcpus += vcca->context_array_cnt;
+	}
+}
+
+/*
+ * Get vcpu context from vcpu address.
+ */
+struct xen_hyper_vcpu_context *
+xen_hyper_vcpu_to_vcpu_context(ulong vcpu)
+{
+	struct xen_hyper_vcpu_context_array *vcca;
+	struct xen_hyper_vcpu_context *vcc;
+	int i, j;
+
+	if (!vcpu) {
+		return NULL;
+	}
+	for (i = 0, vcca = xhvct->vcpu_context_arrays;
+		i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) {
+		for (j = 0, vcc = vcca->context_array;
+			j < vcca->context_array_cnt; j++, vcc++) {
+			if (vcpu == vcc->vcpu) {
+				return vcc;
+			}
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Get vcpu context.
+ */
+struct xen_hyper_vcpu_context *
+xen_hyper_id_to_vcpu_context(ulong domain, domid_t did, int vcid)
+{
+	struct xen_hyper_vcpu_context_array *vcca;
+	struct xen_hyper_vcpu_context *vcc;
+	int i;
+
+	if (vcid == XEN_HYPER_VCPU_ID_INVALID) {
+		return NULL;
+	}
+	if ((vcca = xen_hyper_domain_to_vcpu_context_array(domain))) {
+		;
+	} else if (!(vcca = xen_hyper_domid_to_vcpu_context_array(did))) {
+		return NULL;
+	}
+	for (i = 0, vcc = vcca->context_array;
+		i < vcca->context_array_cnt; i++, vcc++) {
+		if (vcid == vcc->vcpu_id) {
+			return vcc;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Get pointer of a vcpu context array from domain address.
+ */
+struct xen_hyper_vcpu_context_array *
+xen_hyper_domain_to_vcpu_context_array(ulong domain)
+{
+	struct xen_hyper_domain_context *dc;
+
+	if(!(dc = xen_hyper_domain_to_domain_context(domain))) {
+		return NULL;
+	}
+	return dc->vcpu_context_array;
+}
+
+/*
+ * Get pointer of a vcpu context array from domain id.
+ */
+struct xen_hyper_vcpu_context_array *
+xen_hyper_domid_to_vcpu_context_array(domid_t id)
+{
+	struct xen_hyper_domain_context *dc;
+
+	if (!(dc = xen_hyper_id_to_domain_context(id))) {
+		return NULL;
+	}
+	return dc->vcpu_context_array;
+}
+
+/*
+ * Store vcpu struct contents.
+ */
+struct xen_hyper_vcpu_context *
+xen_hyper_store_vcpu_context(struct xen_hyper_vcpu_context *vcc,
+       ulong vcpu, char *vcp)
+{
+	vcc->vcpu = vcpu;
+	vcc->vcpu_id = INT(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_id));
+	vcc->processor = INT(vcp + XEN_HYPER_OFFSET(vcpu_processor));
+	vcc->vcpu_info = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_info));
+	vcc->domain = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_domain));
+	vcc->next_in_list = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_next_in_list));
+	if (XEN_HYPER_VALID_MEMBER(vcpu_sleep_tick))
+		vcc->sleep_tick = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sleep_tick));
+	vcc->sched_priv = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sched_priv));
+	vcc->state = INT(vcp + XEN_HYPER_OFFSET(vcpu_runstate) +
+		XEN_HYPER_OFFSET(vcpu_runstate_info_state));
+	vcc->state_entry_time = ULONGLONG(vcp +
+		XEN_HYPER_OFFSET(vcpu_runstate) +
+		XEN_HYPER_OFFSET(vcpu_runstate_info_state_entry_time));
+	vcc->runstate_guest = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_runstate_guest));
+	if (XEN_HYPER_VALID_MEMBER(vcpu_vcpu_flags))
+		vcc->vcpu_flags = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_flags));
+	else
+		vcc->vcpu_flags = XEN_HYPER_VCPUF_ERROR;
+	return vcc;
+}
+
+/*
+ * Read vcpu struct from vcpu context.
+ */
+char *
+xen_hyper_read_vcpu_from_context(struct xen_hyper_vcpu_context *vcc)
+{
+	return xen_hyper_fill_vcpu_struct(vcc->vcpu, xhvct->vcpu_struct);
+}
+
+/*
+ * Read vcpu struct.
+ */
+char *
+xen_hyper_read_vcpu(ulong vcpu)
+{
+	return xen_hyper_fill_vcpu_struct(vcpu, xhvct->vcpu_struct);
+}
+
+/*
+ * Read vcpu struct to verification.
+ */
+char *
+xen_hyper_read_vcpu_verify(ulong vcpu)
+{
+	return xen_hyper_fill_vcpu_struct(vcpu, xhvct->vcpu_struct_verify);
+}
+
+/*
+ * Fill vcpu struct.
+ */
+char *
+xen_hyper_fill_vcpu_struct(ulong vcpu, char *vcpu_struct)
+{
+	if (!readmem(vcpu, KVADDR, vcpu_struct,
+		XEN_HYPER_SIZE(vcpu), "fill_vcpu_struct",
+	       	ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
+		error(WARNING, "cannot fill vcpu struct.\n");
+		return NULL;
+	}
+	return vcpu_struct;
+}
+
+/*
+ * Allocate vcpu context arrays space.
+ */
+void
+xen_hyper_alloc_vcpu_context_arrays_space(int domains)
+{
+	struct xen_hyper_vcpu_context_array *vcca;
+
+	if (xhvct->vcpu_context_arrays == NULL) {
+		if (!(xhvct->vcpu_context_arrays =
+			malloc(domains * sizeof(struct xen_hyper_vcpu_context_array)))) {
+			error(FATAL, "cannot malloc context arrays (%d domains).",
+				domains);
+		}
+		BZERO(xhvct->vcpu_context_arrays, domains * sizeof(struct xen_hyper_vcpu_context_array));
+		xhvct->vcpu_context_arrays_cnt = domains;
+	} else if (domains > xhvct->vcpu_context_arrays_cnt) {
+		if (!(xhvct->vcpu_context_arrays =
+			realloc(xhvct->vcpu_context_arrays,
+				domains * sizeof(struct xen_hyper_vcpu_context_array)))) {
+			error(FATAL, "cannot realloc context arrays (%d domains).",
+				domains);
+		}
+		vcca = xhvct->vcpu_context_arrays + xhvct->vcpu_context_arrays_cnt;
+		BZERO(vcca, (domains - xhvct->vcpu_context_arrays_cnt) *
+			sizeof(struct xen_hyper_vcpu_context_array));
+		xhvct->vcpu_context_arrays_cnt = domains;
+	}
+}
+
+/*
+ * Allocate vcpu context space.
+ */
+void
+xen_hyper_alloc_vcpu_context_space(struct xen_hyper_vcpu_context_array *vcca, int vcpus)
+{
+	if (!vcpus) {
+		if (vcca->context_array != NULL) {
+			free(vcca->context_array);
+			vcca->context_array = NULL;
+		}
+		vcca->context_array_cnt = vcpus;
+	} else if (vcca->context_array == NULL) {
+		if (!(vcca->context_array =
+			malloc(vcpus * sizeof(struct xen_hyper_vcpu_context)))) {
+			error(FATAL, "cannot malloc context array (%d vcpus).",
+				vcpus);
+		}
+		vcca->context_array_cnt = vcpus;
+	} else if (vcpus > vcca->context_array_cnt) {
+		if (!(vcca->context_array =
+			realloc(vcca->context_array,
+				vcpus * sizeof(struct xen_hyper_vcpu_context)))) {
+			error(FATAL, "cannot realloc context array (%d vcpus).",
+				vcpus);
+		}
+		vcca->context_array_cnt = vcpus;
+	}
+	vcca->context_array_valid = vcpus;
+	BZERO(vcca->context_array, vcpus * sizeof(struct xen_hyper_vcpu_context));
+}
+
+
+
+/*
+ * Get pcpu context from pcpu id.
+ */
+struct xen_hyper_pcpu_context *
+xen_hyper_id_to_pcpu_context(uint id)
+{
+	if (xhpct->context_array == NULL) {
+		return NULL;
+	}
+	if (!xen_hyper_test_pcpu_id(id)) {
+		return NULL;
+	}
+	return &xhpct->context_array[id];
+}
+
+/*
+ * Get pcpu context from pcpu address.
+ */
+struct xen_hyper_pcpu_context *
+xen_hyper_pcpu_to_pcpu_context(ulong pcpu)
+{
+	struct xen_hyper_pcpu_context *pcc;
+	int i;
+	uint cpuid;
+
+	if (xhpct->context_array == NULL) {
+		return NULL;
+	}
+	if (!pcpu) {
+		return NULL;
+	}
+	for_cpu_indexes(i, cpuid)
+	{
+		pcc = &xhpct->context_array[cpuid];
+		if (pcpu == pcc->pcpu) {
+			return pcc;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Store pcpu struct contents.
+ */
+struct xen_hyper_pcpu_context *
+xen_hyper_store_pcpu_context(struct xen_hyper_pcpu_context *pcc,
+       ulong pcpu, char *pcp)
+{
+	pcc->pcpu = pcpu;
+	pcc->processor_id =
+		UINT(pcp + XEN_HYPER_OFFSET(cpu_info_processor_id));
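+	/*
+	 * Store the address of the register block embedded in cpu_info
+	 * rather than its contents.
+	 */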
+	pcc->guest_cpu_user_regs = (ulong)(pcpu +
+			XEN_HYPER_OFFSET(cpu_info_guest_cpu_user_regs));
+	pcc->current_vcpu =
+		ULONG(pcp + XEN_HYPER_OFFSET(cpu_info_current_vcpu));
+	return pcc;
+}
+
+/*
+ * Store init_tss contents.
+ */
+struct xen_hyper_pcpu_context *
+xen_hyper_store_pcpu_context_tss(struct xen_hyper_pcpu_context *pcc,
+       ulong init_tss, char *tss)
+{
+	int i;
+	uint64_t *ist_p;
+
+	pcc->init_tss = init_tss;
+	if (machine_type("X86")) {
+		pcc->sp.esp0 = ULONG(tss + XEN_HYPER_OFFSET(tss_struct_esp0));
+	} else if (machine_type("X86_64")) {
+		pcc->sp.rsp0 = ULONG(tss + XEN_HYPER_OFFSET(tss_struct_rsp0));
+		ist_p = (uint64_t *)(tss + XEN_HYPER_OFFSET(tss_struct_ist));
+		for (i = 0; i < XEN_HYPER_TSS_IST_MAX; i++, ist_p++) {
+			pcc->ist[i] = ULONG(ist_p);
+		}
+	}
+	return pcc;
+}
+
+/*
+ * Read pcpu struct.
+ */
+char *
+xen_hyper_read_pcpu(ulong pcpu)
+{
+	return xen_hyper_fill_pcpu_struct(pcpu, xhpct->pcpu_struct);
+}
+
+/*
+ * Fill pcpu struct.
+ */
+char *
+xen_hyper_fill_pcpu_struct(ulong pcpu, char *pcpu_struct)
+{
+	if (!readmem(pcpu, KVADDR, pcpu_struct,
+		XEN_HYPER_SIZE(cpu_info), "fill_pcpu_struct",
+	       	ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
+		error(WARNING, "cannot fill pcpu_struct.\n");
+		return NULL;
+	}
+	return pcpu_struct;
+}
+
+/*
+ * Allocate pcpu context space.
+ */
+void
+xen_hyper_alloc_pcpu_context_space(int pcpus)
+{
+	if (xhpct->context_array == NULL) {
+		if (!(xhpct->context_array =
+			malloc(pcpus * sizeof(struct xen_hyper_pcpu_context)))) {
+			error(FATAL, "cannot malloc context array (%d pcpus).",
+				pcpus);
+		}
+	}
+	BZERO(xhpct->context_array, pcpus * sizeof(struct xen_hyper_pcpu_context));
+}
+
+
+
+/*
+ * Fill cpu_data.
+ */
+char *
+xen_hyper_x86_fill_cpu_data(int idx, char *cpuinfo_x86)
+{
+	ulong cpu_data;
+
+	if (!xen_hyper_test_pcpu_id(idx) || !xht->cpu_data_address)
+		return NULL;
+	cpu_data = xht->cpu_data_address + XEN_HYPER_SIZE(cpuinfo_x86) * idx;
+	if (!readmem(cpu_data, KVADDR, cpuinfo_x86, XEN_HYPER_SIZE(cpuinfo_x86),
+		"cpu_data", RETURN_ON_ERROR)) {
+		error(WARNING, "cannot read cpu_data.\n");
+		return NULL;
+	}
+	return cpuinfo_x86;
+}
+
+char *
+xen_hyper_ia64_fill_cpu_data(int idx, char *cpuinfo_ia64)
+{
+	ulong cpu_data;
+
+	if (!xen_hyper_test_pcpu_id(idx) || !xht->cpu_data_address)
+		return NULL;
+	cpu_data = xen_hyper_per_cpu(xht->cpu_data_address, idx);
+	if (!readmem(cpu_data, KVADDR, cpuinfo_ia64, XEN_HYPER_SIZE(cpuinfo_ia64),
+		"cpu_data", RETURN_ON_ERROR)) {
+		error(WARNING, "cannot read cpu_data.\n");
+		return NULL;
+	}
+	return cpuinfo_ia64;
+}
+
+/*
+ * Return whether vcpu is crashing.
+ */
+int
+xen_hyper_is_vcpu_crash(struct xen_hyper_vcpu_context *vcc)
+{
+	if (vcc == xht->crashing_vcc)
+		return TRUE;
+	return FALSE;
+}
+
+/*
+ * Test whether cpu for pcpu id exists.
+ */
+int
+xen_hyper_test_pcpu_id(uint pcpu_id)
+{
+	ulong *cpumask = xht->cpumask;
+	uint i, j;
+
+	if (pcpu_id == XEN_HYPER_PCPU_ID_INVALID ||
+	pcpu_id > XEN_HYPER_MAX_CPUS()) {
+		return FALSE;
+	}
+
+	i = pcpu_id / (sizeof(ulong) * 8);
+	j = pcpu_id % (sizeof(ulong) * 8);
+	cpumask += i;
+	if (*cpumask & (1UL << j)) {
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
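The word/bit arithmetic above is the usual way to test one CPU's bit in a bitmap stored as an array of unsigned longs. A standalone sketch of the same test, with hypothetical names:

/* Illustrative sketch, not part of the patch: test bit 'cpu' of a cpumask
 * laid out as an array of unsigned longs, as the code above does. */
#include <limits.h>

#define BITS_PER_ULONG	(sizeof(unsigned long) * CHAR_BIT)

static int cpu_bit_is_set(const unsigned long *mask, unsigned int cpu)
{
	unsigned int word = cpu / BITS_PER_ULONG;	/* which ulong holds the bit */
	unsigned int bit  = cpu % BITS_PER_ULONG;	/* position within that word */

	return (mask[word] >> bit) & 1UL;
}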
+
+
+
+/*
+ *  Calculate and return the uptime.
+ */
+ulonglong
+xen_hyper_get_uptime_hyper(void)
+{
+	ulong jiffies, tmp1, tmp2;
+	ulonglong jiffies_64, wrapped;
+
+	if (symbol_exists("jiffies_64")) {
+		get_symbol_data("jiffies_64", sizeof(ulonglong), &jiffies_64);
+		wrapped = (jiffies_64 & 0xffffffff00000000ULL);
+		if (wrapped) {
+			wrapped -= 0x100000000ULL;
+			jiffies_64 &= 0x00000000ffffffffULL;
+			jiffies_64 |= wrapped;
+               		jiffies_64 += (ulonglong)(300*machdep->hz);
+		} else {
+			tmp1 = (ulong)(uint)(-300*machdep->hz);
+			tmp2 = (ulong)jiffies_64;
+			jiffies_64 = (ulonglong)(tmp2 - tmp1);
+		}
+	} else if (symbol_exists("jiffies")) {
+		get_symbol_data("jiffies", sizeof(long), &jiffies);
+		jiffies_64 = (ulonglong)jiffies;
+	} else {
+		jiffies_64 = 0;	/* hypervisor does not have uptime */
+	}
+
+	return jiffies_64;
+}
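The branches above undo the kernel's jiffies bias: jiffies is initialized to roughly -300*HZ so that a 32-bit wraparound occurs about five minutes after boot. Assuming that usual bias, the whole calculation collapses to one 64-bit subtraction; a hedged sketch:

/* Illustrative sketch, not part of the patch: elapsed ticks since boot,
 * assuming the kernel's usual INITIAL_JIFFIES bias of -300*HZ. */
#include <stdint.h>

static uint64_t uptime_ticks(uint64_t jiffies_64, unsigned int hz)
{
	uint64_t initial = (uint64_t)(uint32_t)(-300 * (int)hz);

	return jiffies_64 - initial;	/* divide by hz for seconds of uptime */
}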
+
+/*
+ * Get cpu information.
+ */
+void
+xen_hyper_get_cpu_info(void)
+{
+	ulong addr;
+	ulong *cpumask;
+	uint *cpu_idx;
+	int i, j, cpus;
+
+	get_symbol_data("max_cpus", sizeof(xht->max_cpus), &xht->max_cpus);
+	XEN_HYPER_STRUCT_SIZE_INIT(cpumask_t, "cpumask_t");
+	if (XEN_HYPER_SIZE(cpumask_t) * 8 > xht->max_cpus) {
+		xht->max_cpus = XEN_HYPER_SIZE(cpumask_t) * 8;
+	}
+	if (xht->cpumask) {
+		free(xht->cpumask);
+	}
+	if((xht->cpumask = malloc(XEN_HYPER_SIZE(cpumask_t))) == NULL) {
+		error(FATAL, "cannot malloc cpumask space.\n");
+	}
+	/* kakuma: It may be better to use cpu_present_map. */
+	addr = symbol_value("cpu_online_map");
+	if (!readmem(addr, KVADDR, xht->cpumask,
+		XEN_HYPER_SIZE(cpumask_t), "cpu_online_map", RETURN_ON_ERROR)) {
+		error(FATAL, "cannot read cpu_online_map.\n");
+	}
+	if (xht->cpu_idxs) {
+		free(xht->cpu_idxs);
+	}
+	if((xht->cpu_idxs = malloc(sizeof(uint) * XEN_HYPER_MAX_CPUS())) == NULL) {
+		error(FATAL, "cannot malloc cpu_idxs space.\n");
+	}
+	memset(xht->cpu_idxs, 0xff, sizeof(uint) * XEN_HYPER_MAX_CPUS());
+
+	for (i = cpus = 0, cpumask = xht->cpumask, cpu_idx = xht->cpu_idxs;
+	i < (XEN_HYPER_SIZE(cpumask_t)/sizeof(ulong)); i++, cpumask++) {
+		for (j = 0; j < sizeof(ulong) * 8; j++) {
+			if (*cpumask & (1UL << j)) {
+				*cpu_idx++ = i * sizeof(ulong) * 8 + j;
+				cpus++;
+			}
+		}
+	}
+	xht->pcpus = cpus;
+}
+
+/*
+ * Calculate the number of physical cpus for x86.
+ */
+int
+xen_hyper_x86_get_smp_cpus(void)
+{
+	if (xht->pcpus) {
+		return xht->pcpus;
+	}
+	xen_hyper_get_cpu_info();
+	return xht->pcpus;
+}
+
+/*
+ * Calculate used memory size for x86.
+ */
+uint64_t
+xen_hyper_x86_memory_size(void)
+{
+	ulong vaddr;
+
+	if (machdep->memsize) {
+		return machdep->memsize;
+	}
+	vaddr = symbol_value("total_pages");
+	if (!readmem(vaddr, KVADDR, &xht->total_pages, sizeof(xht->total_pages),
+		"total_pages", RETURN_ON_ERROR)) {
+		error(WARNING, "cannot read total_pages.\n");
+	}
+	xht->sys_pages = xht->total_pages;
+	machdep->memsize = (uint64_t)(xht->sys_pages) * (uint64_t)(machdep->pagesize);
+	return machdep->memsize;
+}
+
+
+/*
+ * Calculate the number of physical cpus for ia64.
+ */
+int
+xen_hyper_ia64_get_smp_cpus(void)
+{
+	return xen_hyper_x86_get_smp_cpus();
+}
+
+/*
+ * Calculate used memory size for ia64.
+ */
+uint64_t
+xen_hyper_ia64_memory_size(void)
+{
+	return xen_hyper_x86_memory_size();
+}
+
+/*      
+ *  Calculate and return the speed of the processor. 
+ */
+ulong 
+xen_hyper_ia64_processor_speed(void)
+{
+	ulong mhz, proc_freq;
+
+	if (machdep->mhz)
+		return(machdep->mhz);
+
+	mhz = 0;
+
+	if (!xht->cpu_data_address ||
+	    !XEN_HYPER_VALID_STRUCT(cpuinfo_ia64) ||
+	    XEN_HYPER_INVALID_MEMBER(cpuinfo_ia64_proc_freq))
+		return (machdep->mhz = mhz);
+
+        readmem(xen_hyper_per_cpu(xht->cpu_data_address, xht->cpu_idxs[0]) + 
+		XEN_HYPER_OFFSET(cpuinfo_ia64_proc_freq),
+        	KVADDR, &proc_freq, sizeof(ulong),
+                "cpuinfo_ia64 proc_freq", FAULT_ON_ERROR);
+
+	mhz = proc_freq/1000000;
+
+	return (machdep->mhz = mhz);
+}
+
+
+
+/*
+ * Print an aligned string with a specified length.
+ */
+void
+xen_hyper_fpr_indent(FILE *fp, int len, char *str1, char *str2, int flag)
+{
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	int sl, r;
+	char *s1, *s2;
+
+	sl = strlen(str1);
+	if (sl > len) {
+		r = 0;
+	} else {
+		r = len - sl;
+	}
+
+	memset(buf, ' ', sizeof(buf));
+	buf[r] =  '\0';
+	if (flag & XEN_HYPER_PRI_L) {
+		s1 = str1;
+		s2 = buf;
+	} else {
+		s1 = buf;
+		s2 = str1;
+	}
+	if (str2) {
+		fprintf(fp, "%s%s%s", s1, s2, str2);
+	} else {
+		fprintf(fp, "%s%s", s1, s2);
+	}
+	if (flag & XEN_HYPER_PRI_LF) {
+		fprintf(fp, "\n");
+	}
+}
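The helper above builds its padding by hand in a scratch buffer so the pad can be placed on either side of the string. For the plain left- or right-justified cases, printf field widths give the same result; a small sketch with illustrative names:

/* Illustrative sketch, not part of the patch: left/right alignment via
 * printf field widths instead of a padding buffer. */
#include <stdio.h>

static void print_aligned(FILE *fp, int width, const char *s, int left_justify)
{
	if (left_justify)
		fprintf(fp, "%-*s", width, s);	/* pad on the right */
	else
		fprintf(fp, "%*s", width, s);	/* pad on the left */
}

In both versions a string longer than the field width is printed in full with no padding, which matches the r = 0 case above.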
+
+ulong
+xen_hyper_get_active_vcpu_from_pcpuid(ulong pcpuid)
+{
+	struct xen_hyper_pcpu_context *pcc;
+	struct xen_hyper_vcpu_context_array *vcca;
+	struct xen_hyper_vcpu_context *vcc;
+	int i, j;
+
+	if (!xen_hyper_test_pcpu_id(pcpuid))
+		return 0;
+
+	pcc = &xhpct->context_array[pcpuid];
+	if (pcc->current_vcpu)
+		return pcc->current_vcpu;
+
+	for (i = 0, vcca = xhvct->vcpu_context_arrays;
+		i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) {
+		for (j = 0, vcc = vcca->context_array;
+			j < vcca->context_array_cnt; j++, vcc++) {
+			if (vcc->processor == pcpuid && 
+				vcc->state == XEN_HYPER_RUNSTATE_running) {
+				return vcc->vcpu;
+			}
+		}
+	}
+
+	return 0;
+}
+
+ulong
+xen_hyper_pcpu_to_active_vcpu(ulong pcpu)
+{
+	ulong vcpu;
+
+	/* if pcpu is a vcpu address, return it. */
+	if (pcpu & (~(PAGESIZE() - 1))) {
+		return pcpu;
+	}
+
+	if(!(vcpu = XEN_HYPER_CURR_VCPU(pcpu)))
+		error(FATAL, "invalid pcpu id\n");
+	return vcpu;
+}
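The page-mask test above works because a physical cpu id is a small integer with no bits set above the page-offset field, while a vcpu pointer is a kernel virtual address that always has such bits set. A one-line sketch of the same heuristic:

/* Illustrative sketch, not part of the patch: does 'value' look like an
 * address rather than a small cpu id?  Mirrors the PAGESIZE() test above. */
static int looks_like_address(unsigned long value, unsigned long page_size)
{
	return (value & ~(page_size - 1)) != 0;
}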
+
+void
+xen_hyper_print_bt_header(FILE *out, ulong vcpu, int newline)
+{
+	struct xen_hyper_vcpu_context *vcc;
+
+	if (newline)
+		fprintf(out, "\n");
+
+	vcc = xen_hyper_vcpu_to_vcpu_context(vcpu);
+	if (!vcc)
+		error(FATAL, "invalid vcpu\n");
+	fprintf(out, "PCPU: %2d  VCPU: %lx\n", vcc->processor, vcpu);
+}
+#endif
--- crash/configure.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/configure.c	2009-01-26 14:55:25.000000000 -0500
@@ -1,8 +1,8 @@
 /* configure.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
 
 void build_configure(void);
 void release_configure(char *);
-void make_rh_rpm_package(char *);
+void make_rh_rpm_package(char *, int);
 void unconfigure(void);
 void set_warnings(int);
 void show_configuration(void);
@@ -222,7 +222,7 @@
 
 	setup_gdb_defaults();
 
-	while ((c = getopt(argc, argv, "gsqnWwubdr:p:")) > 0) {
+	while ((c = getopt(argc, argv, "gsqnWwubdr:p:P:")) > 0) {
 		switch (c) {
 		case 'q':
 			target_data.flags |= QUIET;
@@ -239,7 +239,10 @@
 			release_configure(optarg);
 			break;
 		case 'p':
-			make_rh_rpm_package(optarg);
+			make_rh_rpm_package(optarg, 0);
+			break;
+		case 'P':
+			make_rh_rpm_package(optarg, 1);
 			break;
 		case 'W':
 		case 'w':
@@ -566,10 +569,11 @@
  *  Create an .rh_rpm_package file if the passed-in variable is set.
  */
 void 
-make_rh_rpm_package(char *package)
+make_rh_rpm_package(char *package, int release)
 {
-	char *p;
+	char *p, *cur;
 	FILE *fp;
+	char buf[256];
 
 	if ((strcmp(package, "remove") == 0)) {
 		if (file_exists(".rh_rpm_package")) {
@@ -589,6 +593,33 @@
 	if (!strlen(++p))
 		return;
 
+	if (release) {
+		if (!(fp = popen("./crash -v", "r"))) {
+			fprintf(stderr, "cannot execute \"crash -v\"\n");
+			exit(1);
+		}
+		cur = NULL;
+		while (fgets(buf, 256, fp)) {
+			if (strncmp(buf, "crash ", 6) == 0) {
+				cur = &buf[6];
+				break;
+			} 
+		}
+		pclose(fp);
+	
+		if (!cur) {
+			fprintf(stderr, "cannot get version from \"crash -v\"\n");
+			exit(1);
+		} 
+		strip_linefeeds(cur);
+
+		if (strcmp(cur, p) != 0) {
+			fprintf(stderr, "./crash version: %s\n", cur);
+			fprintf(stderr, "release version: %s\n", p);
+			exit(1);
+		}
+	}
+
         if ((fp = fopen(".rh_rpm_package", "w")) == NULL) {
                 perror("fopen");
                 fprintf(stderr, "cannot open .rh_rpm_package\n");
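The release-verification code added above shells out to "./crash -v" and compares the version it reports against the version embedded in the package string. A self-contained sketch of that popen() pattern, with a hypothetical helper name (note that popen() streams are closed with pclose()):

/* Illustrative sketch, not part of the patch: capture the version printed by
 * "./crash -v".  Returns 0 on success, -1 on failure. */
#include <stdio.h>
#include <string.h>

static int read_crash_version(char *out, size_t outlen)
{
	FILE *fp;
	char line[256];
	int found = -1;

	if (!(fp = popen("./crash -v", "r")))
		return -1;

	while (fgets(line, sizeof(line), fp)) {
		if (strncmp(line, "crash ", 6) == 0) {
			line[strcspn(line, "\n")] = '\0';	/* strip newline */
			snprintf(out, outlen, "%s", &line[6]);
			found = 0;
			break;
		}
	}
	pclose(fp);
	return found;
}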
@@ -910,7 +941,7 @@
         p = &LASTCHAR(line);
 
         while (*p == '\n')
-                *p = (char)NULL;
+                *p = '\0';
 
         return(line);
 }
@@ -1121,14 +1152,14 @@
 	printf("#\n");
 	printf("# crash core analysis suite\n");
 	printf("#\n");
-	printf("Summary: crash utility for live systems; netdump, diskdump, LKCD or mcore dumpfiles\n");
+	printf("Summary: crash utility for live systems; netdump, diskdump, kdump, LKCD or mcore dumpfiles\n");
 	printf("Name: %s\n", lower_case(target_data.program, buf));
 	printf("Version: %s\n", Version);
 	printf("Release: %s\n", Release);
-	printf("License: GPL\n");
+	printf("License: GPLv2\n");
 	printf("Group: Development/Debuggers\n");
 	printf("Source: %%{name}-%%{version}-%%{release}.tar.gz\n");
-	printf("URL: ftp://people.redhat.com/anderson/%%{name}-%%{version}-%%{release}.tar.gz\n");
+	printf("URL: http://people.redhat.com/anderson\n");
 	printf("Distribution: Linux 2.2 or greater\n");
 	printf("Vendor: Red Hat, Inc.\n");
 	printf("Packager: Dave Anderson <anderson@redhat.com>\n");
@@ -1136,12 +1167,24 @@
 	printf("ExclusiveArch: i386 alpha ia64 ppc ppc64 ppc64pseries ppc64iseries x86_64 s390 s390x\n");
 	printf("Buildroot: %%{_tmppath}/%%{name}-root\n");
 	printf("BuildRequires: ncurses-devel zlib-devel\n");
+	printf("Requires: binutils\n");
 	printf("# Patch0: crash-3.3-20.installfix.patch (patch example)\n");
 	printf("\n");
 	printf("%%description\n");
 	printf("The core analysis suite is a self-contained tool that can be used to\n");
 	printf("investigate either live systems, kernel core dumps created from the\n");
-	printf("netdump and diskdump packages from Red Hat Linux, the mcore kernel patch\n");
+	printf("netdump, diskdump and kdump facilities from Red Hat Linux, the mcore kernel patch\n");
+	printf("offered by Mission Critical Linux, or the LKCD kernel patch.\n");
+	printf("\n");
+	printf("%%package devel\n");
+	printf("Requires: %%{name} = %%{version}, zlib-devel\n");
+	printf("Summary: crash utility for live systems; netdump, diskdump, kdump, LKCD or mcore dumpfiles\n");
+	printf("Group: Development/Debuggers\n");
+	printf("\n");
+	printf("%%description devel\n");
+	printf("The core analysis suite is a self-contained tool that can be used to\n");
+	printf("investigate either live systems, kernel core dumps created from the\n");
+	printf("netdump, diskdump and kdump packages from Red Hat Linux, the mcore kernel patch\n");
 	printf("offered by Mission Critical Linux, or the LKCD kernel patch.\n");
 	printf("\n");
 	printf("%%prep\n");
@@ -1158,6 +1201,8 @@
 	printf("make DESTDIR=%%{buildroot} install\n");
 	printf("mkdir -p %%{buildroot}%%{_mandir}/man8\n");
 	printf("cp crash.8 %%{buildroot}%%{_mandir}/man8/crash.8\n");
+	printf("mkdir -p %%{buildroot}%%{_includedir}/crash\n");
+	printf("cp defs.h %%{buildroot}%%{_includedir}/crash\n");
 	printf("\n");
 	printf("%%clean\n");
 	printf("rm -rf %%{buildroot}\n");
@@ -1167,6 +1212,10 @@
 	printf("%%{_mandir}/man8/crash.8*\n");
      /*	printf("/usr/bin/crashd\n"); */
 	printf("%%doc README\n");
+	printf("\n");
+	printf("%%files devel\n");
+	printf("%%defattr(-,root,root)\n");
+	printf("%%{_includedir}/*\n");
 }
 
 /*
--- crash/help.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/help.c	2009-01-26 14:57:23.000000000 -0500
@@ -1,8 +1,8 @@
 /* help.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -19,7 +19,6 @@
 
 static void reshuffle_cmdlist(void);
 static int sort_command_name(const void *, const void *);
-static void display_help_screen(char *);
 static void display_commands(void);
 static void display_copying_info(void);
 static void display_warranty_info(void);
@@ -106,34 +105,33 @@
 void
 program_usage(int form)
 {
-	int i;
-	char **p;
-	FILE *less;
+	if (form == SHORT_FORM) {
+		fprintf(fp, program_usage_info[0], pc->program_name);
+		fprintf(fp, "\nEnter \"%s -h\" for details.\n",
+			pc->program_name);
+		clean_exit(1);
+	} else {
+		FILE *scroll;
+		char *scroll_command;
+		char **p;
+
+		if ((scroll_command = setup_scroll_command()) &&
+		    (scroll = popen(scroll_command, "w")))
+			fp = scroll;
+		else
+			scroll = NULL;
 
-	if (form == LONG_FORM)
-		less = popen("/usr/bin/less", "w");
-	else
-		less = NULL;
-
-	p = program_usage_info;
-
-	if (form == LONG_FORM) {
-		if (less)
-			fp = less;
-        	for (i = 0; program_usage_info[i]; i++, p++) {
-                	fprintf(fp, *p, pc->program_name);
+		for (p = program_usage_info; *p; p++) {
+			fprintf(fp, *p, pc->program_name);
 			fprintf(fp, "\n");
 		}
-	} else {
-               	fprintf(fp, *p, pc->program_name);
-		fprintf(fp, "\nEnter \"%s -h\" for details.\n",
-			pc->program_name);
-	}
-	fflush(fp);
-	if (less)
-		pclose(less);
+		fflush(fp);
 
-	clean_exit(1);
+		if (scroll)
+			pclose(scroll);
+
+		clean_exit(0);
+	}
 }
 
 
@@ -147,14 +145,16 @@
         struct command_table_entry *cp;
 	struct extension_table *ext;
 
-	for (pc->ncmds = 0, cp = &base_command_table[0]; cp->name; cp++) {
+	for (pc->ncmds = 0, cp = pc->cmd_table; cp->name; cp++) {
 		if (!(cp->flags & HIDDEN_COMMAND))
                 	pc->ncmds++;
 	}
 
         for (ext = extension_table; ext; ext = ext->next) {
-		for (cp = ext->command_table; cp->name; cp++)
-			pc->ncmds++;
+		for (cp = ext->command_table; cp->name; cp++) {
+			if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND)))
+				pc->ncmds++;
+		}
 	}
 
         if (!pc->cmdlist) {
@@ -188,14 +188,16 @@
 	for (i = 0; i < pc->cmdlistsz; i++) 
 		pc->cmdlist[i] = NULL;
 
-        for (cnt = 0, cp = &base_command_table[0]; cp->name; cp++) {
+        for (cnt = 0, cp = pc->cmd_table; cp->name; cp++) {
 		if (!(cp->flags & HIDDEN_COMMAND))
                 	pc->cmdlist[cnt++] = cp->name;
 	}
 
         for (ext = extension_table; ext; ext = ext->next) {
-                for (cp = ext->command_table; cp->name; cp++)
-                	pc->cmdlist[cnt++] = cp->name;
+                for (cp = ext->command_table; cp->name; cp++) {
+			if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND)))
+				pc->cmdlist[cnt++] = cp->name;
+		}
         }
 
 	if (cnt > pc->cmdlistsz)
@@ -239,7 +241,7 @@
 	oflag = 0;
 
         while ((c = getopt(argcnt, args, 
-	        "efNDdmM:ngcaBbHhksvVoptTzLxO")) != EOF) {
+	        "efNDdmM:ngcaBbHhkKsvVoptTzLxO")) != EOF) {
                 switch(c)
                 {
 		case 'e':
@@ -303,7 +305,11 @@
  			return;
 
 		case 'k':
-			dump_kernel_table();
+			dump_kernel_table(!VERBOSE);
+			return;
+
+		case 'K':
+			dump_kernel_table(VERBOSE);
 			return;
 
 		case 's':
@@ -349,6 +355,7 @@
 			fprintf(fp, " -D - dumpfile memory usage\n");
 			fprintf(fp, " -f - filesys table\n");
 			fprintf(fp, " -k - kernel_table\n");
+			fprintf(fp, " -K - kernel_table (verbose)\n");
 			fprintf(fp, " -M <num> machine specific\n");
 			fprintf(fp, " -m - machdep_table\n");
 			fprintf(fp, " -s - symbol table data\n");
@@ -389,7 +396,7 @@
 		if (oflag) 
 			dump_offset_table(args[optind], FALSE);
 		else	
-        		cmd_usage(args[optind], COMPLETE_HELP);
+        		cmd_usage(args[optind], COMPLETE_HELP|MUST_HELP);
 		optind++;
         } while (args[optind]);
 }
@@ -398,7 +405,7 @@
  *  Format and display the help menu.
  */
 
-static void
+void
 display_help_screen(char *indent)
 {
         int i, j, rows;
@@ -508,16 +515,16 @@
 "   active  perform the command(s) on the active thread on each CPU.\n",
 "  If none of the task-identifying arguments above are entered, the command",
 "  will be performed on all tasks.\n",
-"  command  select one or more of the following commands on the tasks",
+"  command  select one or more of the following commands to be run on the tasks",
 "           selected, or on all tasks:\n",
-"             bt  same as the \"bt\" command  (optional flags: -r -t -l -e -R -f)",
-"             vm  same as the \"vm\" command  (optional flags: -p -v -m -R)",
-"           task  same as the \"task\" command  (optional flag: -R)",
-"          files  same as the \"files\" command  (optional flag: -R)",
-"            net  same as the \"net\" command  (optional flags: -s -S -R)",
-"            set  same as the \"set\" command",
-"            sig  same as the \"sig\" command",
-"           vtop  same as the \"vtop\" command  (optional flags: -c -u -k)\n",
+"             bt  run the \"bt\" command  (optional flags: -r -t -l -e -R -f -o)",
+"             vm  run the \"vm\" command  (optional flags: -p -v -m -R)",
+"           task  run the \"task\" command  (optional flag: -R)",
+"          files  run the \"files\" command  (optional flag: -R)",
+"            net  run the \"net\" command  (optional flags: -s -S -R)",
+"            set  run the \"set\" command",
+"            sig  run the \"sig\" command (optional flag: -g)",
+"           vtop  run the \"vtop\" command  (optional flags: -c -u -k)\n",
 "     flag  Pass this optional flag to the command selected.",
 " argument  Pass this argument to the command selected.",
 " ",
@@ -651,6 +658,10 @@
 "  argument is entered, the current value of the %s variable is shown.  These",
 "  are the %s variables, acceptable arguments, and purpose:\n",
 "          scroll  on | off     controls output scrolling.",
+"          scroll  less         use /usr/bin/less as the output scrolling program.",
+"          scroll  more         use /bin/more as the output scrolling program.",
+"          scroll  CRASHPAGER   use CRASHPAGER environment variable as the",
+"                               output scrolling program.",
 "           radix  10 | 16      sets output radix to 10 or 16.",
 "         refresh  on | off     controls internal task list refresh.",
 "       print_max  number       set maximum number of array elements to print.",
@@ -665,6 +676,10 @@
 "            edit  vi | emacs   set line editing mode (from .%src file only).",
 "        namelist  filename     name of kernel (from .%src file only).",
 "        dumpfile  filename     name of core dumpfile (from .%src file only).",
+"   zero_excluded  on | off     controls whether excluded pages from a dumpfile",
+"                               should return zero-filled memory.",
+"       null-stop  on | off     if on, gdb's printing of character arrays will",
+"                               stop at the first NULL encountered.", 
 " ",
 "  Internal variables may be set in four manners:\n",
 "    1. entering the set command in $HOME/.%src.",
@@ -694,11 +709,11 @@
 "       STATE: TASK_RUNNING (PANIC)\n",
 "  Turn off output scrolling:\n",
 "    %s> set scroll off",
-"    scroll: off",
+"    scroll: off (/usr/bin/less)",
 " ",
 "  Show the current state of %s internal variables:\n", 
 "    %s> set -v",
-"            scroll: on",
+"            scroll: on (/usr/bin/less)",
 "             radix: 10 (decimal)",
 "           refresh: on",
 "         print_max: 256",
@@ -710,6 +725,8 @@
 "              edit: vi",
 "          namelist: vmlinux",
 "          dumpfile: vmcore",
+"     zero_excluded: off",
+"         null-stop: on",
 " ",
 "  Show the current context:\n",
 "    %s> set",
@@ -724,10 +741,14 @@
 char *help_p[] = {
 "p",
 "print the value of an expression",
-"expression",
+"[-x|-d][-u] expression",
 "  This command passes its arguments on to gdb \"print\" command for evaluation.",
 "",
-"    expression   The expression to be evaluated.\n",
+"    expression   The expression to be evaluated.",
+"            -x  override default output format with hexadecimal format.",
+"            -d  override default output format with decimal format.",
+"            -u  the expression evaluates to a user address reference.", 
+"",
 "  The default output format is decimal, but that can be changed at any time",
 "  with the two built-in aliases \"hex\" and \"dec\".  Alternatively, there",
 "  are two other built-in aliases, \"px\" and \"pd\", which force the command",
@@ -787,7 +808,7 @@
 char *help_ps[] = {
 "ps",
 "display process status information",
-"[-k|-u][-s][-p|-c|-t|-l] [pid | taskp | command] ...",
+"[-k|-u][-s][-p|-c|-t|-l|-a|-g|-r] [pid | taskp | command] ...",
 "  This command displays process status for selected, or all, processes" ,
 "  in the system.  If no arguments are entered, the process data is",
 "  is displayed for all processes.  Selected process identifiers can be",
@@ -822,8 +843,9 @@
 "  On SMP machines, the active task on each CPU will be highlighted by an",
 "  angle bracket (\">\") preceding its information.",
 " ",
-"  Alternatively, information regarding parent-child relationships, or",
-"  per-task time usage data may be displayed:",
+"  Alternatively, information regarding parent-child relationships,",
+"  per-task time usage data, argument/environment data, thread groups,",
+"  or resource limits may be displayed:",
 " ",
 "       -p  display the parental hierarchy of selected, or all, tasks.",  
 "       -c  display the children of selected, or all, tasks.",
@@ -832,6 +854,10 @@
 "       -l  display the task last_run or timestamp value, whichever applies,",
 "           of selected, or all, tasks; the list is sorted with the most",
 "           recently-run task (largest last_run/timestamp) shown first.",
+"       -a  display the command line arguments and environment strings of",
+"           selected, or all, user-mode tasks.",
+"       -g  display tasks by thread group, of selected, or all, tasks.",
+"       -r  display resource limits (rlimits) of selected, or all, tasks.",
 "\nEXAMPLES",
 "  Show the process status of all current tasks:\n",
 "    %s> ps",
@@ -1031,13 +1057,73 @@
 "        381      1   0  c34ddf28  IN   0.2  1316   224  automount",
 "        391      1   1  c2777f28  IN   0.2  1316   224  automount",
 "    ...",
+" ",
+"  Display the argument and environment data for the automount task:\n",
+"    %s> ps -a automount",
+"    PID: 3948   TASK: f722ee30  CPU: 0   COMMAND: \"automount\"",
+"    ARG: /usr/sbin/automount --timeout=60 /net program /etc/auto.net",
+"    ENV: SELINUX_INIT=YES",
+"         CONSOLE=/dev/console",
+"         TERM=linux",
+"         INIT_VERSION=sysvinit-2.85",
+"         PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+"         LC_MESSAGES=en_US",
+"         RUNLEVEL=3",
+"         runlevel=3",
+"         PWD=/",
+"         LANG=ja_JP.UTF-8",
+"         PREVLEVEL=N",
+"         previous=N",
+"         HOME=/",
+"         SHLVL=2",
+"         _=/usr/sbin/automount",
+" ",
+"  Display the tasks in the thread group containing task c20ab0b0:\n",
+"    %s> ps -g c20ab0b0",
+"    PID: 6425   TASK: f72f50b0  CPU: 0   COMMAND: \"firefox-bin\"",
+"      PID: 6516   TASK: f71bf1b0  CPU: 0   COMMAND: \"firefox-bin\"",
+"      PID: 6518   TASK: d394b930  CPU: 0   COMMAND: \"firefox-bin\"",
+"      PID: 6520   TASK: c20aa030  CPU: 0   COMMAND: \"firefox-bin\"",
+"      PID: 6523   TASK: c20ab0b0  CPU: 0   COMMAND: \"firefox-bin\"",
+"      PID: 6614   TASK: f1f181b0  CPU: 0   COMMAND: \"firefox-bin\"",
+" ",   
+"  Display the tasks in the thread group for each instance of the",
+"  program named \"multi-thread\":\n",
+"    %s> ps -g multi-thread",
+"    PID: 2522   TASK: 1003f0dc7f0       CPU: 1   COMMAND: \"multi-thread\"",
+"      PID: 2523   TASK: 10037b13030       CPU: 1   COMMAND: \"multi-thread\"",
+"      PID: 2524   TASK: 1003e064030       CPU: 1   COMMAND: \"multi-thread\"",
+"      PID: 2525   TASK: 1003e13a7f0       CPU: 1   COMMAND: \"multi-thread\"",
+"    ",
+"    PID: 2526   TASK: 1002f82b7f0       CPU: 1   COMMAND: \"multi-thread\"",
+"      PID: 2527   TASK: 1003e1737f0       CPU: 1   COMMAND: \"multi-thread\"",
+"      PID: 2528   TASK: 10035b4b7f0       CPU: 1   COMMAND: \"multi-thread\"",
+"      PID: 2529   TASK: 1003f0c37f0       CPU: 1   COMMAND: \"multi-thread\"",
+"      PID: 2530   TASK: 10035597030       CPU: 1   COMMAND: \"multi-thread\"",
+"      PID: 2531   TASK: 100184be7f0       CPU: 1   COMMAND: \"multi-thread\"",
+" ",
+"  Display the resource limits of \"bash\" task 13896:\n",
+"    %s> ps -r 13896",
+"    PID: 13896  TASK: cf402000  CPU: 0   COMMAND: \"bash\"",
+"       RLIMIT     CURRENT       MAXIMUM",
+"          CPU   (unlimited)   (unlimited)",
+"        FSIZE   (unlimited)   (unlimited)",
+"         DATA   (unlimited)   (unlimited)",
+"        STACK    10485760     (unlimited)",
+"         CORE   (unlimited)   (unlimited)",
+"          RSS   (unlimited)   (unlimited)",
+"        NPROC      4091          4091",
+"       NOFILE      1024          1024",
+"      MEMLOCK      4096          4096",
+"           AS   (unlimited)   (unlimited)",
+"        LOCKS   (unlimited)   (unlimited)",
 NULL               
 };
 
 char *help_rd[] = {
 "rd",
 "read memory",
-"[-dDsup][-8|-16|-32|-64][-o offs][-e addr] [address|symbol] [count]",
+"[-dDsSupxmf][-8|-16|-32|-64][-o offs][-e addr] [address|symbol] [count]",
 "  This command displays the contents of memory, with the output formatted",
 "  in several different manners.  The starting address may be entered either",
 "  symbolically or by address.  The default output size is the size of a long",
@@ -1046,9 +1132,15 @@
 "       -p  address argument is a physical address.",
 "       -u  address argument is a user virtual address; only required on",
 "           processors with common user and kernel virtual address spaces.",
+"       -m  address argument is a xen host machine address.",
+"       -f  address argument is a dumpfile offset.",
 "       -d  display output in signed decimal format (default is hexadecimal).",
 "       -D  display output in unsigned decimal format (default is hexadecimal).",
 "       -s  displays output symbolically when appropriate.",
+"       -S  displays output symbolically when appropriate; if the address",
+"           references a slab cache object, the name of the slab cache will",
+"           be displayed in brackets.",
+"       -x  do not display ASCII translation at end of each line.",
 #ifdef NOTDEF
 "    -o       Shows offset value from the starting address.",
 #endif
@@ -1064,40 +1156,44 @@
 "             3. -u specifies a user virtual address, but is only necessary on",
 "                processors with common user and kernel virtual address spaces.",
 "   symbol  symbol of starting address to read.",
-"    count  number of memory locations to display (default is 1).",
+"    count  number of memory locations to display (default is 1); if entered,",
+"           must be the last argument on the command line.",
 "\nEXAMPLES",
 "  Display the kernel_version string:\n",
 "    %s> rd kernel_version 4 ",
 "    c0226a6c:  2e322e32 35312d35 00000000 00000001   2.2.5-15........\n",
-"  Display the same block of memory, with and without symbols:\n",
-"    %s> rd c1157f00 52   ",
-"    c1157f00:  c0131f7a 00000400 00000015 c013206e   z...........n ..",
-"    c1157f10:  00000100 c3d4c140 00000100 00000246   ....@.......F...",
-"    c1157f20:  019b2065 c2a5bb90 080ac618 c02a83d0   e ............*.",
-"    c1157f30:  40000025 01a45067 c1156000 00000000   %..@gP...`......",
-"    c1157f40:  c011b4f7 c1156000 c2a5bb90 080ac618   .....`..........",
-"    c1157f50:  00000001 00000000 c1a45000 c19b2000   .........P... ..",
-"    c1157f60:  c1157f84 0000003b c022c000 c1156000   ....;.....\"..`..",
-"    c1157f70:  00000000 fffffe00 bffff6fc 0000002e   ................",
-"    c1157f80:  c022c000 ffffffff c01178ba c1156000   ..\"......x...`..",
-"    c1157f90:  00000000 080ac618 bffff6ac 00000001   ................",
-"    c1157fa0:  c1156000 c1156000 c1157fb8 c1156000   .`...`.......`..",
-"    c1157fb0:  c1157fb8 c1156000 c1156000 c115608c   .....`...`...`..",
-"    c1157fc0:  c01096c8 ffffffff bffff6fc 00000002   ................\n",
-"    %s> rd -s c1157f00 52",
-"    c1157f00:  alloc_fd_array+0x1a 00000400 00000015 expand_fd_array+0x72 ",
-"    c1157f10:  00000100 c3d4c140 00000100 00000246 ",
-"    c1157f20:  019b2065 c2a5bb90 080ac618 c02a83d0 ",
-"    c1157f30:  40000025 01a45067 c1156000 00000000 ",
-"    c1157f40:  do_wp_page+0x17f c1156000 c2a5bb90 080ac618 ",
-"    c1157f50:  00000001 00000000 c1a45000 c19b2000 ",
-"    c1157f60:  c1157f84 0000003b init_task_union c1156000 ",
-"    c1157f70:  00000000 fffffe00 bffff6fc 0000002e ",
-"    c1157f80:  init_task_union ffffffff sys_wait4+0x2be c1156000 ",
-"    c1157f90:  00000000 080ac618 bffff6ac 00000001 ",
-"    c1157fa0:  c1156000 c1156000 c1157fb8 c1156000 ",
-"    c1157fb0:  c1157fb8 c1156000 c1156000 c115608c ",
-"    c1157fc0:  system_call+0x34 ffffffff bffff6fc 00000002\n",
+"  Display the same block of memory, first without symbols, again",
+"  with symbols, and then with symbols and slab cache references:\n",
+"    %s> rd dff12e80 36",
+"    dff12e80:  dff12e94 00000000 c05a363a dff12ed0   ........:6Z.....",
+"    dff12e90:  00000001 dff12e98 0041fe3f ffffffff   ........?.A.....",
+"    dff12ea0:  00000001 d5147800 00000000 def8abc0   .....x..........",
+"    dff12eb0:  dff12ebc c05a4aa0 00000000 dff12ed0   .....JZ.........",
+"    dff12ec0:  00000001 00000000 00000000 00000000   ................",
+"    dff12ed0:  0808b353 00000000 dff12efc c0698220   S........... .i.",
+"    dff12ee0:  dff12efc df7c6480 00000001 c046f99b   .....d|.......F.",
+"    dff12ef0:  00000000 00000000 0808b352 dff12f68   ........R...h/..",
+"    dff12f00:  c155a128 00000000 00000001 ffffffff   (.U.............",
+"    %s> rd -s dff12e80 36",
+"    dff12e80:  dff12e94 00000000 sock_aio_write+83 dff12ed0 ",
+"    dff12e90:  00000001 dff12e98 0041fe3f ffffffff ",
+"    dff12ea0:  00000001 d5147800 00000000 def8abc0 ",
+"    dff12eb0:  dff12ebc sys_recvfrom+207 00000000 dff12ed0 ",
+"    dff12ec0:  00000001 00000000 00000000 00000000 ",
+"    dff12ed0:  0808b353 00000000 dff12efc socket_file_ops ",
+"    dff12ee0:  dff12efc df7c6480 00000001 do_sync_write+182 ",
+"    dff12ef0:  00000000 00000000 0808b352 dff12f68 ",
+"    dff12f00:  c155a128 00000000 00000001 ffffffff ",
+"    %s> rd -S dff12e80 36",
+"    dff12e80:  [size-4096] 00000000 sock_aio_write+83 [size-4096] ",
+"    dff12e90:  00000001 [size-4096] 0041fe3f ffffffff ",
+"    dff12ea0:  00000001 [sock_inode_cache] 00000000 [filp]   ",
+"    dff12eb0:  [size-4096] sys_recvfrom+207 00000000 [size-4096] ",
+"    dff12ec0:  00000001 00000000 00000000 00000000 ",
+"    dff12ed0:  0808b353 00000000 [size-4096] socket_file_ops ",
+"    dff12ee0:  [size-4096] [filp]   00000001 do_sync_write+182 ",
+"    dff12ef0:  00000000 00000000 0808b352 [size-4096] ",
+"    dff12f00:  [vm_area_struct] 00000000 00000001 ffffffff\n",
 "  Read jiffies in hexadecimal and decimal format:\n",
 "    %s> rd jiffies",
 "    c0213ae0:  0008cc3a                              :...\n",
@@ -1155,7 +1251,7 @@
 "bt",
 "backtrace",
 #if defined(GDB_6_0) || defined(GDB_6_1)
-"[-a|-r|-t|-l|-e|-E|-f] [-R ref] [ -I ip ] [-S sp] [pid | taskp]",
+"[-a|-r|-t|-T|-l|-e|-E|-f|-o|-O] [-R ref] [ -I ip ] [-S sp] [pid | taskp]",
 #else
 "[-a|-r|-t|-l|-e|-f|-g] [-R ref] [ -I ip ] [-S sp] [pid | taskp]",
 #endif
@@ -1167,14 +1263,26 @@
 "           pages of memory containing the task_union structure.",
 "       -t  display all text symbols found from the last known stack location",
 "           to the top of the stack. (helpful if the back trace fails)",
+"       -T  display all text symbols found from just above the task_struct or",
+"           thread_info to the top of the stack. (helpful if the back trace",
+"           fails or the -t option starts too high in the process stack).",
 "       -l  show file and line number of each stack trace text location.",
 "       -e  search the stack for possible kernel and user mode exception frames.",
-"       -E  search the IRQ stacks (x86, x86_64 and PPC64), and the exception",
+"       -E  search the IRQ stacks (x86, x86_64 and ppc64), and the exception",
 "           stacks (x86_64) for possible exception frames; all other arguments",
 "           will be ignored since this is not a context-sensitive operation.",
 "       -f  display all stack data contained in a frame; this option can be",
-"           used to determine the arguments passed to each function (x86 only);",
-"           on IA64, the argument register contents are dumped.",
+"           used to determine the arguments passed to each function; on ia64,",
+"           the argument register contents are dumped.",
+"       -o  x86: use old backtrace method, permissible only on kernels that were",
+"           compiled without the -fomit-frame-pointer CFLAG.",
+"           x86_64: use old backtrace method, which dumps potentially stale",
+"           kernel text return addresses found on the stack.",
+"       -O  x86: use old backtrace method by default, permissible only on kernels",
+"           that were compiled without the -fomit-frame-pointer CFLAG; subsequent usage",
+"           of this option toggles the backtrace method.",
+"           x86_64: use old backtrace method by default; subsequent usage of this",
+"           option toggles the backtrace method.",  
 #if !defined(GDB_6_0) && !defined(GDB_6_1)
 "       -g  use gdb stack trace code. (alpha only)",
 #endif
@@ -1189,11 +1297,8 @@
 "  Note that all examples below are for x86 only.  The output format will differ",
 "  for other architectures.  x86 backtraces from kernels that were compiled",
 "  with the --fomit-frame-pointer CFLAG occasionally will drop stack frames,",
-"  or display a stale frame reference.  x86_64 backtraces are only slightly",
-"  more intelligent than those generated from kernel oops messages; text return",
-"  addresses shown in the back trace may include stale references.  When in",
-"  doubt as to the accuracy of a backtrace, the -t option may help fill in",
-"  the blanks.\n",
+"  or display a stale frame reference.  When in doubt as to the accuracy of a",
+"  backtrace, the -t or -T options may help fill in the blanks.\n",
 "EXAMPLES",
 "  Display the stack trace of the active task(s) when the kernel panicked:\n",
 "    %s> bt -a",
@@ -1437,14 +1542,22 @@
 " ",
 "  Below is an example shared object file consisting of just one command, ",
 "  called \"echo\", which simply echoes back all arguments passed to it.",
-"  Note the comments contained within it for further details.  To build it,",
-"  cut and paste the following output into a file, and call it, for example,",
-"  \"extlib.c\".  Then compile like so:",
+"  Note the comments contained within it for further details.  Cut and paste",
+"  the following output into a file, and call it, for example, \"echo.c\".",
+"  Then compile it in either of two manners.  Either manually, like so:",
+" ",
+"  gcc -nostartfiles -shared -rdynamic -o echo.so echo.c -fPIC -D<machine-type> $(TARGET_CFLAGS)",
+" ",
+"  where <machine-type> must be one of the MACHINE_TYPE #define's in defs.h,",
+"  and where $(TARGET_CFLAGS) is the same as it is declared in the top-level",
+"  Makefile after a build is completed.  Or alternatively, the \"echo.c\" file",
+"  can be copied into the \"extensions\" subdirectory, and compiled automatically",
+"  like so:",
 " ",
-"    gcc -nostartfiles -shared -rdynamic -o extlib.so extlib.c",
+"  make extensions",
 " ",
-"  The extlib.so file may be dynamically linked into %s during runtime, or",
-"  during initialization by putting \"extend extlib.so\" into a .%src file",
+"  The echo.so file may be dynamically linked into %s during runtime, or",
+"  during initialization by putting \"extend echo.so\" into a .%src file",
 "  located in the current directory, or in the user's $HOME directory.",
 "  ",
 "---------------------------------- cut here ----------------------------------",
@@ -1556,7 +1669,7 @@
 "        PROCESSOR SPEED: 1993 Mhz",
 "                     HZ: 100",
 "              PAGE SIZE: 4096",
-"          L1 CACHE SIZE: 32",
+// "          L1 CACHE SIZE: 32",
 "    KERNEL VIRTUAL BASE: c0000000",
 "    KERNEL VMALLOC BASE: e0800000",
 "      KERNEL STACK SIZE: 8192",
@@ -1583,7 +1696,8 @@
 "  This command displays the timer queue entries, both old- and new-style,",
 "  in chronological order.  In the case of the old-style timers, the",
 "  timer_table array index is shown; in the case of the new-style timers, ",
-"  the timer_list address is shown.",
+"  the timer_list address is shown.  On later kernels, the timer data is",
+"  per-cpu.",
 "\nEXAMPLES",
 "    %s> timer",
 "    JIFFIES",
@@ -1610,6 +1724,37 @@
 "     372010      c2323f7c      c0112d6c  <process_timeout>",
 "     372138      c2191f10      c0112d6c  <process_timeout>",
 "    8653052      c1f13f10      c0112d6c  <process_timeout>",
+" ",
+"  Display the timer queue on a 2-cpu system:\n",
+"    %s> timer",
+"    TVEC_BASES[0]: c1299be0",
+"     JIFFIES",
+"    18256298",
+"     EXPIRES  TIMER_LIST  FUNCTION",
+"    18256406   cd5ddec0   c01232bb  <process_timeout>",
+"    18256677   ceea93e0   c011e3cc  <it_real_fn>",
+"    18256850   ceea7f64   c01232bb  <process_timeout>",
+"    18258751   cd1d4f64   c01232bb  <process_timeout>",
+"    18258792   cf5782f0   c011e3cc  <it_real_fn>",
+"    18261266   c03c9f80   c022fad5  <rt_check_expire>",
+"    18262196   c02dc2e0   c0233329  <peer_check_expire>",
+"    18270518   ceb8bf1c   c01232bb  <process_timeout>",
+"    18271327   c03c9120   c0222074  <flow_cache_new_hashrnd>",
+"    18271327   c03ca580   c0233ace  <ipfrag_secret_rebuild>",
+"    18272532   c02d1e18   c0129946  <delayed_work_timer_fn>",
+"    18276518   c03c9fc0   c022fd40  <rt_secret_rebuild>",
+"    18332334   ceea9970   c011e3cc  <it_real_fn>",
+"    18332334   cfb6a840   c011e3cc  <it_real_fn>",
+"    18665378   cec25ec0   c01232bb  <process_timeout>",
+"    TVEC_BASES[1]: c12a1be0",
+"     JIFFIES",
+"    18256298",
+"     EXPIRES  TIMER_LIST  FUNCTION",
+"    18256493   c02c7d00   c013dad5  <wb_timer_fn>",
+"    18256499   c12a2db8   c0129946  <delayed_work_timer_fn>",
+"    18277900   ceebaec0   c01232bb  <process_timeout>",
+"    18283769   cf739f64   c01232bb  <process_timeout>",
+"    18331902   cee8af64   c01232bb  <process_timeout>",
 NULL               
 };
 
@@ -1905,7 +2050,7 @@
 char *help_irq[] = {
 "irq",
 "IRQ data",
-"[-d | -b | [index ...]]",
+"[[[index ...] | -u] | -d | -b]",
 "  This command collaborates the data in an irq_desc_t, along with its",
 "  associated hw_interrupt_type and irqaction structure data, into a",
 "  consolidated per-IRQ display.  Alternatively, the intel interrupt",
@@ -1913,6 +2058,7 @@
 "  If no index value argument(s) nor any options are entered, the IRQ",
 "  data for all IRQs will be displayed.\n",
 "    index   a valid IRQ index.",
+"       -u   dump data for in-use IRQs only.",  
 "       -d   dump the intel interrupt descriptor table.",
 "       -b   dump bottom half data.",
 "\nEXAMPLES",
@@ -2013,7 +2159,7 @@
 char *help_sys[] = {
 "sys",
 "system data",
-"[-c [name|number]] ",
+"[-c [name|number]] config",
 "  This command displays system-specific data.  If no arguments are entered,\n"
 "  the same system data shown during %s invocation is shown.\n",
 "    -c [name|number]  If no name or number argument is entered, dump all",
@@ -2023,6 +2169,8 @@
 "                      that number is displayed.  If the current output radix",
 "                      has been set to 16, the system call numbers will be ",
 "                      displayed in hexadecimal.",
+"    config            If the kernel was configured with CONFIG_IKCONFIG, then",
+"                      dump the in-kernel configuration data.",
 "    -panic            Panic a live system.  Requires write permission to", 
 "                      /dev/mem.  Results in the %s context causing an",
 "                      \"Attempted to kill the idle task!\" panic.  (The dump",
@@ -2043,6 +2191,27 @@
 "         VERSION: #24 SMP Mon Oct 11 17:41:40 CDT 1999",
 "         MACHINE: i686  (500 MHz)",
 "          MEMORY: 1 GB",
+"\n  Dump the system configuration data (if CONFIG_IKCONFIG):\n",
+"    %s> sys config",
+"    #",
+"    # Automatically generated make config: don't edit",
+"    # Linux kernel version: 2.6.16",
+"    # Mon Apr 10 07:58:06 2006",
+"    #",
+"    CONFIG_X86_64=y",
+"    CONFIG_64BIT=y",
+"    CONFIG_X86=y",
+"    CONFIG_SEMAPHORE_SLEEPERS=y",
+"    CONFIG_MMU=y",
+"    CONFIG_RWSEM_GENERIC_SPINLOCK=y",
+"    CONFIG_GENERIC_CALIBRATE_DELAY=y",
+"    CONFIG_X86_CMPXCHG=y",
+"    CONFIG_EARLY_PRINTK=y",
+"    CONFIG_GENERIC_ISA_DMA=y",
+"    CONFIG_GENERIC_IOMAP=y",
+"    CONFIG_ARCH_MAY_HAVE_PC_FDC=y",
+"    CONFIG_DMI=y",
+"    ...",
 "\n  Dump the system call table:\n",
 "    %s> sys -c",
 "    NUM  SYSTEM CALL                FILE AND LINE NUMBER",
@@ -2191,13 +2360,18 @@
 char *help_mount[] = {
 "mount",
 "mounted filesystem data",
-"[-f] [-i] [vfsmount | superblock | devname | dirname | inode]",
+"[-f] [-i] [-n pid|task] [vfsmount|superblock|devname|dirname|inode]",
 "  This command displays basic information about the currently-mounted",
 "  filesystems.  The per-filesystem dirty inode list or list of open",
 "  files for the filesystem may also be displayed.\n",
 "     -f  dump dentries and inodes for open files in each filesystem.",
 "     -i  dump all dirty inodes associated with each filesystem.\n",
-"  Filesystems may be selected in the following forms:\n",
+"  For kernels supporting namespaces, the -n option may be used to",
+"  display the mounted filesystems with respect to the namespace of a",
+"  specified task:\n",
+"     -n pid   a process PID.",
+"     -n task  a hexadecimal task_struct pointer.\n",
+"  Specific filesystems may be selected using the following forms:\n",
 "    vfsmount  hexadecimal address of filesystem vfsmount structure.",
 "  superblock  hexadecimal address of filesystem super_block structure.",
 "     devname  device name of filesystem.",
@@ -2721,22 +2895,22 @@
 char *help_sig[] = {
 "sig",
 "task signal handling",
-"[[-l] | [-s sigset]] | [pid | taskp] ...",
+"[[-l] | [-s sigset]] | [-g] [pid | taskp] ...",
 "  This command displays signal-handling data of one or more tasks.  Multiple",
 "  task or PID numbers may be entered; if no arguments are entered, the signal",
 "  handling data of the current context will be displayed.  The default display",
 "  shows:",
 " ",
-"    1.  Whether the task has an unblocked signal pending.",
-"    2.  The contents of the \"signal\" and \"blocked\" sigset_t structures",
-"        from the task_struct, both of which are represented as a 64-bit ",
-"        hexadecimal value.", 
-"    3.  A formatted dump of the \"sig\" signal_struct structure referenced by",
+"    1.  A formatted dump of the \"sig\" signal_struct structure referenced by",
 "        the task_struct.  For each defined signal, it shows the sigaction",
 "        structure address, the signal handler, the signal sigset_t mask ",
 "        (also expressed as a 64-bit hexadecimal value), and the flags.",
-"    4.  For each queued signal, if any, its signal number and associated",
-"        siginfo structure address.",
+"    2.  Whether the task has an unblocked signal pending.",
+"    3.  The contents of the \"blocked\" and \"signal\" sigset_t structures",
+"        from the task_struct/signal_struct, both of which are represented ",
+"        as a 64-bit hexadecimal value.", 
+"    4.  For each queued signal, private and/or shared, if any, its signal",
+"        number and associated siginfo structure address.",
 " ",
 "  The -l option lists the signal numbers and their name(s).  The -s option",
 "  translates a 64-bit hexadecimal value representing the contents of a",
@@ -2744,56 +2918,105 @@
 " ",        
 "        pid  a process PID.",
 "      taskp  a hexadecimal task_struct pointer.",
+"         -g  displays signal information for all threads in a task's ",
+"             thread group.",
 "         -l  displays the defined signal numbers and names.",
 "  -s sigset  translates a 64-bit hexadecimal value representing a sigset_t",
 "             into a list of signal names associated with the bits set.",
 "\nEXAMPLES",
-"  Dump the signal-handling data of PID 614:\n",
-"    %s> sig 614",
-"    PID: 614    TASK: c6f26000  CPU: 1   COMMAND: \"httpd\"",
-"    SIGPENDING: no",
-"        SIGNAL: 0000000000000000",
-"       BLOCKED: 0000000000000000",
-"    SIGNAL_STRUCT: c1913800  COUNT: 1",
+"  Dump the signal-handling data of PID 8970:\n",
+"    %s> sig 8970",
+"    PID: 8970   TASK: f67d8560  CPU: 1   COMMAND: \"procsig\"",
+"    SIGNAL_STRUCT: f6018680  COUNT: 1",
 "     SIG SIGACTION  HANDLER       MASK       FLAGS   ",
-"     [1]  c1913804  8057c98 0000000000000201 0 ",
-"     [2]  c1913818  8057c8c 0000000000000000 0 ",
-"     [3]  c191382c  SIG_DFL 0000000000000000 0 ",
-"     [4]  c1913840  8057bd8 0000000000000000 80000000 (SA_RESETHAND)",
-"     [5]  c1913854  SIG_DFL 0000000000000000 0 ",
-"     [6]  c1913868  8057bd8 0000000000000000 80000000 (SA_RESETHAND)",
-"     [7]  c191387c  8057bd8 0000000000000000 80000000 (SA_RESETHAND)",
-"     [8]  c1913890  SIG_DFL 0000000000000000 0 ",
-"     [9]  c19138a4  SIG_DFL 0000000000000000 0 ",
-"    [10]  c19138b8  8057c98 0000000000000201 0 ",
-"    [11]  c19138cc  8057bd8 0000000000000000 80000000 (SA_RESETHAND)",
-"    [12]  c19138e0  SIG_DFL 0000000000000000 0 ",
-"    [13]  c19138f4  SIG_IGN 0000000000000000 0 ",
-"    [14]  c1913908  SIG_DFL 0000000000000000 0 ",
-"    [15]  c191391c  8057c8c 0000000000000000 0 ",
-"    [16]  c1913930  SIG_DFL 0000000000000000 0 ",
-"    [17]  c1913944  SIG_DFL 0000000000000000 0 ",
-"    [18]  c1913958  SIG_DFL 0000000000000000 0 ",
-"    [19]  c191396c  SIG_DFL 0000000000000000 0 ",
-"    [20]  c1913980  SIG_DFL 0000000000000000 0 ",
-"    [21]  c1913994  SIG_DFL 0000000000000000 0 ",
-"    [22]  c19139a8  SIG_DFL 0000000000000000 0 ",
-"    [23]  c19139bc  SIG_DFL 0000000000000000 0 ",
-"    [24]  c19139d0  SIG_DFL 0000000000000000 0 ",
-"    [25]  c19139e4  SIG_DFL 0000000000000000 0 ",
-"    [26]  c19139f8  SIG_DFL 0000000000000000 0 ",
-"    [27]  c1913a0c  SIG_DFL 0000000000000000 0 ",
-"    [28]  c1913a20  SIG_DFL 0000000000000000 0 ",
-"    [29]  c1913a34  SIG_DFL 0000000000000000 0 ",
-"    [30]  c1913a48  SIG_DFL 0000000000000000 0 ",
-"    [31]  c1913a5c  SIG_DFL 0000000000000000 0 ",
-"    SIGQUEUE: (empty)",
+"     [1]  f7877684  SIG_DFL 0000000000000000 0 ",
+"     [2]  f7877698  SIG_DFL 0000000000000000 0 ",
+"    ...",
+"     [8]  f7877710  SIG_DFL 0000000000000000 0 ",
+"     [9]  f7877724  SIG_DFL 0000000000000000 0 ",
+"    [10]  f7877738  804867a 0000000000000000 80000000 (SA_RESETHAND)",
+"    [11]  f787774c  SIG_DFL 0000000000000000 0 ",
+"    [12]  f7877760  804867f 0000000000000000 10000004 (SA_SIGINFO|SA_RESTART)",
+"    [13]  f7877774  SIG_DFL 0000000000000000 0 ",
+"    ...",
+"    [31]  f78778dc  SIG_DFL 0000000000000000 0 ",
+"    [32]  f78778f0  SIG_DFL 0000000000000000 0 ",
+"    [33]  f7877904  SIG_DFL 0000000000000000 0 ",
+"    [34]  f7877918  804867f 0000000000000000 10000004 (SA_SIGINFO|SA_RESTART)",
+"    [35]  f787792c  SIG_DFL 0000000000000000 0 ",
+"    [36]  f7877940  SIG_DFL 0000000000000000 0 ",
+"    ...",
+"    [58]  f7877af8  SIG_DFL 0000000000000000 0 ",
+"    [59]  f7877b0c  SIG_DFL 0000000000000000 0 ",
+"    [60]  f7877b20  SIG_DFL 0000000000000000 0 ",
+"    [61]  f7877b34  SIG_DFL 0000000000000000 0 ",
+"    [62]  f7877b48  SIG_DFL 0000000000000000 0 ",
+"    [63]  f7877b5c  SIG_DFL 0000000000000000 0 ",
+"    [64]  f7877b70  804867f 0000000000000000 10000004 (SA_SIGINFO|SA_RESTART)",
+"   SIGPENDING: no",
+"      BLOCKED: 8000000200000800",
+"   PRIVATE_PENDING",
+"       SIGNAL: 0000000200000800",
+"     SIGQUEUE:  SIG  SIGINFO ",
+"                 12  f51b9c84",
+"                 34  f51b9594",
+"   SHARED_PENDING",
+"       SIGNAL: 8000000000000800",
+"     SIGQUEUE:  SIG  SIGINFO ",
+"                 12  f51b9188",
+"                 64  f51b9d18",
+"                 64  f51b9500",
+"    ",
+"  Dump the signal-handling data for all tasks in the thread group containing",
+"  PID 2578:\n",
+"    %s> sig -g 2578",
+"    PID: 2387   TASK: f617d020  CPU: 0   COMMAND: \"slapd\"",
+"    SIGNAL_STRUCT: f7dede00  COUNT: 6",
+"    SIG SIGACTION  HANDLER       MASK       FLAGS",
+"    [1]  c1f60c04   a258a7 0000000000000000 10000000 (SA_RESTART)",
+"    [2]  c1f60c18   a258a7 0000000000000000 10000000 (SA_RESTART)",
+"    [3]  c1f60c2c  SIG_DFL 0000000000000000 0",
+"    [4]  c1f60c40  SIG_DFL 0000000000000000 0",
+"    [5]  c1f60c54   a258a7 0000000000000000 10000000 (SA_RESTART)",
+"    [6]  c1f60c68  SIG_DFL 0000000000000000 0",
+"    [7]  c1f60c7c  SIG_DFL 0000000000000000 0",
+"    [8]  c1f60c90  SIG_DFL 0000000000000000 0",
+"    [9]  c1f60ca4  SIG_DFL 0000000000000000 0",
+"   [10]  c1f60cb8   a25911 0000000000000000 10000000 (SA_RESTART)",
+"   ...",
+"   [64]  c1f610f0  SIG_DFL 0000000000000000 0",
+"   SHARED_PENDING",
+"       SIGNAL: 0000000000000000",
+"     SIGQUEUE: (empty)",
+"     ",
+"     PID: 2387   TASK: f617d020  CPU: 0   COMMAND: \"slapd\"",
+"     SIGPENDING: no",
+"        BLOCKED: 0000000000000000",
+"     PRIVATE_PENDING",
+"         SIGNAL: 0000000000000000",
+"       SIGQUEUE: (empty)",
+"    ",
+"     PID: 2392   TASK: f6175aa0  CPU: 0   COMMAND: \"slapd\"",
+"     SIGPENDING: no",
+"        BLOCKED: 0000000000000000",
+"     PRIVATE_PENDING",
+"         SIGNAL: 0000000000000000",
+"       SIGQUEUE: (empty)",
+"    ",
+"     PID: 2523   TASK: f7cd4aa0  CPU: 1   COMMAND: \"slapd\"",
+"     SIGPENDING: no",
+"        BLOCKED: 0000000000000000",
+"     PRIVATE_PENDING",
+"         SIGNAL: 0000000000000000",
+"       SIGQUEUE: (empty)",
+"    ",
+"     ...",
 "    ",
 "  Translate the sigset_t mask value, cut-and-pasted from the signal handling",
 "  data from signals 1 and 10 above:",
 " ",
-"    %s> sig -s 0000000000000201",
-"    SIGHUP SIGUSR1",
+"    %s> sig -s 800A000000000201",
+"    SIGHUP SIGUSR1 SIGRTMAX-14 SIGRTMAX-12 SIGRTMAX",
 " ",
 "  List the signal numbers and their names:",
 " ",
@@ -2829,6 +3052,40 @@
 "    [29] SIGIO/SIGPOLL",
 "    [30] SIGPWR",
 "    [31] SIGSYS",
+"    [32] SIGRTMIN",
+"    [33] SIGRTMIN+1",
+"    [34] SIGRTMIN+2",
+"    [35] SIGRTMIN+3",
+"    [36] SIGRTMIN+4",
+"    [37] SIGRTMIN+5",
+"    [38] SIGRTMIN+6",
+"    [39] SIGRTMIN+7",
+"    [40] SIGRTMIN+8",
+"    [41] SIGRTMIN+9",
+"    [42] SIGRTMIN+10",
+"    [43] SIGRTMIN+11",
+"    [44] SIGRTMIN+12",
+"    [45] SIGRTMIN+13",
+"    [46] SIGRTMIN+14",
+"    [47] SIGRTMIN+15",
+"    [48] SIGRTMIN+16",
+"    [49] SIGRTMAX-15",
+"    [50] SIGRTMAX-14",
+"    [51] SIGRTMAX-13",
+"    [52] SIGRTMAX-12",
+"    [53] SIGRTMAX-11",
+"    [54] SIGRTMAX-10",
+"    [55] SIGRTMAX-9",
+"    [56] SIGRTMAX-8",
+"    [57] SIGRTMAX-7",
+"    [58] SIGRTMAX-6",
+"    [59] SIGRTMAX-5",
+"    [60] SIGRTMAX-4",
+"    [61] SIGRTMAX-3",
+"    [62] SIGRTMAX-2",
+"    [63] SIGRTMAX-1",
+"    [64] SIGRTMAX",
+
 
 NULL               
 };
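The "sig -s" option documented above is essentially a bit walk over a 64-bit sigset value, mapping each set bit to a signal name. A minimal sketch of that decoding, with a deliberately abbreviated name table (illustrative only):

/* Illustrative sketch, not part of the patch: decode a sigset mask the way
 * "sig -s" does.  Only a few entries of the name table are shown. */
#include <stdio.h>
#include <stdint.h>

static const char *signame(int sig)
{
	switch (sig) {
	case 1:  return "SIGHUP";
	case 10: return "SIGUSR1";
	case 64: return "SIGRTMAX";
	default: return NULL;	/* omitted from this sketch */
	}
}

static void print_sigset(uint64_t mask)
{
	int sig;

	for (sig = 1; sig <= 64; sig++)
		if (mask & (1ULL << (sig - 1)))
			printf("%s ", signame(sig) ? signame(sig) : "?");
	printf("\n");
}

For instance, with this abbreviated table print_sigset(0x0000000000000201ULL) prints SIGHUP and SIGUSR1 (bits 0 and 9, i.e. signals 1 and 10).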
@@ -2836,8 +3093,8 @@
 char *help_struct[] = {
 "struct",
 "structure contents",
-"struct_name[.member] [[-o][-l offset][-r] [address | symbol] [count]]\n"
-"                              [-c count]",
+"struct_name[.member[,member]][-o][-l offset][-rfu] [address | symbol]\n"
+"                                       [count | -c count]",
 "  This command displays either a structure definition, or a formatted display",
 "  of the contents of a structure at a specified address.  When no address is",
 "  specified, the structure definition is shown along with the structure size.",
@@ -2845,7 +3102,8 @@
 "  the scope of the data displayed to that particular member; when no address",
 "  is specified, the member's offset and definition are shown.\n",
 "    struct_name  name of a C-code structure used by the kernel.",
-"        .member  name of a structure member.",
+"        .member  name of a structure member; to display multiple members of a",
+"                 structure, use a comma-separated list of members.",
 "             -o  show member offsets when displaying structure definitions.",
 "      -l offset  if the address argument is a pointer to a list_head structure",
 "                 that is embedded in the target data structure, the offset",
@@ -2854,6 +3112,9 @@
 "                   1. in \"structure.member\" format.",
 "                   2. a number of bytes. ",
 "             -r  raw dump of structure data.",
+"             -f  address argument is a dumpfile offset.",
+"             -u  address argument is a user virtual address in the current",
+"                 context.",
 "        address  hexadecimal address of a structure; if the address points",  
 "                 to an embedded list_head structure contained within the",
 "                 target data structure, then the \"-l\" option must be used.",
@@ -2944,6 +3205,21 @@
 "    struct mm_struct {",
 "       [12] pgd_t *pgd;",
 "    }\n",
+"  Display the flags and virtual members of 4 contiguous page structures",
+"  in the mem_map page structure array:\n",
+"    %s> page.flags,virtual c101196c 4",
+"      flags = 0x8000,",
+"      virtual = 0xc04b0000",
+"    ",
+"      flags = 0x8000,",
+"      virtual = 0xc04b1000",
+"    ",
+"      flags = 0x8000,",
+"      virtual = 0xc04b2000",
+"    ",
+"      flags = 0x8000,",
+"      virtual = 0xc04b3000",
+" ",
 "  Display the array of tcp_sl_timer structures declared by tcp_slt_array[]:\n",
 "    %s> struct tcp_sl_timer tcp_slt_array 4",
 "    struct tcp_sl_timer {",
@@ -3052,8 +3328,8 @@
 char *help_union[] = {
 "union",
 "union contents",
-"union_name[.member] [[-o][-l offset][-r] [address | symbol] [count]]\n"
-"                            [-c count]",
+"union_name[.member[,member]] [-o][-l offset][-rfu] [address | symbol]\n"
+"                                     [count | -c count]",
 "  This command displays either a union definition, or a formatted display",
 "  of the contents of a union at a specified address.  When no address is",
 "  specified, the union definition is shown along with the union size.",
@@ -3061,7 +3337,8 @@
 "  the scope of the data displayed to that particular member; when no address",
 "  is specified, the member's offset (always 0) and definition are shown.\n",
 "     union_name  name of a C-code union used by the kernel.",
-"        .member  name of a union member.",  
+"        .member  name of a union member; to display multiple members of a",
+"                 union, use a comma-separated list of members.",
 "             -o  show member offsets when displaying union definitions.",
 "                 (always 0)",
 "      -l offset  if the address argument is a pointer to a list_head structure",
@@ -3071,6 +3348,9 @@
 "                   1. in \"structure.member\" format.",
 "                   2. a number of bytes. ",
 "             -r  raw dump of union data.",
+"             -f  address argument is a dumpfile offset.",
+"             -u  address argument is a user virtual address in the current",
+"                 context.",
 "        address  hexadecimal address of a union; if the address points",
 "                 to an embedded list_head structure contained within the",
 "                 target union structure, then the \"-l\" option must be used.",
@@ -3152,7 +3432,7 @@
 char *help_mod[] = {
 "mod",
 "module information and loading of symbols and debugging data",
-"[ -s module [objfile] | -d module | -S [directory] | -D | -r ] ",
+"[ -s module [objfile] | -d module | -S [directory] | -D | -r | -o ] ",
 "  With no arguments, this command displays basic information of the currently",
 "  installed modules, consisting of the module address, name, size, the",
 "  object file name (if known), and whether the module was compiled with",
@@ -3203,6 +3483,7 @@
 "                   -r  Reinitialize module data. All currently-loaded symbolic",
 "                       and debugging data will be deleted, and the installed",
 "                       module list will be updated (live system only).",
+"                   -o  Load module symbols with old mechanism.",
 " ",
 "  After symbolic and debugging data have been loaded, backtraces and text",
 "  disassembly will be displayed appropriately.  Depending upon the processor",
@@ -3322,9 +3603,10 @@
 char *help__list[] = {
 "list",
 "linked list",
-"[[-o] offset] [-e end] [-s struct[.member]] [-H] start",
+"[[-o] offset] [-e end] [-s struct[.member[,member]]] [-H] start",
 "  This command dumps the contents of a linked list.  The entries in a linked",
-"  are typically data structures that are tied together in one of two formats:",
+"  list are typically data structures that are tied together in one of two",
+"  formats:",
 " ",
 "  1. A starting address points to a data structure; that structure contains",
 "     a member that is a pointer to the next structure, and so on.  The list",
@@ -3335,7 +3617,7 @@
 "       c. a pointer to the first item pointed to by the start address.",
 "       d. a pointer to its containing structure.",
 "  ",
-"  2. Many Linux lists are linked via embedded list_head structures contained ",
+"  2. Most Linux lists are linked via embedded list_head structures contained ",
 "     within the data structures in the list.  The linked list is headed by an",
 "     external LIST_HEAD, which is simply a list_head structure initialized to",
 "     point to itself, signifying that the list is empty:",
@@ -3370,15 +3652,17 @@
 "               entered.",
 "    -s struct  For each address in list, format and print as this type of",
 "               structure; use the \"struct.member\" format in order to display", 
-"               a particular member of the structure.",
+"               a particular member of the structure.  To display multiple",
+"               members of a structure, use a comma-separated list of members.",
 " ",
 "  The meaning of the \"start\" argument, which can be expressed either",
 "  symbolically or in hexadecimal format, depends upon whether the -H option",
 "  is pre-pended or not:",
 " ",
 "      start  The address of the first structure in the list.",
-"   -H start  The address of the LIST_HEAD structure, typically expressed",
-"             symbolically.",
+"   -H start  The address of the list_head structure, typically expressed",
+"             symbolically, but also can be an expression evaluating to the",
+"             address of the starting list_head structure.",
 "\nEXAMPLES",
 "  Note that each task_struct is linked to its parent's task_struct via the",
 "  p_pptr member:",
@@ -3416,31 +3700,66 @@
 "  The list of currently-registered file system types are headed up by a",
 "  struct file_system_type pointer named \"file_systems\", and linked by",
 "  the \"next\" field in each file_system_type structure.  The following",
-"  sequence displays the address and name of each registered file system type:",
+"  sequence displays the structure address followed by the name and ",
+"  fs_flags members of each registered file system type:",
 " ",
 "    %s> p file_systems",
-"    file_systems = $2 = (struct file_system_type *) 0xc02ebea0",
-"    %s> list file_system_type.next -s file_system_type.name 0xc02ebea0",
-"    c02ebea0",
-"      name = 0xc0280372 \"proc\", ",
-"    c02fd4a0",
-"      name = 0xc02bf348 \"sockfs\", ",
-"    c02eb544",
-"      name = 0xc027c25a \"tmpfs\", ",
-"    c02eb52c",
-"      name = 0xc027c256 \"shm\", ",
-"    c02ebbe0",
-"      name = 0xc027e054 \"pipefs\", ",
-"    c02ec9c0",
-"      name = 0xc0283c13 \"ext2\", ",
-"    c02ecaa8",
-"      name = 0xc0284567 \"iso9660\", ",
-"    c02ecc08",
-"      name = 0xc0284cf5 \"nfs\", ",
-"    c02edc60",
-"      name = 0xc028d832 \"autofs\", ",
-"    c02edfa0",
-"      name = 0xc028e1e0 \"devpts\"",
+"    file_systems = $1 = (struct file_system_type *) 0xc03adc90",
+"    %s> list file_system_type.next -s file_system_type.name,fs_flags 0xc03adc90",
+"    c03adc90",
+"      name = 0xc02c05c8 \"rootfs\",",
+"      fs_flags = 0x30,",
+"    c03abf94",
+"      name = 0xc02c0319 \"bdev\",",
+"      fs_flags = 0x10,",
+"    c03acb40",
+"      name = 0xc02c07c4 \"proc\",",
+"      fs_flags = 0x8,",
+"    c03e9834",
+"      name = 0xc02cfc83 \"sockfs\",",
+"      fs_flags = 0x10,",
+"    c03ab8e4",
+"      name = 0xc02bf512 \"tmpfs\",",
+"      fs_flags = 0x20,",
+"    c03ab8c8",
+"      name = 0xc02c3d6b \"shm\",",
+"      fs_flags = 0x20,",
+"    c03ac394",
+"      name = 0xc02c03cf \"pipefs\",",
+"      fs_flags = 0x10,",
+"    c03ada74",
+"      name = 0xc02c0e6b \"ext2\",",
+"      fs_flags = 0x1,",
+"    c03adc74",
+"      name = 0xc02c0e70 \"ramfs\",",
+"      fs_flags = 0x20,",
+"    c03ade74",
+"      name = 0xc02c0e76 \"hugetlbfs\",",
+"      fs_flags = 0x20,",
+"    c03adf8c",
+"      name = 0xc02c0f84 \"iso9660\",",
+"      fs_flags = 0x1,",
+"    c03aec14",
+"      name = 0xc02c0ffd \"devpts\",",
+"      fs_flags = 0x8,",
+"    c03e93f4",
+"      name = 0xc02cf1b9 \"pcihpfs\",",
+"      fs_flags = 0x28,",
+"    e0831a14",
+"      name = 0xe082f89f \"ext3\",",
+"      fs_flags = 0x1,",
+"    e0846af4",
+"      name = 0xe0841ac6 \"usbdevfs\",",
+"      fs_flags = 0x8,",
+"    e0846b10",
+"      name = 0xe0841acf \"usbfs\",",
+"      fs_flags = 0x8,",
+"    e0992370",
+"      name = 0xe099176c \"autofs\",",
+"      fs_flags = 0x0,",
+"    e2dcc030",
+"      name = 0xe2dc8849 \"nfs\",",
+"      fs_flags = 0x48000,",
 " ",
 "  In some kernels, the system run queue is a linked list headed up by the",
 "  \"runqueue_head\", which is defined like so:",
@@ -3555,7 +3874,7 @@
 char *help_kmem[] = {
 "kmem",
 "kernel memory",
-"[-f|-F|-p|-c|-C|-i|-s|-S|-v|-n] [-[l|L][a|i]] [slab-name] [[-P] address]",
+"[-f|-F|-p|-c|-C|-i|-s|-S|-v|-V|-n|-z] [-[l|L][a|i]] [slab] [[-P] address]",
 "  This command displays information about the use of kernel memory.\n",
 "        -f  displays the contents of the system free memory headers.",
 "            also verifies that the page count equals nr_free_pages.",
@@ -3567,23 +3886,33 @@
 "        -i  displays general memory usage information",
 "        -s  displays basic kmalloc() slab data.",
 "        -S  displays all kmalloc() slab data, including all slab objects,",
-"            and whether each object is in use or is free.",
+"            and whether each object is in use or is free.  If CONFIG_SLUB,",
+"            slab data for each per-cpu slab is displayed, along with the",
+"            address of each kmem_cache_node, its count of full and partial",
+"            slabs, and a list of all tracked slabs.",
 "        -v  displays the vmlist entries.",
+"        -V  displays the kernel vm_stat table if it exists, the cumulative",
+"            page_states counter values if they exist, and/or the cumulative",
+"            vm_event_states counter values if they exist.",
 "        -n  display memory node data (if supported).",
+"        -z  displays per-zone memory statistics.",
 "       -la  walks through the active_list and verifies nr_active_pages.",
 "       -li  walks through the inactive_list and verifies nr_inactive_pages.",
 "       -La  same as -la, but also dumps each page in the active_list.",
 "       -Li  same as -li, but also dumps each page in the inactive_list.",
-" slab-name  when used with -s or -S, limits the command to only the slab cache",
-"            of name \"slab-name\".  If the slab-name argument is \"list\", then",
+"      slab  when used with -s or -S, limits the command to only the slab cache",
+"            of name \"slab\".  If the slab argument is \"list\", then",
 "            all slab cache names and addresses are listed.",
 "        -P  declares that the following address argument is a physical address.",
 "   address  when used without any flag, the address can be a kernel virtual,",
 "            or physical address; a search is made through the symbol table,",
 "            the kmalloc() slab subsystem, the free list, the page_hash_table,",
-"            the vmalloc() vmlist subsystem, and the mem_map array. If found",
-"            in any of those areas, the information will be dumped in the",
-"            same manner as if the flags were used.",
+"            the vmalloc() vmlist subsystem, the current set of task_structs",
+"            and kernel stacks, and the mem_map array.  If found in any of",
+"            those areas, the information will be dumped in the same manner as",
+"            if the location-specific flags were used; if contained within a",
+"            curent task_struct or kernel stack, that task's context will be",
+"            displayed.",
 "   address  when used with -s or -S, searches the kmalloc() slab subsystem",
 "            for the slab containing of this virtual address, showing whether",
 "            it is in use or free.",
@@ -3781,6 +4110,24 @@
 "     c2f8ab60   c8095000 - c8097000    8192",
 "     c2f519e0   c8097000 - c8099000    8192",
 " ",
+"  Dump the vm_table contents:\n",
+"    %s> kmem -V",
+"           NR_ANON_PAGES: 38989",
+"          NR_FILE_MAPPED: 3106",
+"           NR_FILE_PAGES: 169570",
+"                 NR_SLAB: 32439",
+"            NR_PAGETABLE: 1181",
+"           NR_FILE_DIRTY: 4633",
+"            NR_WRITEBACK: 0",
+"         NR_UNSTABLE_NFS: 0",
+"               NR_BOUNCE: 0",
+"                NUMA_HIT: 63545992",
+"               NUMA_MISS: 0",
+"            NUMA_FOREIGN: 0",
+"     NUMA_INTERLEAVE_HIT: 24002",
+"              NUMA_LOCAL: 63545992",
+"              NUMA_OTHER: 0",
+" ",
 "  Determine (and verify) the page cache size:\n",
 "    %s> kmem -c",
 "    page_cache_size: 18431 (verified)",
@@ -3979,18 +4326,21 @@
 char *help_dis[] = {
 "dis",
 "disassemble",
-"[-r][-l][-u] [address | symbol | (expression)] [count]",
+"[-r][-l][-u][-b [num]] [address | symbol | (expression)] [count]",
 "  This command disassembles source code instructions starting (or ending) at",
 "  a text address that may be expressed by value, symbol or expression:\n",
 "            -r  (reverse) displays all instructions from the start of the ",
 "                routine up to and including the designated address.",
 "            -l  displays source code line number data in addition to the ",
 "                disassembly output.", 
-"            -u  address is a user virtual address; otherwise the address is ",
-"                assumed to be a kernel virtual address.  If this option is",
-"                used, then -r and -l are ignored.",
+"            -u  address is a user virtual address in the current context;",
+"                otherwise the address is assumed to be a kernel virtual address.",
+"                If this option is used, then -r and -l are ignored.",
+"      -b [num]  modify the pre-calculated number of encoded bytes to skip after",
+"                a kernel BUG (\"ud2a\") instruction; with no argument, displays",  
+"                the current number of bytes being skipped. (x86 and x86_64 only)",
 "       address  starting hexadecimal text address.",
-"        symbol  symbol of starting text address.  On PPC64, the symbol",
+"        symbol  symbol of starting text address.  On ppc64, the symbol",
 "                preceded by '.' is used.",
 "  (expression)  expression evaluating to a starting text address.",
 "         count  the number of instructions to be disassembled (default is 1).",
@@ -4419,10 +4769,11 @@
 "  Display various network related data:\n",
 "      -a  display the ARP cache.",
 "      -s  display open network socket/sock addresses, their family and type,",
-"          and their source and destination addresses and ports.",
+"          and for INET and INET6 families, their source and destination",
+"          addresses and ports.",
 "      -S  displays open network socket/sock addresses followed by a dump",
 "          of both structures.",
-"  -n addr translates an IP address expressed as a decimal or hexadecimal ",
+"  -n addr translates an IPv4 address expressed as a decimal or hexadecimal",
 "          value into a standard numbers-and-dots notation.",
 "  -R ref  socket or sock address, or file descriptor.",
 "     pid  a process PID.",
@@ -4450,8 +4801,8 @@
 "  Display the sockets for PID 2517, using both -s and -S output formats:\n",
 "    %s> net -s 2517",
 "    PID: 2517   TASK: c1598000  CPU: 1   COMMAND: \"rlogin\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     3  c57375dc  c1ff1850  INET:STREAM      10.1.8.20:1023      10.1.16.62:513",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     3  c57375dc  c1ff1850  INET:STREAM      10.1.8.20-1023      10.1.16.62-513",
 "    ",
 "    %s> net -S 2517",
 "    PID: 2517   TASK: c1598000  CPU: 1   COMMAND: \"rlogin\"",
@@ -4497,52 +4848,52 @@
 "  From \"foreach\", find all tasks with references to socket c08ea3cc:\n",
 "    %s> foreach net -s -R c08ea3cc",
 "    PID: 2184   TASK: c7026000  CPU: 1   COMMAND: \"klines.kss\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 "    PID: 2200   TASK: c670a000  CPU: 1   COMMAND: \"kpanel\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 "    PID: 2201   TASK: c648a000  CPU: 1   COMMAND: \"kbgndwm\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 "    PID: 19294  TASK: c250a000  CPU: 0   COMMAND: \"prefdm\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 "    PID: 2194   TASK: c62dc000  CPU: 1   COMMAND: \"kaudioserver\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 "    PID: 2195   TASK: c6684000  CPU: 1   COMMAND: \"maudio\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 "    PID: 2196   TASK: c6b58000  CPU: 1   COMMAND: \"kwmsound\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 "    PID: 2197   TASK: c6696000  CPU: 0   COMMAND: \"kfm\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 "    PID: 2199   TASK: c65ec000  CPU: 0   COMMAND: \"krootwm\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 "    PID: 694    TASK: c1942000  CPU: 0   COMMAND: \"prefdm\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 "    PID: 698    TASK: c6a2c000  CPU: 1   COMMAND: \"X\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 "    PID: 2159   TASK: c4a5a000  CPU: 1   COMMAND: \"kwm\"",
-"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE:PORT     DESTINATION:PORT",
-"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0:1026         0.0.0.0:0",
+"    FD   SOCKET     SOCK    FAMILY:TYPE         SOURCE-PORT     DESTINATION-PORT",
+"     5  c08ea3cc  c50d3c80  INET:STREAM        0.0.0.0-1026         0.0.0.0-0",
 "    ",
 NULL               
 };
@@ -4584,21 +4935,22 @@
 void
 cmd_usage(char *cmd, int helpflag)
 {
-	int i;
-        int found;
-	char **p;
+	char **p, *scroll_command;
 	struct command_table_entry *cp;
 	char buf[BUFSIZE];
-	struct alias_data *ad;
-	FILE *less;
+	FILE *scroll;
+	int i;
 
-	if (helpflag & PIPE_TO_LESS) {
-	        if ((less = popen("/usr/bin/less", "w")) != NULL)
-			fp = less;
-		helpflag &= ~PIPE_TO_LESS;
-	} else
-		less = NULL;
-		
+	if (helpflag & PIPE_TO_SCROLL) {
+		if ((scroll_command = setup_scroll_command()) &&
+                    (scroll = popen(scroll_command, "w")))
+			fp = scroll;
+                else
+                        scroll = NULL;
+	} else {
+		scroll_command = NULL;
+		scroll = NULL;
+	}
 
 	if (STREQ(cmd, "copying")) {
 		display_copying_info();
@@ -4641,46 +4993,50 @@
 		goto done_usage;
 	}
 
-	found = FALSE;
-retry:
-	if ((cp = get_command_table_entry(cmd))) {
-		if ((p = cp->help_data))
-			found = TRUE;
-	}
+	/* look up command, possibly through an alias */
+	for (;;) {
+		struct alias_data *ad;
+
+		cp = get_command_table_entry(cmd);
+		if (cp != NULL)
+			break;	/* found command */
+
+		/* try for an alias */
+		ad = is_alias(cmd);
+		if (ad == NULL)
+			break;	/* neither command nor alias */
 
-       /*
-	*  Check for alias names or gdb commands.
-	*/
-	if (!found) {
-		if ((ad = is_alias(cmd))) {
-			cmd = ad->args[0];
-			goto retry;
-		}
+		cmd = ad->args[0];
+		cp = get_command_table_entry(cmd);
+	}
 
-		if (helpflag == SYNOPSIS) { 
-                	fprintf(fp,
-                         "No usage data for the \"%s\" command is available.\n",
+	if (cp == NULL || (p = cp->help_data) == NULL) {
+		if (helpflag & SYNOPSIS) { 
+			fprintf(fp,
+				"No usage data for the \"%s\" command"
+				" is available.\n",
 				cmd);
 			RESTART();
 		}
 
-		if (STREQ(pc->curcmd, "help")) {
-			if (cp)
-                		fprintf(fp,
-                          "No help data for the \"%s\" command is available.\n",
+		if (helpflag & MUST_HELP) {
+			if (cp || !(pc->flags & GDB_INIT))
+				fprintf(fp,
+				    "No help data for the \"%s\" command"
+				    " is available.\n",
 					cmd);
 			else if (!gdb_pass_through(concat_args(buf, 0, FALSE), 
 				NULL, GNU_RETURN_ON_ERROR))
 				fprintf(fp, 
-				    "No help data for \"%s\" is available.\n",
-                                	cmd);
+					"No help data for \"%s\" is available.\n",
+					cmd);
 		}
 		goto done_usage;
         }
 
 	p++;
 
-        if (helpflag == SYNOPSIS) {
+        if (helpflag & SYNOPSIS) {
                 p++;
                 fprintf(fp, "Usage: %s ", cmd);
 		fprintf(fp, *p, pc->program_name, pc->program_name);
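The two p++ steps above rely on a fixed layout that every help_data array in this file follows: entry [0] is the command name, entry [1] a one-line summary, entry [2] the synopsis printed on the SYNOPSIS path, the remaining entries form the help body, and a NULL pointer terminates the array (compare help_dis, help_union and the others above).  A hypothetical array following that layout, as a sketch only; "mycmd" and its options are invented for illustration:

char *help_mycmd[] = {
"mycmd",                                /* [0] command name                 */
"one-line summary of what mycmd does",  /* [1] short description            */
"[-a] [-b arg] [address | symbol]",     /* [2] synopsis used by cmd_usage() */
"  Body text displayed for \"help mycmd\".",
"      -a  first hypothetical option.",
"  -b arg  second hypothetical option.",
" ",
NULL
};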
@@ -4711,10 +5067,12 @@
 
 done_usage:
 
-	if (less) {
-		fflush(less);
-		pclose(less);
+	if (scroll) {
+		fflush(scroll);
+		pclose(scroll);
 	}
+	if (scroll_command)
+		FREEBUF(scroll_command);
 }
 
 
@@ -4812,7 +5170,9 @@
 "The default output radix for gdb output and certain %s commands is",
 "hexadecimal.  This can be changed to decimal by entering \"set radix 10\"",
 "or the alias \"dec\".  It can be reverted back to hexadecimal by entering",
-"\"set radix 16\" or the alias \"hex\".",
+"\"set radix 16\" or the alias \"hex\".\n",
+"To execute an external shell command, precede the command with an \"!\".",
+"To escape to a shell, enter \"!\" alone.",
 " ",
 NULL
 };
@@ -4854,10 +5214,13 @@
 static 
 char *version_info[] = {
 
-"Copyright (C) 2002, 2003, 2004, 2005  Red Hat, Inc.",
-"Copyright (C) 2004, 2005  IBM Corporation", 
-"Copyright (C) 1999-2005  Hewlett-Packard Co",
-"Copyright (C) 1999, 2002  Silicon Graphics, Inc.",
+"Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009  Red Hat, Inc.",
+"Copyright (C) 2004, 2005, 2006  IBM Corporation", 
+"Copyright (C) 1999-2006  Hewlett-Packard Co",
+"Copyright (C) 2005, 2006  Fujitsu Limited",
+"Copyright (C) 2006, 2007  VA Linux Systems Japan K.K.",
+"Copyright (C) 2005  NEC Corporation",
+"Copyright (C) 1999, 2002, 2007  Silicon Graphics, Inc.",
 "Copyright (C) 1999, 2000, 2001, 2002  Mission Critical Linux, Inc.",
 "This program is free software, covered by the GNU General Public License,",
 "and you are welcome to change it and/or distribute copies of it under",
--- crash/lkcd_v8.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_v8.c	2007-11-09 17:29:14.000000000 -0500
@@ -23,9 +23,185 @@
 #include "lkcd_dump_v8.h"				/* REMIND */
 
 static dump_header_t dump_header_v8 = { 0 };
-// static dump_header_asm_t dump_header_asm_v8 = { 0 };
+#ifndef HAVE_NO_DUMP_HEADER_ASM
+static dump_header_asm_t dump_header_asm_v8 = { 0 };
+#endif
 static dump_page_t dump_page = { 0 };
 static void mclx_cache_page_headers_v8(void);
+static off_t lkcd_offset_to_first_page = LKCD_OFFSET_TO_FIRST_PAGE;
+
+#if defined(X86_64)
+
+int
+get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp)
+{
+	if (eip)
+		*eip = dump_header_asm_v8.dha_smp_regs[cpu].rip;
+	if (esp)
+		*esp = dump_header_asm_v8.dha_smp_regs[cpu].rsp;
+
+	return 0;
+}
+
+#elif defined(X86)
+
+int
+get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp)
+{
+	if (eip)
+		*eip = dump_header_asm_v8.dha_smp_regs[cpu].eip;
+	if (esp)
+		*esp = dump_header_asm_v8.dha_smp_regs[cpu].esp;
+
+	return 0;
+}
+
+#else
+
+int
+get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp)
+{
+	return -1;
+}
+
+#endif
+
+
+
+int
+get_lkcd_regs_for_cpu_v8(struct bt_info *bt, ulong *eip, ulong *esp)
+{
+	int cpu = bt->tc->processor;
+	int cpu;
+
+	if (!bt || !bt->tc) {
+		fprintf(stderr, "get_lkcd_regs_for_cpu_v8: invalid bt/tc\n");
+		return -EINVAL;
+	}
+	cpu = bt->tc->processor;
+
+	if (cpu >= NR_CPUS) {
+		fprintf(stderr, "get_lkcd_regs_for_cpu_v8: cpu (%d) too high\n", cpu);
+		return -EINVAL;
+	}
+	return get_lkcd_regs_for_cpu_arch(cpu, eip, esp);
+}
+
+
+#ifndef HAVE_NO_DUMP_HEADER_ASM
+int
+lkcd_dump_init_v8_arch(dump_header_t *dh)
+{
+	off_t 			ret_of;
+	ssize_t 		ret_sz;
+	uint32_t 		hdr_size, offset, nr_cpus;
+	dump_header_asm_t 	arch_hdr;
+	char 			*hdr_buf = NULL;
+
+	ret_of = lseek(lkcd->fd, dh->dh_header_size +
+			offsetof(dump_header_asm_t, dha_header_size),
+			SEEK_SET);
+	if (ret_of < 0) {
+		perror("lseek failed in " __FILE__ ":" STR(__LINE__));
+		goto err;
+	}
+
+	ret_sz = read(lkcd->fd, (char *)&hdr_size, sizeof(hdr_size));
+	if (ret_sz != sizeof(hdr_size)) {
+		perror("Reading hdr_size failed in " __FILE__ ":" STR(__LINE__));
+		goto err;
+	}
+
+	ret_of = lseek(lkcd->fd, dh->dh_header_size, SEEK_SET);
+	if (ret_of < 0) {
+		perror("lseek failed in " __FILE__ ":" STR(__LINE__));
+		goto err;
+	}
+
+	hdr_buf = (char *)malloc(hdr_size);
+	if (!hdr_buf) {
+		perror("Could not allocate memory for dump header\n");
+		goto err;
+	}
+
+	ret_sz = read(lkcd->fd, (char *)hdr_buf, hdr_size);
+	if (ret_sz != hdr_size) {
+		perror("Could not read header " __FILE__ ":" STR(__LINE__));
+		goto err;
+	}
+
+
+	/*
+         * Although KL_NR_CPUS is 128, the header size can differ because
+         * CONFIG_NR_CPUS may have been set differently in the kernel.
+         * Hence, we need to find out how many CPUs are configured.
+         */
+        offset = offsetof(dump_header_asm_t, dha_smp_regs[0]);
+        nr_cpus = (hdr_size - offset) / sizeof(dump_CPU_info_t);
+
+	/* check for CPU overflow */
+	if (nr_cpus > NR_CPUS) {
+		fprintf(stderr, "CPU number too high %d (%s:%d)\n",
+				nr_cpus, __FILE__, __LINE__);
+		goto err;
+	}
+
+	/* parts that don't depend on the number of CPUs */
+	memcpy(&arch_hdr, (void *)hdr_buf, offset);
+
+	/* registers */
+	memcpy(&arch_hdr.dha_smp_regs, (void *)&hdr_buf[offset],
+			nr_cpus * sizeof(struct pt_regs));
+	offset += nr_cpus * sizeof(struct pt_regs);
+
+	/* current task */
+	memcpy(&arch_hdr.dha_smp_current_task, (void *)&hdr_buf[offset],
+			nr_cpus * sizeof(&arch_hdr.dha_smp_current_task[0]));
+	offset += nr_cpus * sizeof(&arch_hdr.dha_smp_current_task[0]);
+
+	/* stack */
+	memcpy(&arch_hdr.dha_stack, (void *)&hdr_buf[offset],
+			nr_cpus * sizeof(&arch_hdr.dha_stack[0]));
+	offset += nr_cpus * sizeof(&arch_hdr.dha_stack[0]);
+
+	/* stack_ptr */
+	memcpy(&arch_hdr.dha_stack_ptr, (void *)&hdr_buf[offset],
+			nr_cpus * sizeof(&arch_hdr.dha_stack_ptr[0]));
+	offset += nr_cpus * sizeof(&arch_hdr.dha_stack_ptr[0]);
+
+	if (arch_hdr.dha_magic_number != DUMP_ASM_MAGIC_NUMBER) {
+		fprintf(stderr, "Invalid magic number for x86_64\n");
+		goto err;
+	}
+
+	/*
+	 * read the kernel load address on IA64 -- other architectures have
+	 * no relocatable kernel at the lifetime of LKCD
+	 */
+#ifdef IA64
+	memcpy(&arch_hdr.dha_kernel_addr, (void *)&hdr_buf[offset], sizeof(uint64_t));
+#endif
+
+	memcpy(&dump_header_asm_v8, &arch_hdr, sizeof(dump_header_asm_t));
+
+	return 0;
+
+err:
+	free(hdr_buf);
+	return -1;
+}
+
+#else /* architecture that has no lkcd_dump_init_v8 */
+
+int
+lkcd_dump_init_v8_arch(dump_header_t *dh)
+{
+	return 0;
+}
+
+#endif
+
+
 
 /*
  *  Verify and initialize the LKCD environment, storing the common data
@@ -56,17 +232,26 @@
 	if (read(lkcd->fd, dh, sizeof(dump_header_t)) !=
 	    sizeof(dump_header_t))
 		return FALSE;
-	if ((dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) == LKCD_DUMP_V9)
+	if ((dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) == LKCD_DUMP_V9){
 	    if (read(lkcd->fd, &dh_dump_buffer_size, sizeof(dh_dump_buffer_size)) !=
 		sizeof(dh_dump_buffer_size))
 		    return FALSE;
+	    lkcd_offset_to_first_page = dh_dump_buffer_size;
+	} else
+	    lkcd_offset_to_first_page = LKCD_OFFSET_TO_FIRST_PAGE;
 	
         lkcd->dump_page = dp;
         lkcd->dump_header = dh;
 	if (lkcd->debug) 
 		dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY);
+
+	if (lkcd_dump_init_v8_arch(dh) != 0) {
+		fprintf(stderr, "Warning: Failed to initialize "
+				"arch-specific dump code\n");
+	}
+
 #ifdef IA64
-	if ( (fix_addr_v8(fd) == -1) )
+	if ( (fix_addr_v8(&dump_header_asm_v8) == -1) )
 	    return FALSE;
 #endif
 
@@ -146,7 +331,7 @@
    	lkcd->compression = dh->dh_dump_compress; 
         lkcd->page_header_size = sizeof(dump_page_t);
 
-        lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET);
+        lseek(lkcd->fd, lkcd_offset_to_first_page, SEEK_SET);
 
 	/*
 	 * Read all of the pages and save the page offsets for lkcd_lseek().
@@ -483,7 +668,7 @@
 	/*
 	 *  Determine the granularity between offsets.
 	 */
-        if (lseek(lkcd->fd, page_headers[0] + LKCD_OFFSET_TO_FIRST_PAGE, 
+        if (lseek(lkcd->fd, page_headers[0] + lkcd_offset_to_first_page, 
 	    SEEK_SET) == -1) 
 		return;
         if (read(lkcd->fd, dp, lkcd->page_header_size) != 
@@ -491,7 +676,7 @@
                 return;
         physaddr1 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift;
 
-        if (lseek(lkcd->fd, page_headers[1] + LKCD_OFFSET_TO_FIRST_PAGE,
+        if (lseek(lkcd->fd, page_headers[1] + lkcd_offset_to_first_page,
             SEEK_SET) == -1)
                 return;
         if (read(lkcd->fd, dp, lkcd->page_header_size) 
@@ -508,7 +693,7 @@
 	for (i = 0; i < (MCLX_PAGE_HEADERS-1); i++) {
 		if (!page_headers[i])
 			break;
-		lkcd->curhdroffs = page_headers[i] + LKCD_OFFSET_TO_FIRST_PAGE;
+		lkcd->curhdroffs = page_headers[i] + lkcd_offset_to_first_page;
 		set_mb_benchmark((granularity * (i+1))/lkcd->page_size);
 	}
 }
--- crash/lkcd_x86_trace.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_x86_trace.c	2009-01-26 14:54:36.000000000 -0500
@@ -5,8 +5,8 @@
 /* 
  *  lkcd_x86_trace.c
  *
- *  Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- *  Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ *  Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ *  Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  *
  *  Adapted as noted from the following LKCD files:
  *
@@ -21,6 +21,9 @@
 
 #include "lkcd_x86_trace.h"
 
+#undef XEN_HYPER_MODE
+static int XEN_HYPER_MODE(void) { return (pc->flags & XEN_HYPER) != 0; }
+
 static void *kl_alloc_block(int, int);
 static void kl_free_block(void *);
 static void GET_BLOCK(kaddr_t, unsigned, void *);
@@ -47,12 +50,13 @@
 static int setup_trace_rec(kaddr_t, kaddr_t, int, trace_t *);
 static int valid_ra(kaddr_t);
 static int valid_ra_function(kaddr_t, char *);
+static int eframe_incr(kaddr_t, char *);
 static int find_trace(kaddr_t, kaddr_t, kaddr_t, kaddr_t, trace_t *, int);
 static void dump_stack_frame(trace_t *, sframe_t *, FILE *);
 static void print_trace(trace_t *, int, FILE *);
-struct pt_regs;
-static int eframe_type(struct pt_regs *);
-static void print_eframe(FILE *, struct pt_regs *);
+static int eframe_type(uaddr_t *);
+char *funcname_display(char *);
+static void print_eframe(FILE *, uaddr_t *);
 static void trace_banner(FILE *);
 static void print_kaddr(kaddr_t, FILE *, int);
 int do_text_list(kaddr_t, int, FILE *);
@@ -505,7 +509,9 @@
 	{ "receive_chars", NULL, 
 		COMPILER_VERSION_EQUAL, GCC(2,96,0), 0, 0, 48 },
 	{ "default_idle", NULL, 
-		COMPILER_VERSION_START, GCC(3,3,2), 0, -4, 0 },
+		COMPILER_VERSION_START, GCC(2,96,0), 0, -4, 0 },
+	{ "hidinput_hid_event", NULL, 
+		COMPILER_VERSION_START, GCC(4,1,2), 0, 0, 28 },
  	{ NULL, NULL, 0, 0, 0, 0, 0 },
 };
 
@@ -1117,8 +1123,9 @@
         return(0);
 }
 
-#include <asm/ptrace.h>
+#ifndef REDHAT
 #include <asm/segment.h>
+#endif
 #define KERNEL_EFRAME		0
 #define USER_EFRAME		1
 #define KERNEL_EFRAME_SZ	13	/* no ss and esp */
@@ -1141,31 +1148,34 @@
  * Check if the exception frame is of kernel or user type 
  * Is checking only DS and CS values sufficient ?
  */
-int eframe_type(struct pt_regs *regs)
+
+int eframe_type(uaddr_t *int_eframe)
 {
-	if (((regs->xcs & 0xffff) == __KERNEL_CS) && 
-			((regs->xds & 0xffff) == __KERNEL_DS))
+	ushort xcs, xds;
+
+	xcs = (ushort)(int_eframe[INT_EFRAME_CS] & 0xffff);
+	xds = (ushort)(int_eframe[INT_EFRAME_DS] & 0xffff);
+
+	if ((xcs == __KERNEL_CS) && (xds == __KERNEL_DS))
 		return KERNEL_EFRAME;
 #ifdef REDHAT
-	else if (((regs->xcs & 0xffff) == 0x60) && 
-			((regs->xds & 0xffff) == 0x68))
+	else if ((xcs == 0x60) && (xds == 0x68))
+		return KERNEL_EFRAME;
+	else if ((xcs == 0x60) && (xds == 0x7b))
+		return KERNEL_EFRAME;
+	else if (XEN() && (xcs == 0x61) && (xds == 0x7b))
 		return KERNEL_EFRAME;
-        else if (((regs->xcs & 0xffff) == 0x60) &&
-                        ((regs->xds & 0xffff) == 0x7b))
-                return KERNEL_EFRAME;
 #endif
-	else if (((regs->xcs & 0xffff) == __USER_CS) && 
-			((regs->xds & 0xffff) == __USER_DS))
+	else if ((xcs == __USER_CS) && (xds == __USER_DS))
 		return USER_EFRAME;
 #ifdef REDHAT
-	else if (((regs->xcs & 0xffff) == 0x73) && 
-			((regs->xds & 0xffff) == 0x7b))
+	else if ((xcs == 0x73) && (xds == 0x7b))
 		return USER_EFRAME;
 #endif
 	return -1;
 }
 
-void print_eframe(FILE *ofp, struct pt_regs *regs)
+void print_eframe(FILE *ofp, uaddr_t *regs)
 {
 	int type = eframe_type(regs);
 
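The selector values compared above follow the usual x86 encoding, in which the low two bits of a segment selector carry the requested privilege level (RPL): 0 for the kernel, 3 for user space, and 1 for a kernel running paravirtualized under Xen, which is why the 0x61/0x7b pair is also accepted as a kernel frame.  A small illustrative sketch of that rule (eframe_type() itself matches the exact selector pairs rather than testing the RPL):

/* Illustrative only: classify a code-segment selector by its RPL bits.
 * 0x60 -> RPL 0 (kernel CS), 0x61 -> RPL 1 (Xen paravirt kernel CS),
 * 0x73 -> RPL 3 (user CS). */
static int selector_rpl(unsigned short cs)
{
	return cs & 3;			/* requested privilege level */
}

static int selector_is_kernel_cs(unsigned short cs)
{
	int rpl = selector_rpl(cs);

	return rpl == 0 || rpl == 1;	/* ring 0, or ring 1 under Xen */
}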
@@ -1206,6 +1216,93 @@
         }                                              \
 }
 #endif
+
+/*
+ *  Determine how much to increment the stack pointer to find the 
+ *  exception frame associated with a generic "error_code" or "nmi" 
+ *  exception.
+ *
+ *  The incoming addr is that of the call to the generic error_code 
+ *  or nmi exception handler function.  Until later 2.6 kernels, the next
+ *  instruction had always been an "addl $8,%esp".  However, with later 
+ *  2.6 kernels, that esp adjustment is no longer valid, and there will be 
+ *  an immediate "jmp" instruction.  Returns 4 or 12, whichever is appropriate. 
+ *  Cache the value the first time, and allow for future changes or additions.
+ */
+
+#define NMI_ADJ         (0)
+#define ERROR_CODE_ADJ  (1)
+#define EFRAME_ADJUSTS  (ERROR_CODE_ADJ+1)
+
+static int eframe_adjust[EFRAME_ADJUSTS] = { 0 };
+
+static int
+eframe_incr(kaddr_t addr, char *funcname)
+{
+	instr_rec_t irp;
+	kaddr_t next;
+	int size, adj, val;
+
+	if (STRNEQ(funcname, "nmi")) {
+		adj = NMI_ADJ;
+		val = eframe_adjust[NMI_ADJ];
+	} else if (strstr(funcname, "error_code")) {
+		adj = ERROR_CODE_ADJ;
+		val = eframe_adjust[ERROR_CODE_ADJ];
+	} else { 
+		adj = -1;
+		val = 0;
+		error(INFO, 
+		    "unexpected exception frame marker: %lx (%s)\n",
+			addr, funcname);
+	}
+
+	if (val) {
+		console("eframe_incr(%lx, %s): eframe_adjust[%d]: %d\n", 
+			addr, funcname, adj, val);
+		return val;
+	}
+		
+	console("eframe_incr(%lx, %s): TBD:\n", addr, funcname);
+
+	bzero(&irp, sizeof(irp));
+	irp.aflag = 1;
+	irp.dflag = 1;
+	if (!(size = get_instr_info(addr, &irp))) {
+		if (CRASHDEBUG(1))
+			error(INFO, 
+			    "eframe_incr(%lx, %s): get_instr_info(%lx) failed\n", 
+				addr, funcname, addr);			
+		return((THIS_KERNEL_VERSION > LINUX(2,6,9)) ? 4 : 12);
+	}
+	console("  addr: %lx size: %d  opcode: 0x%x insn: \"%s\"\n", 
+		addr, size, irp.opcode, irp.opcodep->name);
+
+	next = addr + size;
+	bzero(&irp, sizeof(irp));
+	irp.aflag = 1;
+	irp.dflag = 1;
+	if (!(size = get_instr_info(next, &irp))) {
+		if (CRASHDEBUG(1))
+			error(INFO,
+			    "eframe_incr(%lx, %s): get_instr_info(%lx) failed\n",
+				addr, funcname, next);
+		return((THIS_KERNEL_VERSION > LINUX(2,6,9)) ? 4 : 12);
+	}
+	console("  next: %lx size: %d  opcode: 0x%x insn: \"%s\"\n",
+		next, size, irp.opcode, irp.opcodep->name);
+
+	if (STREQ(irp.opcodep->name, "jmp"))
+		val = 4;
+	else
+		val = 12;
+
+	if (adj >= 0)
+		eframe_adjust[adj] = val;
+
+	return val;
+}
+
 /*
  * find_trace()
  *
@@ -1253,6 +1350,7 @@
 	int flag;
 	int interrupted_system_call = FALSE;
 	struct bt_info *bt = trace->bt;
+	uaddr_t *pt;
 #endif
 	sbp = trace->stack[curstkidx].ptr;
 	sbase = trace->stack[curstkidx].addr;
@@ -1322,7 +1420,20 @@
 			}
 		}
 		asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp)));
+
 #ifdef REDHAT
+		if (XEN_HYPER_MODE()) {
+			func_name = kl_funcname(pc);
+			if (STREQ(func_name, "idle_loop") || STREQ(func_name, "hypercall")
+				|| STREQ(func_name, "process_softirqs")
+				|| STREQ(func_name, "tracing_off")
+				|| STREQ(func_name, "page_fault")
+				|| STREQ(func_name, "handle_exception")) {
+				UPDATE_FRAME(func_name, pc, 0, sp, bp, asp, 0, 0, bp - sp, 0);
+				return(trace->nframes);
+			}
+		}
+
 		ra = GET_STACK_ULONG(bp + 4);
 		/*
 	  	 *  HACK: The get_framesize() function can return the proper
@@ -1447,7 +1558,8 @@
 			bp = curframe->fp + frame_size;
 		}
 #endif
-		if ((func_name = kl_funcname(pc))) {
+		func_name = kl_funcname(pc);
+		if (func_name && !XEN_HYPER_MODE()) {
 			if (strstr(func_name, "kernel_thread")) {
 				ra = 0;
 				bp = saddr - 4;
@@ -1503,25 +1615,26 @@
 				return(trace->nframes);
 #ifdef REDHAT
 			} else if (strstr(func_name, "error_code") 
+				|| STREQ(func_name, "nmi_stack_correct")
 				|| STREQ(func_name, "nmi")) {
 #else
 			} else if (strstr(func_name, "error_code")) {
 #endif
 				/* an exception frame */
-				sp = curframe->fp+12;
+				sp = curframe->fp + eframe_incr(pc, func_name);
 
 				bp = sp + (KERNEL_EFRAME_SZ-1)*4;
 				asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - 
 							(saddr - sp)));
 				curframe = alloc_sframe(trace, flags);
-				ra = ((struct pt_regs *)asp)->eip;
-				frame_type = eframe_type((struct pt_regs*)asp);
+				ra = asp[INT_EFRAME_EIP];
+				frame_type = eframe_type(asp);
 				UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 
 						0, 0, (bp - sp + 4), EX_FRAME);
 
 				/* prepare for next kernel frame, if present */
 				if (frame_type == KERNEL_EFRAME) {
-					pc = ((struct pt_regs *)asp)->eip;
+					pc = asp[INT_EFRAME_EIP];
 					sp = curframe->fp+4;
 #ifdef REDHAT
 					bp = sp + get_framesize(pc, bt);
@@ -1540,20 +1653,20 @@
 				sp = curframe->fp + 4;
 				asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - 
 						(saddr - sp)));
-				frame_type = eframe_type((struct pt_regs*)asp);
+				frame_type = eframe_type(asp);
 				if (frame_type == KERNEL_EFRAME)
 					bp = curframe->fp+(KERNEL_EFRAME_SZ-1)*4;
 				else 
 					bp = curframe->fp+(USER_EFRAME_SZ-1)*4;
 				curframe = alloc_sframe(trace, flags);
-				ra = ((struct pt_regs *)asp)->eip;
+				ra = asp[INT_EFRAME_EIP];
 				UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp,
 			       	0, 0, curframe->fp - curframe->sp+4, EX_FRAME);
 
 				/* prepare for next kernel frame, if present */
 				if (frame_type == KERNEL_EFRAME) {
 					sp = curframe->fp + 4;
-					pc = ((struct pt_regs *)asp)->eip;
+					pc = asp[INT_EFRAME_EIP];
 #ifdef REDHAT
 					bp = sp + get_framesize(pc, bt);
 #else
@@ -1571,6 +1684,47 @@
 				}
 			}
 		}
+		if (func_name && XEN_HYPER_MODE()) {
+			if (STREQ(func_name, "continue_nmi") ||
+			    STREQ(func_name, "vmx_asm_vmexit_handler") ||
+			    STREQ(func_name, "handle_nmi_mce") ||
+			    STREQ(func_name, "deferred_nmi")) {
+				/* Interrupt frame */
+				sp = curframe->fp + 4;
+				asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - 
+						(saddr - sp)));
+				bp = curframe->fp + (12 * 4);
+				curframe = alloc_sframe(trace, flags);
+				ra = *(asp + 9);
+				UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp,
+			       	0, 0, curframe->fp - curframe->sp+4, 12 * 4);
+
+				/* continue with the next frame */
+				pc = ra;
+				sp = curframe->fp + 4;
+				bp = sp + get_framesize(pc, bt);
+				func_name = kl_funcname(pc);
+				if (!func_name)
+					return trace->nframes;
+				continue;
+			}
+		}
+
+		/*
+		 *  Check for hypervisor_callback from user-space.
+		 */
+                if ((bt->flags & BT_XEN_STOP_THIS_CPU) && bt->tc->mm_struct &&
+                    STREQ(kl_funcname(curframe->pc), "hypervisor_callback")) {
+                	pt = curframe->asp+1;
+                        if (eframe_type(pt) == USER_EFRAME) {
+				if (program_context.debug >= 1)  /* pc above */
+                        		error(INFO, 
+					    "hypervisor_callback from user space\n");
+                                curframe->asp++;
+                                curframe->flag |= EX_FRAME;
+                                return(trace->nframes);
+                        }
+                }
 
 		/* Make sure our next frame pointer is valid (in the stack).
 		 */
@@ -1653,7 +1807,7 @@
 #ifdef REDHAT
 	kaddr_t fp = 0;
 	kaddr_t last_fp, last_pc, next_fp, next_pc;
-	struct pt_regs *pt;
+	uaddr_t *pt;
 	struct bt_info *bt;
 
 	bt = trace->bt;
@@ -1684,8 +1838,15 @@
 				(bt->flags & (BT_HARDIRQ|BT_SOFTIRQ))) 
 				return;
 
-			print_stack_entry(trace->bt, 
-				trace->bt->flags & BT_BUMP_FRAME_LEVEL ?
+			if ((frmp->level == 0) && (bt->flags & BT_XEN_STOP_THIS_CPU)) {
+				print_stack_entry(trace->bt, 0, trace->bt->stkptr,
+				symbol_value("stop_this_cpu"), 
+				value_symbol(symbol_value("stop_this_cpu")),
+				frmp, ofp);
+			}
+
+			print_stack_entry(trace->bt, (trace->bt->flags & 
+				(BT_BUMP_FRAME_LEVEL|BT_XEN_STOP_THIS_CPU)) ?
                                 frmp->level + 1 : frmp->level,
 				fp ? (ulong)fp : trace->bt->stkptr,
 				(ulong)frmp->pc, frmp->funcname, frmp, ofp);
@@ -1707,7 +1868,11 @@
 			fprintf(ofp, " [0x%x]\n", frmp->pc);
 #endif
 			if (frmp->flag & EX_FRAME) {
-				pt = (struct pt_regs *)frmp->asp;
+				pt = frmp->asp;
+				if (CRASHDEBUG(1))
+					fprintf(ofp, 
+					    " EXCEPTION FRAME: %lx\n", 
+						(unsigned long)frmp->sp);
 				print_eframe(ofp, pt);
 			}
 #ifdef REDHAT
@@ -1789,6 +1954,114 @@
 	if (kt->flags & RA_SEEK)
 		bt->flags |= BT_SPECULATE;
 
+	if (XENDUMP_DUMPFILE() && XEN() && is_task_active(bt->task) && 
+    	    STREQ(kl_funcname(bt->instptr), "stop_this_cpu")) {
+		/*
+		 *  bt->instptr of "stop_this_cpu" is not a return
+		 *  address -- replace it with the actual return
+		 *  address found at the bt->stkptr location.
+		 */
+		if (readmem((ulong)bt->stkptr, KVADDR, &eip,
+                    sizeof(ulong), "xendump eip", RETURN_ON_ERROR))
+			bt->instptr = eip;
+		bt->flags |= BT_XEN_STOP_THIS_CPU;
+		if (CRASHDEBUG(1))
+			error(INFO, "replacing stop_this_cpu with %s\n",
+				kl_funcname(bt->instptr));
+	}
+
+	if (XENDUMP_DUMPFILE() && XEN() && is_idle_thread(bt->task) &&
+	    is_task_active(bt->task) && 
+	    !(kt->xen_flags & XEN_SUSPEND) &&
+    	    STREQ(kl_funcname(bt->instptr), "schedule")) {
+		/*
+		 *  This is an invalid (stale) schedule reference
+		 *  left in the task->thread.  Move down the stack 
+		 *  until the smp_call_function_interrupt return 
+		 *  address is found.
+		 */
+		saddr = bt->stkptr;
+		while (readmem(saddr, KVADDR, &eip,
+                    sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) {
+			if (STREQ(kl_funcname(eip), "smp_call_function_interrupt")) {
+				bt->instptr = eip;
+				bt->stkptr = saddr;
+				bt->flags |= BT_XEN_STOP_THIS_CPU;
+				if (CRASHDEBUG(1))
+					error(INFO,
+					    "switch schedule to smp_call_function_interrupt\n");
+				break;
+			}
+			saddr -= sizeof(void *);
+			if (saddr <= bt->stackbase)
+				break;
+		}
+	}
+
+        if (XENDUMP_DUMPFILE() && XEN() && is_idle_thread(bt->task) &&
+            is_task_active(bt->task) &&
+            (kt->xen_flags & XEN_SUSPEND) &&
+            STREQ(kl_funcname(bt->instptr), "schedule")) {
+		int framesize = 0;
+                /*
+                 *  This is an invalid (stale) schedule reference
+                 *  left in the task->thread.  Move down the stack
+                 *  until the hypercall_page() return address is
+                 *  found, and fix up its framesize as we go.
+                 */
+                saddr = bt->stacktop;
+                while (readmem(saddr, KVADDR, &eip,
+                    sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) {
+
+                        if (STREQ(kl_funcname(eip), "xen_idle")) 
+				framesize += sizeof(ulong);
+			else if (framesize)
+				framesize += sizeof(ulong);
+
+                        if (STREQ(kl_funcname(eip), "hypercall_page")) {
+				int framesize = 24;
+                                bt->instptr = eip;
+                                bt->stkptr = saddr;
+                                if (CRASHDEBUG(1))
+                                        error(INFO,
+                                            "switch schedule to hypercall_page (framesize: %d)\n",
+						framesize);
+				FRAMESIZE_CACHE_ENTER(eip, &framesize);
+                                break;
+                        }
+                        saddr -= sizeof(void *);
+                        if (saddr <= bt->stackbase)
+                                break;
+                }
+        }
+
+	if (XENDUMP_DUMPFILE() && XEN() && !is_idle_thread(bt->task) &&
+	    is_task_active(bt->task) && 
+    	    STREQ(kl_funcname(bt->instptr), "schedule")) {
+		/*
+		 *  This is an invalid (stale) schedule reference
+		 *  left in the task->thread.  Move down the stack 
+		 *  until the smp_call_function_interrupt return 
+		 *  address is found.
+		 */
+		saddr = bt->stacktop;
+		while (readmem(saddr, KVADDR, &eip,
+                    sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) {
+			if (STREQ(kl_funcname(eip), "smp_call_function_interrupt")) {
+				bt->instptr = eip;
+				bt->stkptr = saddr;
+				bt->flags |= BT_XEN_STOP_THIS_CPU;
+				if (CRASHDEBUG(1))
+					error(INFO,
+					    "switch schedule to smp_call_function_interrupt\n");
+				break;
+			}
+			saddr -= sizeof(void *);
+			if (saddr <= bt->stackbase)
+				break;
+		}
+	}
+
 	if (!verify_back_trace(bt) && !recoverable(bt, ofp) && 
 	    !BT_REFERENCE_CHECK(bt))
 		error(INFO, "cannot resolve stack trace:\n");
@@ -1797,12 +2070,14 @@
 		return(0);
 #endif
 
-        if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) {
-		return(1);
-	}
-	if (kl_get_task_struct(task, 2, tsp)) {
-		kl_free_block(tsp);
-		return(1);
+	if (!XEN_HYPER_MODE()) {
+	        if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) {
+			return(1);
+		}
+		if (kl_get_task_struct(task, 2, tsp)) {
+			kl_free_block(tsp);
+			return(1);
+		}
 	}
 	trace = (trace_t *)alloc_trace_rec(C_TEMP);
 	if (!trace) {
@@ -1874,7 +2149,9 @@
 #endif
 		print_trace(trace, flags, ofp);
 	}
-	kl_free_block(tsp);
+	if (!XEN_HYPER_MODE())
+		kl_free_block(tsp);
+
 	free_trace_rec(trace);
 #ifdef REDHAT
 	if (KL_ERROR == KLE_PRINT_TRACE_ERROR) {
@@ -1901,13 +2178,15 @@
 	errcnt = 0;
         KL_ERROR = 0;
 
-        if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) 
-                return FALSE;
-        
-        if (kl_get_task_struct(bt->task, 2, tsp)) {
-                kl_free_block(tsp);
-                return FALSE;
-        }
+	if (!XEN_HYPER_MODE()) {
+	        if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) 
+	                return FALSE;
+	        
+	        if (kl_get_task_struct(bt->task, 2, tsp)) {
+	                kl_free_block(tsp);
+	                return FALSE;
+	        }
+	}
 
         trace = (trace_t *)alloc_trace_rec(C_TEMP);
 	if (!trace) 
@@ -1952,7 +2231,9 @@
                 } while (frmp != trace->frame);
 	}
 
-	kl_free_block(tsp);
+	if (!XEN_HYPER_MODE())
+		kl_free_block(tsp);
+
 	free_trace_rec(trace);
         return (errcnt ? FALSE : TRUE);
 }
@@ -1982,7 +2263,7 @@
 		    (sp && (bt->ref->hexval == sp->value))) 
                         bt->ref->cmdflags |= BT_REF_FOUND;
                 if (frmp->flag & EX_FRAME) {
-			type = eframe_type((struct pt_regs *)frmp->asp);
+			type = eframe_type(frmp->asp);
 			x86_dump_eframe_common(bt, (ulong *)frmp->asp, 
 				(type == KERNEL_EFRAME));
 		}
@@ -2163,12 +2444,14 @@
         bt->flags |= BT_TEXT_SYMBOLS_PRINT|BT_ERROR_MASK;
         back_trace(bt);
 
-        bt->flags = BT_EFRAME_COUNT;
-        if ((cnt = machdep->eframe_search(bt))) {
-		error(INFO, "possible exception frame%s:\n", 
-			cnt > 1 ? "s" : "");
-		bt->flags &= ~(ulonglong)BT_EFRAME_COUNT;
-        	machdep->eframe_search(bt); 
+	if (!XEN_HYPER_MODE()) {
+		bt->flags = BT_EFRAME_COUNT;
+		if ((cnt = machdep->eframe_search(bt))) {
+			error(INFO, "possible exception frame%s:\n", 
+				cnt > 1 ? "s" : "");
+			bt->flags &= ~(ulonglong)BT_EFRAME_COUNT;
+			machdep->eframe_search(bt); 
+		}
 	}
 }
 
@@ -2192,11 +2475,12 @@
 	else
 		buf[0] = NULLCHAR;
 
-	if ((sp = eframe_label(funcname, eip)))
+	if ((sp = eframe_label(funcname, eip))) 
 		funcname = sp->name;
 
 	fprintf(ofp, "%s#%d [%8lx] %s%s at %lx\n",
-                level < 10 ? " " : "", level, esp, funcname, 
+                level < 10 ? " " : "", level, esp, 
+		funcname_display(funcname), 
 		strlen(buf) ? buf : "", eip);
 
         if (bt->flags & BT_LINE_NUMBERS) {
@@ -2236,6 +2520,9 @@
 	struct eframe_labels *efp;
 	struct syment *sp;
 
+	if (XEN_HYPER_MODE())
+		return NULL;	/* ODA: need support ? */
+
 	efp = &eframe_labels;
 
 	if (!efp->init) {
@@ -2255,7 +2542,8 @@
 			efp->tracesys_exit = symbol_search("tracesys_exit");
 		}
 
-		if ((efp->sysenter = symbol_search("sysenter_entry"))) {
+		if ((efp->sysenter = symbol_search("sysenter_entry")) ||
+		    (efp->sysenter = symbol_search("ia32_sysenter_target"))) {
                 	if ((sp = symbol_search("sysexit_ret_end_marker")))
                         	efp->sysenter_end = sp;
                 	else if ((sp = symbol_search("system_call")))
@@ -2325,6 +2613,25 @@
 }
 
 /*
+ *  If it makes sense to display a different function/label name
+ *  in a stack entry, it can be done here.  Unlike eframe_label(),
+ *  this routine won't cause the passed-in function name pointer
+ *  to be changed -- this is strictly for display purposes only.
+ */
+char *
+funcname_display(char *funcname)
+{
+	struct syment *sp;
+
+        if (STREQ(funcname, "nmi_stack_correct") &&
+            (sp = symbol_search("nmi"))) 
+                return sp->name;
+
+	return funcname;
+}
+
+
+/*
  *  Cache 2k starting from the passed-in text address.  This sits on top
  *  of the instrbuf 256-byte cache, but we don't want to extend its size
  *  because we can run off the end of a module segment -- if this routine
@@ -4858,6 +5165,8 @@
 		} else {
 			codeptr++;
 		}
+		if (STREQ(op->name, "ud2a")) 
+			codeptr += kt->BUG_bytes;
 	} else {
 		opcode = *codeptr;
 		op = &op_386[*codeptr];
--- crash/lkcd_dump_v7.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_dump_v7.h	2006-10-11 09:14:35.000000000 -0400
@@ -1,8 +1,8 @@
 /* lkcd_dump_v5.h - core analysis suite
  *
  * Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -35,7 +35,7 @@
 #ifndef _DUMP_H
 #define _DUMP_H
 
-#include <linux/list.h>
+//#include <linux/list.h>
 
 /* define TRUE and FALSE for use in our dump modules */
 #ifndef FALSE
--- crash/xen_hyper_global_data.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/xen_hyper_global_data.c	2008-09-29 16:38:45.000000000 -0400
@@ -0,0 +1,399 @@
+/*
+ *  xen_hyper_global_data.c
+ *
+ *  Portions Copyright (C) 2006-2007 Fujitsu Limited
+ *  Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K.
+ *
+ *  Authors: Itsuro Oda <oda@valinux.co.jp>
+ *           Fumihiko Kakuma <kakuma@valinux.co.jp>
+ *
+ *  This file is part of Xencrash.
+ *
+ *  Xencrash is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation (version 2 of the License).
+ *
+ *  Xencrash is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with Xencrash; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
+ */
+
+#include "defs.h"
+
+#ifdef XEN_HYPERVISOR_ARCH
+#include "xen_hyper_defs.h"
+
+/*
+ * Global data for Xen hypervisor.
+ */
+
+struct xen_hyper_machdep_table xen_hyper_machdep_table = { 0 };
+struct xen_hyper_machdep_table *xhmachdep = &xen_hyper_machdep_table;
+
+struct xen_hyper_table xen_hyper_table = { 0 };
+struct xen_hyper_table *xht = &xen_hyper_table;
+
+struct xen_hyper_dumpinfo_table xen_hyper_dumpinfo_table = { 0 };
+struct xen_hyper_dumpinfo_table *xhdit = &xen_hyper_dumpinfo_table;
+
+struct xen_hyper_domain_table xen_hyper_domain_table = { 0 };
+struct xen_hyper_domain_table *xhdt = &xen_hyper_domain_table;
+
+struct xen_hyper_vcpu_table xen_hyper_vcpu_table = { 0 };
+struct xen_hyper_vcpu_table *xhvct = &xen_hyper_vcpu_table;
+
+struct xen_hyper_pcpu_table xen_hyper_pcpu_table = { 0 };
+struct xen_hyper_pcpu_table *xhpct = &xen_hyper_pcpu_table;
+
+struct xen_hyper_sched_table xen_hyper_sched_table = { 0 };
+struct xen_hyper_sched_table *xhscht = &xen_hyper_sched_table;
+
+struct xen_hyper_symbol_table_data xen_hyper_symbol_table_data = { 0 };
+struct xen_hyper_symbol_table_data *xhsymt = &xen_hyper_symbol_table_data;
+
+/*
+ * The following commands are for Xen hypervisor.
+ */
+
+struct command_table_entry xen_hyper_command_table[] = {
+	{"*", 	    cmd_pointer, help_pointer, 0},
+	{"alias",   cmd_alias,   help_alias,   0},
+        {"ascii",   cmd_ascii,   help_ascii,   0},
+        {"bt",      cmd_bt,      help_bt,      0},
+	{"dis",     cmd_dis,     help_dis,     0},
+	{"domain",  xen_hyper_cmd_domain,   xen_hyper_help_domain,  REFRESH_TASK_TABLE},
+	{"doms",    xen_hyper_cmd_doms,     xen_hyper_help_doms,    REFRESH_TASK_TABLE},
+#if defined(X86) || defined(X86_64)
+	{"dumpinfo",xen_hyper_cmd_dumpinfo, xen_hyper_help_dumpinfo,0},
+#endif
+	{"eval",    cmd_eval,    help_eval,    0},
+	{"exit",    cmd_quit,    help_exit,    0},
+	{"extend",  cmd_extend,  help_extend,  0},
+	{"gdb",     cmd_gdb,     help_gdb,     0},
+        {"help",    xen_hyper_cmd_help,     help_help,              0},
+	{"list",    cmd_list,    help__list,   0},
+	{"log",     xen_hyper_cmd_log,      xen_hyper_help_log,     0},
+	{"p",       cmd_p,       help_p,       0},
+	{"pcpus",   xen_hyper_cmd_pcpus,    xen_hyper_help_pcpus,   0},
+        {"pte",     cmd_pte,     help_pte,     0},
+        {"q",       cmd_quit,    help_quit,    0},
+        {"rd",      cmd_rd,      help_rd,      0},
+	{"repeat",  cmd_repeat,  help_repeat,  0},
+	{"sched",   xen_hyper_cmd_sched,    xen_hyper_help_sched,   0},
+        {"search",  cmd_search,  help_search,  0},
+        {"set",     cmd_set,     help_set,     0},
+        {"struct",  cmd_struct,  help_struct,  0},
+        {"sym",     cmd_sym,     help_sym,     0},
+        {"sys",     xen_hyper_cmd_sys,      xen_hyper_help_sys,     0},
+	{"test",    cmd_test,    NULL,         HIDDEN_COMMAND},
+	{"union",   cmd_union,   help_union,   0},
+	{"vcpu",    xen_hyper_cmd_vcpu,     xen_hyper_help_vcpu,    REFRESH_TASK_TABLE},
+	{"vcpus",   xen_hyper_cmd_vcpus,    xen_hyper_help_vcpus,   REFRESH_TASK_TABLE},
+	{"whatis",  cmd_whatis,  help_whatis,  0},
+	{"wr",      cmd_wr,      help_wr,      0},
+	{(char *)NULL}
+};
+
+/*
+ *
+ */
+struct xen_hyper_offset_table xen_hyper_offset_table = { 0 };
+struct xen_hyper_size_table xen_hyper_size_table = { 0 };
+
+/*
+ * help data
+ */
+
+char *xen_hyper_help_domain[] = {
+"domain",
+"display contents of domain struct",
+"[domain-id | domainp] ...",
+"  This command displays contents of domain struct for selected, or all, domains",
+"     domain-id  a domain id.",
+"       domainp  a domain pointer.",
+NULL               
+};
+
+char *xen_hyper_help_doms[] = {
+"doms",
+"display domain status information",
+"[domain-id | domainp] ...",
+"  This command displays domain status for selected, or all, domains" ,
+"     domain-id  a domain id.",
+"       domainp  a domain pointer.",
+" ",
+"    1. the DOMAIN-ID.",
+"    2. the struct domain pointer.",
+"    3. the domain state",
+"       (SF:fully shut down, SH:shutting down, DY:dying,",
+"        CP:pause by controller software, PO:polling event channels,",
+"        PA:pause by the hypervisor, RU:running).",
+"    4. the TYPE of domain",
+"       (O:dom_io, X:dom_xen, I:idle domain, 0:domain 0, U:domain U).",
+"    5. displays max_pages member of domain.",
+"    6. displays tot_pages member of domain.",
+"    7. a number of vcpu that domain is assigned.",
+"    8. the shared_info pointer of domain.",
+"    9. frame containing list of mfns containing list of mfns" ,
+"       containing p2m.",
+" ",
+"  The active domain on each CPU will be highlighted by an angle ",
+"  bracket (\">\") preceding its information.",
+"  The crashing domain on each CPU will be highlighted by an aster ",
+"  (\"*\") preceding its information.",
+"\nEXAMPLES",
+"  Show the domain status of all:\n",
+"    %s> doms",
+"       DID   DOMAIN  ST T  MAXPAGE  TOTPAGE VCPU SHARED_I  P2M_MFN",
+"      32753 ffbf8080 RU O     0        0      0      0      ----",
+"      32754 ffbfa080 RU X     0        0      0      0      ----",
+"      32767 ffbfc080 RU I     0        0      2      0      ----",
+"    >*    0 ff198080 RU 0 ffffffff   32900    2  ff194000   18d0",
+"          4 ffbee080 RU U   4000     4000     2  ff18d000   3eb92",
+"          5 ff186080 RU U   4000     4000     2  ff184000   298d3",
+"    %s>",
+NULL               
+};
+
+char *xen_hyper_help_dumpinfo[] = {
+"dumpinfo",
+"display Xen dump information",
+"[-t | -r] [pcpu-id | enotep] ...",
+"  This command displays Xen dump information for selected, or all, cpus" ,
+"       pcpu-id  a physical cpu id.",
+"        enotep  a ELF Note pointer.",
+"            -t  display time information.",
+"            -r  display register information.",
+NULL               
+};
+
+char *xen_hyper_help_log[] = {
+"log",
+"dump system message buffer",
+" ",
+"  This command dumps the xen conring contents in chronological order." ,
+"  ",
+"EXAMPLES",
+"  Dump the Xen message buffer:\n",
+"    %s> log",
+"     __  __            _____  ___                     _        _     _",
+"     \\ \\/ /___ _ __   |___ / / _ \\    _   _ _ __  ___| |_ __ _| |__ | | ___",
+"      \\  // _ \\ '_ \\    |_ \\| | | |__| | | | '_ \\/ __| __/ _` | '_ \\| |/ _ \\",
+"      /  \\  __/ | | |  ___) | |_| |__| |_| | | | \\__ \\ || (_| | |_) | |  __/",
+"     /_/\\_\\___|_| |_| |____(_)___/    \\__,_|_| |_|___/\\__\\__,_|_.__/|_|\\___|",
+"    ",
+"     http://www.cl.cam.ac.uk/netos/xen",
+"     University of Cambridge Computer Laboratory",
+"    ",
+"     Xen version 3.0-unstable (damm@) (gcc version 3.4.6 (Gentoo 3.4.6-r1, ssp-3.4.5-1.0,",
+"     pie-8.7.9)) Wed Dec  6 17:34:32 JST 2006",
+"     Latest ChangeSet: unavailable",
+"    ",
+"    (XEN) Console output is synchronous.",
+"    (XEN) Command line: 12733-i386-pae/xen.gz console=com1 sync_console conswitch=bb com1",
+"    =115200,8n1,0x3f8 dom0_mem=480000 crashkernel=64M@32M",
+"    (XEN) Physical RAM map:",
+"    (XEN)  0000000000000000 - 0000000000098000 (usable)",
+"    (XEN)  0000000000098000 - 00000000000a0000 (reserved)",
+"    (XEN)  00000000000f0000 - 0000000000100000 (reserved)",
+"    (XEN)  0000000000100000 - 000000003f7f0000 (usable)",
+"    (XEN)  000000003f7f0000 - 000000003f7f3000 (ACPI NVS)",
+"    (XEN)  000000003f7f3000 - 000000003f800000 (ACPI data)",
+"    (XEN)  00000000e0000000 - 00000000f0000000 (reserved)",
+"    (XEN)  00000000fec00000 - 0000000100000000 (reserved)",
+"    (XEN) Kdump: 64MB (65536kB) at 0x2000000",
+"    (XEN) System RAM: 1015MB (1039904kB)",
+"    (XEN) ACPI: RSDP (v000 XPC                                   ) @ 0x000f9250",
+"    ...",
+NULL               
+};
+
+char *xen_hyper_help_pcpus[] = {
+"pcpus",
+"display physical cpu information",
+"[-r][-t] [pcpu-id | pcpup] ...",
+"  This command displays physical cpu information for selected, or all, cpus" ,
+"       pcpu-id  a physical cpu id.",
+"         pcpup  a physical cpu pointer.",
+"      cur-vcpu  a current virtual cpu pointer.",
+"            -r  display register information.",
+"            -t  display init_tss information.",
+" ",
+"  The crashing physical cpu will be highlighted by an aster ",
+"  (\"*\") preceding its information.",
+"\nEXAMPLES",
+"  Show the physical cpu status of all:\n",
+"    %s> pcpus",
+"       PCID   PCPU   CUR-VCPU",
+"          0 ff1a3fb4 ffbf9080",
+"     *    1 ff1dbfb4 ffbf8080",
+"    %s>",
+" ",
+"  Show the physical cpu status of all with register information:\n",
+"    %s> pcpus -r",
+"       PCID   PCPU   CUR-VCPU",
+"     *    0 ff1b7fb4 ffbef080",
+"    Register information:",
+"    struct cpu_user_regs {",
+"      ebx = 0x0,",
+"      ecx = 0xdcf4bed8,",
+"      edx = 0xc0326887,",
+"      esi = 0x63,",
+"      edi = 0x0,",
+"      ebp = 0xdcf4bee0,",
+"      eax = 0x25,",
+"      error_code = 0x6,",
+"      entry_vector = 0xe,",
+"      eip = 0xc01014a7,",
+"      cs = 0x61,",
+"      saved_upcall_mask = 0x0,",
+"      _pad0 = 0x0,",
+"      eflags = 0x202,",
+"      esp = 0xdcf4bed0,",
+"      ss = 0x69,",
+"      _pad1 = 0x0,",
+"      es = 0x7b,",
+"      _pad2 = 0x0,",
+"      ds = 0x7b,",
+"      _pad3 = 0x0,",
+"      fs = 0x0,",
+"      _pad4 = 0x0,",
+"      gs = 0x0,",
+"      _pad5 = 0x0",
+"    }",
+" ",
+"  Show the physical cpu status of all with init_tss information:\n",
+"    %s> pcpus -t",
+"       PCID   PCPU   CUR-VCPU",
+"     *    0 ff1b7fb4 ffbef080",
+"    init_tss information:",
+"    struct tss_struct {",
+"      back_link = 0x0,",
+"      __blh = 0x0,",
+"      esp0 = 0xff1b7fe8,",
+"      ss0 = 0xe010,",
+"      __ss0h = 0x0,",
+"      esp1 = 0xdcf4bff8,",
+"      ss1 = 0x69,",
+"      __ss1h = 0x0,",
+"      esp2 = 0x0,",
+"      ss2 = 0x0,",
+"      __ss2h = 0x0,",
+"      __cr3 = 0x0,",
+"      eip = 0x0,",
+"      eflags = 0x0,",
+"      eax = 0x0,",
+"      ecx = 0x0,",
+"      edx = 0x0,",
+"      ebx = 0x0,",
+"      esp = 0x0,",
+"      ebp = 0x0,",
+"      esi = 0x0,",
+"      edi = 0x0,",
+"      es = 0x0,",
+"      __esh = 0x0,",
+"      cs = 0x0,",
+"      __csh = 0x0,",
+"      ss = 0x0,",
+"      __ssh = 0x0,",
+"      ds = 0x0,",
+"      __dsh = 0x0,",
+"      fs = 0x0,",
+"      __fsh = 0x0,",
+"      gs = 0x0,",
+"      __gsh = 0x0,",
+"      ldt = 0x0,",
+"      __ldth = 0x0,",
+"      trace = 0x0,",
+"      bitmap = 0x8000,",
+"      __cacheline_filler = \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"",
+"    }",
+NULL               
+};
+
+char *xen_hyper_help_sched[] = {
+"pcpus",
+"display scheduler information",
+"[-v] [pcpu-id] ...",
+"  This command displays scheduler information for selected, or all, cpus" ,
+"       pcpu-id  a physical cpu id.",
+"            -v  display verbosely scheduler information.",
+" ",
+NULL               
+};
+
+char *xen_hyper_help_sys[] = {
+"sys",
+"system data",
+"[-c [name|number]] config",
+"  This command displays system-specific data.  If no arguments are entered,\n"
+"  the same system data shown during %s invocation is shown.\n",
+"\nEXAMPLES",
+"  Display essential system information:\n",
+"    %s> sys",
+"      DEBUG KERNEL: xen-syms",
+"          DUMPFILE: vmcore",
+"              CPUS: 2",
+"           DOMAINS: 2",
+"           MACHINE: Pentium III (Coppermine)  (866 Mhz)",
+"            MEMORY: 2 GB",
+"    %s>",
+NULL               
+};
+
+char *xen_hyper_help_vcpu[] = {
+"vcpu",
+"display contents of vcpu struct",
+"[vcpup] ...",
+"  This command displays contents of vcpu struct for selected, or all, vcpus",
+"       vcpu-id  a virtual cpu id.",
+"         vcpup  a virtual cpu pointer.",
+NULL               
+};
+
+char *xen_hyper_help_vcpus[] = {
+"vcpus",
+"display vcpu status information",
+"[-i domain-id vcpu-id | vcpup] ...",
+"  This command displays vcpu status for selected, or all, vcpus" ,
+"     domain-id  a domain id.",
+"       vcpu-id  a VCPU-ID.",
+"         vcpup  a hexadecimal struct vcpu pointer.",
+"            -i  specify vcpu id as an argument.",
+" ",
+"    1. the VCPU-ID.",
+"    2. the physical CPU-ID.",
+"    3. the struct vcpu pointer.",
+"    4. the vcpu state (RU, BL, OF).",
+"    5. the TYPE of domain that vcpu is assigned(I, 0, G).",
+"    6. the DOMAIN-ID of domain that vcpu is assigned.",
+"    7. the struct domain pointer of domain that vcpu is assigned.",
+" ",
+"  The active vcpu on each CPU will be highlighted by an angle ",
+"  bracket (\">\") preceding its information.",
+"  The crashing vcpu on each CPU will be highlighted by an aster ",
+"  (\"*\") preceding its information.",
+"\nEXAMPLES",
+"  Show the vcpu status of all:\n",
+"    %s> vcpus",
+"       VCID  PCID   VCPU   ST T DOMID  DOMAIN",
+"          0     0 ffbfe080 RU I 32767 ffbfc080",
+"          1     1 ff1df080 RU I 32767 ffbfc080",
+"    >*    0     0 ff195180 RU 0     0 ff198080",
+"    >     1     1 ff190080 BL 0     0 ff198080",
+"          0     1 ff18a080 BL G     4 ffbee080",
+"          1     0 ff189080 BL G     4 ffbee080",
+"          0     1 ff1f3080 BL G     5 ff186080",
+"          1     0 ff1f2080 BL G     5 ff186080",
+"    %s>",
+NULL               
+};
+
+struct task_context fake_tc = { 0 };
+
+#endif
--- crash/xen_hyper_defs.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/xen_hyper_defs.h	2008-12-03 12:03:24.000000000 -0500
@@ -0,0 +1,976 @@
+/*
+ *  xen_hyper_defs.h
+ *
+ *  Portions Copyright (C) 2006-2007 Fujitsu Limited
+ *  Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K.
+ *
+ *  Authors: Itsuro Oda <oda@valinux.co.jp>
+ *           Fumihiko Kakuma <kakuma@valinux.co.jp>
+ *
+ *  This file is part of Xencrash.
+ *
+ *  Xencrash is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation (version 2 of the License).
+ *
+ *  Xencrash is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with Xencrash; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
+ */
+
+#ifdef XEN_HYPERVISOR_ARCH
+
+#include <sys/types.h>
+#include <elf.h>
+
+#ifdef X86
+/* Xen Hypervisor address space layout */
+#define IOREMAP_VIRT_END        (0UL)
+#define IOREMAP_VIRT_START      (0xFFC00000UL)
+#define DIRECTMAP_VIRT_END      IOREMAP_VIRT_START
+#define DIRECTMAP_VIRT_START    (0xFF000000UL)
+#define MAPCACHE_VIRT_END       DIRECTMAP_VIRT_START
+#define MAPCACHE_VIRT_START     (0xFFC00000UL)
+#define PERDOMAIN_VIRT_END      DIRECTMAP_VIRT_START
+#define PERDOMAIN_VIRT_START    (0xFE800000UL)
+#define SH_LINEAR_PT_VIRT_END   PERDOMAIN_VIRT_START
+#define SH_LINEAR_PT_VIRT_START (0xFE400000UL)
+#define SH_LINEAR_PT_VIRT_START_PAE (0xFE000000UL)
+#define LINEAR_PT_VIRT_END      SH_LINEAR_PT_VIRT_START
+#define LINEAR_PT_VIRT_START    (0xFE000000UL)
+#define LINEAR_PT_VIRT_START_PAE    (0xFD800000UL)
+#define RDWR_MPT_VIRT_END       LINEAR_PT_VIRT_START
+#define RDWR_MPT_VIRT_START     (0xFDC00000UL)
+#define RDWR_MPT_VIRT_START_PAE     (0xFC800000UL)
+#define FRAMETABLE_VIRT_END     RDWR_MPT_VIRT_START
+#define FRAMETABLE_VIRT_START   (0xFC400000UL)
+#define FRAMETABLE_VIRT_START_PAE   (0xF6800000UL)
+#define RO_MPT_VIRT_END         FRAMETABLE_VIRT_START
+#define RO_MPT_VIRT_START       (0xFC000000UL)
+#define RO_MPT_VIRT_START_PAE       (0xF5800000UL)
+
+#define HYPERVISOR_VIRT_START   RO_MPT_VIRT_START
+#define HYPERVISOR_VIRT_START_PAE   RO_MPT_VIRT_START_PAE
+#endif
+
+#ifdef X86_64
+#define HYPERVISOR_VIRT_START (0xffff800000000000)
+#define HYPERVISOR_VIRT_END   (0xffff880000000000)
+#define DIRECTMAP_VIRT_START  (0xffff830000000000)
+#define DIRECTMAP_VIRT_END    (0xffff840000000000)
+#define PAGE_OFFSET_XEN_HYPER DIRECTMAP_VIRT_START
+#define XEN_VIRT_START        (0xffff828c80000000)
+#define XEN_VIRT_ADDR(vaddr) \
+    (((vaddr) >= XEN_VIRT_START) && ((vaddr) < DIRECTMAP_VIRT_START))
+#endif
+
+#ifdef IA64
+#define HYPERVISOR_VIRT_START (0xe800000000000000)
+#define HYPERVISOR_VIRT_END   (0xf800000000000000)
+#define DEFAULT_SHAREDINFO_ADDR (0xf100000000000000)
+#define PERCPU_PAGE_SIZE      65536
+#define PERCPU_ADDR	      (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
+#define DIRECTMAP_VIRT_START  (0xf000000000000000)
+#define DIRECTMAP_VIRT_END    PERCPU_ADDR
+#define VIRT_FRAME_TABLE_SIZE     (0x0100000000000000)
+
+#define PERCPU_VIRT_ADDR(vaddr) \
+    (((vaddr) >= PERCPU_ADDR) && ((vaddr) < PERCPU_ADDR + PERCPU_PAGE_SIZE))
+
+#define FRAME_TABLE_VIRT_ADDR(vaddr) \
+    ((vaddr) >= xhmachdep->frame_table && (vaddr) < xhmachdep->frame_table + VIRT_FRAME_TABLE_SIZE)
+
+#undef IA64_RBS_OFFSET
+#define IA64_RBS_OFFSET   ((XEN_HYPER_SIZE(vcpu) + 15) & ~15)
+
+#endif /* IA64 */
+
+#define DIRECTMAP_VIRT_ADDR(vaddr) \
+    (((vaddr) >= DIRECTMAP_VIRT_START) && ((vaddr) < DIRECTMAP_VIRT_END))
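+/*
+ * Usage sketch: the predicates above are plain range checks.  A reader
+ * deciding whether a hypervisor virtual address can be translated by
+ * simple offset arithmetic might do, roughly:
+ *
+ *	if (DIRECTMAP_VIRT_ADDR(vaddr))
+ *		maddr = vaddr - DIRECTMAP_VIRT_START;
+ *
+ * which assumes the direct map begins at machine address zero; it is
+ * only meant to show how the predicate is intended to be used.
+ */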
+
+typedef uint16_t	domid_t;
+typedef uint32_t	Elf_Word;
+
+/*
+ * NOTE kakuma: The following defines are a temporary versioning scheme
+ * for the ELF note format, used only within crash.
+ */
+#define XEN_HYPER_ELF_NOTE_V1	1
+#define XEN_HYPER_ELF_NOTE_V2	2
+#define XEN_HYPER_ELF_NOTE_V3	3
+#define XEN_HYPER_ELF_NOTE_V4	4
+
+#ifdef X86
+#define XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE	0x100
+#endif
+#if defined(X86_64) || defined(IA64)
+#define XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE	0x200
+#endif
+
+/*
+ * Xen Hyper
+ */
+#define XEN_HYPER_SMP (0x400)
+
+#ifdef X86
+#define XEN_HYPER_MAX_VIRT_CPUS  (32)
+#define XEN_HYPER_HZ 100
+#endif
+#ifdef X86_64
+#define XEN_HYPER_MAX_VIRT_CPUS  (32)
+#define XEN_HYPER_HZ 100
+#endif
+#ifdef IA64
+#define XEN_HYPER_MAX_VIRT_CPUS  (64)
+#define XEN_HYPER_HZ 100
+#endif
+#ifndef XEN_HYPER_MAX_VIRT_CPUS
+#define XEN_HYPER_MAX_VIRT_CPUS  (1)
+#endif
+
+#if defined(X86) || defined(X86_64)
+#define xen_hyper_per_cpu(var, cpu)  \
+	((ulong)(var) + (((ulong)(cpu))<<xht->percpu_shift))
+#elif defined(IA64)
+#define xen_hyper_per_cpu(var, cpu)  \
+	((xht->flags & XEN_HYPER_SMP) ? \
+		(ulong)(var) + (xht->__per_cpu_offset[cpu]) : \
+		(ulong)(var))
+#endif
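+/*
+ * Usage sketch: on x86/x86_64 the per-cpu address is the symbol value
+ * plus the cpu id shifted by the percpu_shift discovered at init time.
+ * Reading a per-cpu symbol for, say, pcpu 1 would look roughly like:
+ *
+ *	ulong addr = xen_hyper_per_cpu(symbol_value("per_cpu__schedule_data"), 1);
+ *	readmem(addr, KVADDR, buf, XEN_HYPER_SIZE(schedule_data),
+ *		"schedule_data", FAULT_ON_ERROR);
+ *
+ * symbol_value() and readmem() are the existing crash primitives, buf is
+ * a caller-provided buffer, and the symbol name is only an example.
+ */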
+
+#if defined(X86) || defined(X86_64)
+#define XEN_HYPER_STACK_ORDER 2
+#if 0
+#define XEN_HYPER_STACK_SIZE (machdep->pagesize << XEN_HYPER_STACK_ORDER)
+#endif
+#define XEN_HYPER_GET_CPU_INFO(sp) \
+	((sp & ~(STACKSIZE()-1)) | \
+	(STACKSIZE() - XEN_HYPER_SIZE(cpu_info)))
+#endif
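+/*
+ * For illustration, with a 16KB stack (STACKSIZE() == 0x4000) and a
+ * hypothetical cpu_info size of 0x80, a stack pointer of
+ * 0xffff830000127f00 resolves to:
+ *
+ *	(0xffff830000127f00 & ~0x3fff) + (0x4000 - 0x80)
+ *		== 0xffff830000124000 + 0x3f80
+ *		== 0xffff830000127f80
+ *
+ * i.e. the cpu_info block that occupies the top of that cpu's stack.
+ */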
+
+#define XEN_HYPER_CONRING_SIZE 16384
+
+/* system time */
+#define XEN_HYPER_NANO_TO_SEC(ns)	((ulonglong)((ns) / 1000000000ULL))
+#define XEN_HYPER_MICR_TO_SEC(us)	((ulonglong)((us) / 1000000ULL))
+#define XEN_HYPER_MILI_TO_SEC(ms)	((ulonglong)((ms) / 1000ULL))
+
+/*
+ * Domain
+ */
+/* Prepared domain ID. */
+#define XEN_HYPER_DOMID_IO		(0x7FF1U)
+#define XEN_HYPER_DOMID_XEN		(0x7FF2U)
+#define XEN_HYPER_DOMID_IDLE		(0x7FFFU)
+
+/* Domain flags (domain_flags). */
+ /* Is this domain privileged? */
+#define XEN_HYPER__DOMF_privileged       0
+#define XEN_HYPER_DOMF_privileged        (1UL<<XEN_HYPER__DOMF_privileged)
+ /* Guest shut itself down for some reason. */
+#define XEN_HYPER__DOMF_shutdown         1
+#define XEN_HYPER_DOMF_shutdown          (1UL<<XEN_HYPER__DOMF_shutdown)
+ /* Death rattle. */
+#define XEN_HYPER__DOMF_dying            2
+#define XEN_HYPER_DOMF_dying             (1UL<<XEN_HYPER__DOMF_dying)
+ /* Domain is paused by controller software. */
+#define XEN_HYPER__DOMF_ctrl_pause       3
+#define XEN_HYPER_DOMF_ctrl_pause        (1UL<<XEN_HYPER__DOMF_ctrl_pause)
+ /* Domain is being debugged by controller software. */
+#define XEN_HYPER__DOMF_debugging        4
+#define XEN_HYPER_DOMF_debugging         (1UL<<XEN_HYPER__DOMF_debugging)
+ /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
+#define XEN_HYPER__DOMF_polling          5
+#define XEN_HYPER_DOMF_polling           (1UL<<XEN_HYPER__DOMF_polling)
+ /* Domain is paused by the hypervisor? */
+#define XEN_HYPER__DOMF_paused           6
+#define XEN_HYPER_DOMF_paused            (1UL<<XEN_HYPER__DOMF_paused)
+ /* Domain flag error */
+#define XEN_HYPER_DOMF_ERROR            ((ulong)(-1))
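+/*
+ * A flag word read from a domain is tested bit-by-bit against the masks
+ * above; a minimal sketch, assuming dc points to a filled-in
+ * struct xen_hyper_domain_context:
+ *
+ *	if (dc->domain_flags == XEN_HYPER_DOMF_ERROR)
+ *		error(WARNING, "cannot read domain_flags\n");
+ *	else if (dc->domain_flags & XEN_HYPER_DOMF_dying)
+ *		fprintf(fp, "domain %d is dying\n", dc->domain_id);
+ */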
+
+/* Domain status. */
+ /* Is this an HVM guest? */
+#define XEN_HYPER__DOMS_HVM              0
+#define XEN_HYPER_DOMS_HVM               (1UL<<XEN_HYPER__DOMS_HVM)
+ /* Is this guest fully privileged (aka dom0)? */
+#define XEN_HYPER__DOMS_privileged       1
+#define XEN_HYPER_DOMS_privileged        (1UL<<XEN_HYPER__DOMS_privileged)
+ /* Is this guest being debugged by dom0? */
+#define XEN_HYPER__DOMS_debugging        2
+#define XEN_HYPER_DOMS_debugging         (1UL<<XEN_HYPER__DOMS_debugging)
+ /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
+#define XEN_HYPER__DOMS_polling          3
+#define XEN_HYPER_DOMS_polling           (1UL<<XEN_HYPER__DOMS_polling)
+ /* Domain is paused by controller software? */
+#define XEN_HYPER__DOMS_ctrl_pause       4
+#define XEN_HYPER_DOMS_ctrl_pause        (1UL<<XEN_HYPER__DOMS_ctrl_pause)
+ /* Is this guest dying (i.e., a zombie)? */
+#define XEN_HYPER__DOMS_dying            5
+#define XEN_HYPER_DOMS_dying             (1UL<<XEN_HYPER__DOMS_dying)
+ /* In process of shutting down? */
+#define XEN_HYPER__DOMS_shuttingdown     6
+#define XEN_HYPER_DOMS_shuttingdown      (1UL<<XEN_HYPER__DOMS_shuttingdown)
+ /* Fully shut down? */
+#define XEN_HYPER__DOMS_shutdown         7
+#define XEN_HYPER_DOMS_shutdown          (1UL<<XEN_HYPER__DOMS_shutdown)
+
+/*
+ * VCPU
+ */
+/* VCPU flags (vcpu_flags). */
+ /* Has the FPU been initialised? */
+#define XEN_HYPER__VCPUF_fpu_initialised 0
+#define XEN_HYPER_VCPUF_fpu_initialised  (1UL<<XEN_HYPER__VCPUF_fpu_initialised)
+ /* Has the FPU been used since it was last saved? */
+#define XEN_HYPER__VCPUF_fpu_dirtied     1
+#define XEN_HYPER_VCPUF_fpu_dirtied      (1UL<<XEN_HYPER__VCPUF_fpu_dirtied)
+ /* Domain is blocked waiting for an event. */
+#define XEN_HYPER__VCPUF_blocked         2
+#define XEN_HYPER_VCPUF_blocked          (1UL<<XEN_HYPER__VCPUF_blocked)
+ /* Currently running on a CPU? */
+#define XEN_HYPER__VCPUF_running         3
+#define XEN_HYPER_VCPUF_running          (1UL<<XEN_HYPER__VCPUF_running)
+ /* Initialization completed. */
+#define XEN_HYPER__VCPUF_initialised     4
+#define XEN_HYPER_VCPUF_initialised      (1UL<<XEN_HYPER__VCPUF_initialised)
+ /* VCPU is offline. */
+#define XEN_HYPER__VCPUF_down            5
+#define XEN_HYPER_VCPUF_down             (1UL<<XEN_HYPER__VCPUF_down)
+ /* NMI callback pending for this VCPU? */
+#define XEN_HYPER__VCPUF_nmi_pending     8
+#define XEN_HYPER_VCPUF_nmi_pending      (1UL<<XEN_HYPER__VCPUF_nmi_pending)
+ /* Avoid NMI reentry by allowing NMIs to be masked for short periods. */
+#define XEN_HYPER__VCPUF_nmi_masked      9
+#define XEN_HYPER_VCPUF_nmi_masked       (1UL<<XEN_HYPER__VCPUF_nmi_masked)
+ /* VCPU is polling a set of event channels (SCHEDOP_poll). */
+#define XEN_HYPER__VCPUF_polling         10
+#define XEN_HYPER_VCPUF_polling          (1UL<<XEN_HYPER__VCPUF_polling)
+ /* VCPU is paused by the hypervisor? */
+#define XEN_HYPER__VCPUF_paused          11
+#define XEN_HYPER_VCPUF_paused           (1UL<<XEN_HYPER__VCPUF_paused)
+/* VCPU is blocked awaiting an event to be consumed by Xen. */
+#define XEN_HYPER__VCPUF_blocked_in_xen  12
+#define XEN_HYPER_VCPUF_blocked_in_xen   (1UL<<XEN_HYPER__VCPUF_blocked_in_xen)
+ /* VCPU flag error */
+#define XEN_HYPER_VCPUF_ERROR            ((ulong)(-1))
+
+/* VCPU state (vcpu_runstate_info.state). */
+/* VCPU is currently running on a physical CPU. */
+#define XEN_HYPER_RUNSTATE_running  0
+/* VCPU is runnable, but not currently scheduled on any physical CPU. */
+#define XEN_HYPER_RUNSTATE_runnable 1
+/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
+#define XEN_HYPER_RUNSTATE_blocked  2
+/*
+ * VCPU is not runnable, but it is not blocked.
+ * This is a 'catch all' state for things like hotplug and pauses by the
+ * system administrator (or for critical sections in the hypervisor).
+ * RUNSTATE_blocked dominates this state (it is the preferred state).
+ */
+#define XEN_HYPER_RUNSTATE_offline  3
+#define XEN_HYPER_RUNSTATE_ERROR        ((int)(-1))
+
+/*
+ * PCPU
+ */
+#define XEN_HYPER_TSS_IST_MAX	7
+
+/*
+ * Scheduler
+ */
+#define XEN_SCHEDULER_SEDF	4
+#define XEN_SCHEDULER_CREDIT	5
+
+#define XEN_HYPER_OPT_SCHED_SIZE 10
+
+/*
+ * Constants for function
+ */
+#define XEN_HYPER_CMD_BUFSIZE (1024)
+
+#define XEN_HYPER_DOMAIN_ID_INVALID ((uint16_t)(-1))
+#define XEN_HYPER_VCPU_ID_INVALID ((int)(-1))
+#define XEN_HYPER_PCPU_ID_INVALID ((uint)(-1))
+
+#define XEN_HYPER_DOMAIN_READ_DOM0 0
+#define XEN_HYPER_DOMAIN_READ_INIT 1
+#define XEN_HYPER_DOMAIN_READ_NEXT 2
+
+#define XEN_HYPER_DOMAIN_FLAGS_PRIV 0
+#define XEN_HYPER_DOMAIN_FLAGS_STAT 1
+
+#define XEN_HYPER_STR_ADDR	(0x1)
+#define XEN_HYPER_STR_DID	(0x11)
+#define XEN_HYPER_STR_DOMAIN	(0x12)
+#define XEN_HYPER_STR_VCID	(0x21)
+#define XEN_HYPER_STR_VCPU	(0x22)
+#define XEN_HYPER_STR_PCID	(0x31)
+#define XEN_HYPER_STR_PCPU	(0x32)
+#define XEN_HYPER_STR_INVALID	(-1)
+
+#define XEN_HYPER_DOMAIN_TYPE_IO	(0x0)
+#define XEN_HYPER_DOMAIN_TYPE_XEN	(0x1)
+#define XEN_HYPER_DOMAIN_TYPE_IDLE	(0x2)
+#define XEN_HYPER_DOMAIN_TYPE_DOM0	(0x3)
+#define XEN_HYPER_DOMAIN_TYPE_GUEST	(0x4)
+#define XEN_HYPER_DOMAIN_TYPE_INVALID	(-1)
+
+#define XEN_HYPER_ELF_NOTE_FILL_T_NOTE		(0)
+#define XEN_HYPER_ELF_NOTE_FILL_T_CORE		(1)
+#define XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE	(2)
+#define XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE_M	(3)
+#define XEN_HYPER_ELF_NOTE_FILL_T_PRS		(4)
+#define XEN_HYPER_ELF_NOTE_FILL_T_XEN_REGS	(5)
+
+/*
+ * Command interface structs
+ */
+#define XEN_HYPER_MAX_ARGS 100
+
+struct xen_hyper_cmd_args {
+	int cnt;
+	ulong value[XEN_HYPER_MAX_ARGS];
+	int type[XEN_HYPER_MAX_ARGS];
+	ulong addr[XEN_HYPER_MAX_ARGS];
+	void *context[XEN_HYPER_MAX_ARGS];
+};
+
+/*
+ * dump information command
+ */
+/* options */
+#define XEN_HYPER_DUMPINFO_TIME (0x1)
+#define XEN_HYPER_DUMPINFO_REGS (0x2)
+
+/*
+ * Domain command
+ */
+#define XEN_HYPER_MAX_DOMS_ARGS XEN_HYPER_MAX_ARGS
+
+/*
+ * Physical cpu command
+ */
+#define XEN_HYPER_MAX_PCPUS_ARGS XEN_HYPER_MAX_ARGS
+#define XEN_HYPER_PCPUS_1STCALL (0x1)
+#define XEN_HYPER_PCPUS_REGS (0x2)
+#define XEN_HYPER_PCPUS_TSS (0x4)
+
+/*
+ * Schedule command
+ */
+#define XEN_HYPER_MAX_SCHED_ARGS XEN_HYPER_MAX_ARGS
+#define XEN_HYPER_SCHED_1STCALL (0x1)
+#define XEN_HYPER_SCHED_VERBOSE (0x2)
+
+/*
+ * Virtual cpu command
+ */
+#define XEN_HYPER_MAX_VCPUS_ARGS XEN_HYPER_MAX_ARGS
+#define XEN_HYPER_VCPUS_ID (0x1)
+
+
+/*
+ * table structs
+ */
+struct xen_hyper_machdep_table {
+	void (*pcpu_init)(void);
+#ifdef IA64
+	long frame_table;
+#endif
+};
+
+struct xen_hyper_table {
+	ulong flags;
+	ulong stext;
+	ulong etext;
+	ulong cpu_data_address;
+	struct new_utsname utsname;
+	uint cpu_curr;
+	uint max_cpus;			/* max number of cpus in the system */
+	int cores;			/* number of cpu cores */
+	int pcpus;			/* number of physical cpus */
+	int vcpus;			/* number of virtual cpus */
+	int domains;			/* number of domains */
+	ulong sys_pages;
+	int crashing_cpu;
+	struct xen_hyper_vcpu_context *crashing_vcc;
+	ulong max_page;
+	ulong total_pages;
+	ulong *cpumask;
+	uint *cpu_idxs;
+	ulong *__per_cpu_offset;
+	int percpu_shift;
+	int idle_vcpu_size;
+	ulong *idle_vcpu_array;
+};
+
+struct xen_hyper_dumpinfo_context {
+	ulong note;			/* per_cpu__crash_notes address */
+	uint pcpu_id;
+	char *ELF_Prstatus_ptr;		/* pointer to ELF_Prstatus buf */
+	char *pr_reg_ptr;		/* pointer to pr_reg buf */
+};
+
+struct xen_hyper_dumpinfo_context_xen_core {
+	ulong note;			/* per_cpu__crash_notes v3:xen_regs address */
+	uint pcpu_id;
+	char *crash_xen_core_ptr;	/* pointer to crash_xen_core_t buf */
+};
+
+struct xen_hyper_dumpinfo_context_xen_info {
+	ulong note;			/* per_cpu__crash_notes v2:xen, v3:xen_info address */
+	uint pcpu_id;
+	char *crash_xen_info_ptr;	/* pointer to v2:xen_crash_xen_regs_t, v3:crash_xen_info_t buf */
+};
+
+struct xen_hyper_dumpinfo_table {
+	uint note_ver;
+	struct xen_hyper_dumpinfo_context *context_array;
+	struct xen_hyper_dumpinfo_context_xen_core *context_xen_core_array;
+	struct xen_hyper_dumpinfo_context_xen_info context_xen_info;
+	char *crash_note_core_array;
+	char *crash_note_xen_core_array;
+	char *crash_note_xen_info_ptr;
+	uint xen_info_cpu;
+	Elf_Word note_size;
+	Elf_Word core_offset;
+	Elf_Word core_size;
+	Elf_Word xen_core_offset;
+	Elf_Word xen_core_size;
+	Elf_Word xen_info_offset;
+	Elf_Word xen_info_size;
+};
+
+/* domain */
+struct xen_hyper_domain_context {
+	ulong domain;			/* domain address */
+	domid_t domain_id;
+	uint tot_pages;
+	uint max_pages;
+	uint xenheap_pages;
+	ulong shared_info;
+	ulong sched_priv;
+	ulong next_in_list;
+	ulong domain_flags;
+	ulong evtchn;
+	int vcpu_cnt;
+	ulong vcpu[XEN_HYPER_MAX_VIRT_CPUS];
+	struct xen_hyper_vcpu_context_array *vcpu_context_array;
+};
+
+struct xen_hyper_domain_table {
+	uint32_t flags;
+	struct xen_hyper_domain_context *context_array;
+	int context_array_cnt;
+	ulong running_domains;
+	struct xen_hyper_domain_context *dom_io;
+	struct xen_hyper_domain_context *dom_xen;
+	struct xen_hyper_domain_context *dom0;
+	struct xen_hyper_domain_context *idle_domain;
+	struct xen_hyper_domain_context *curr_domain;
+	struct xen_hyper_domain_context *last;
+	char *domain_struct;
+	char *domain_struct_verify;
+};
+
+/* vcpu */
+struct xen_hyper_vcpu_context {
+	ulong vcpu;			/* vcpu address */
+	int vcpu_id;
+	int processor;
+	ulong vcpu_info;
+	ulong domain;
+	ulong next_in_list;
+	ulong sleep_tick;
+	ulong sched_priv;
+	int state;
+	uint64_t state_entry_time;
+	ulong runstate_guest;
+	ulong vcpu_flags;
+};
+
+struct xen_hyper_vcpu_context_array {
+	struct xen_hyper_vcpu_context *context_array;
+	int context_array_cnt;
+	int context_array_valid;
+};
+
+struct xen_hyper_vcpu_table {
+	uint32_t flags;
+	struct xen_hyper_vcpu_context_array *vcpu_context_arrays;
+	int vcpu_context_arrays_cnt;
+	ulong idle_vcpu;
+	struct xen_hyper_vcpu_context_array *idle_vcpu_context_array;
+	struct xen_hyper_vcpu_context *last;
+	char *vcpu_struct;
+	char *vcpu_struct_verify;
+};
+
+/* pcpu */
+struct xen_hyper_pcpu_context {
+	/* pcpu info */
+	ulong pcpu;			/* pcpu address */
+	uint processor_id;
+	ulong guest_cpu_user_regs;
+	ulong current_vcpu;
+	/* tss_struct info */
+	ulong init_tss;
+	union {
+		uint32_t esp0;
+		uint64_t rsp0;	
+	} sp;
+	uint64_t ist[XEN_HYPER_TSS_IST_MAX];	/* This is valid on x86_64 */
+};
+
+struct xen_hyper_pcpu_table {
+	struct xen_hyper_pcpu_context *context_array;
+	struct xen_hyper_pcpu_context *last;
+	char *pcpu_struct;
+};
+
+/* scheduler */
+struct xen_hyper_sched_context {
+	uint cpu_id;
+	ulong schedule_data;
+	ulong curr;
+	ulong idle;
+	ulong sched_priv;
+	ulong tick;
+};
+
+struct xen_hyper_sched_table {
+	char *name;
+	char opt_sched[XEN_HYPER_OPT_SCHED_SIZE];
+	uint sched_id;
+	ulong scheduler;
+	char *scheduler_struct;
+	struct xen_hyper_sched_context *sched_context_array;
+};
+
+struct syment;
+
+struct xen_hyper_symbol_table_data {
+	struct syment *symtable;
+};
+
+struct xen_hyper_size_table {
+	long ELF_Prstatus;			/* elf note v1,v2,v3,v4 */
+	long ELF_Signifo;
+	long ELF_Gregset;
+	long ELF_Timeval;
+	long arch_domain;
+	long arch_shared_info;
+	long cpu_info;
+	long cpu_time;
+	long cpu_user_regs;
+	long cpumask_t;
+	long cpuinfo_ia64;
+	long cpuinfo_x86;
+	long crash_note_t;			/* elf note v2, v3 */
+	long crash_note_core_t;			/* elf note v2, v3 */
+	long crash_note_xen_t;			/* elf note v2 */
+	long crash_note_xen_core_t;		/* elf note v3 */
+	long crash_note_xen_info_t;		/* elf note v3 */
+	long crash_xen_core_t;			/* elf note v3,v4 */
+	long crash_xen_info_t;			/* elf note v3,v4 */
+	long domain;
+#ifdef IA64
+	long mm_struct;
+#endif
+	long note_buf_t;			/* elf note v1 */
+	long schedule_data;
+	long scheduler;
+	long shared_info;
+	long timer;
+	long tss_struct;
+	long vcpu;
+	long vcpu_runstate_info;
+	long xen_crash_xen_regs_t;		/* elf note v2 */
+};
+
+struct xen_hyper_offset_table {
+	/* ELF */
+	long ELF_Prstatus_pr_info;
+	long ELF_Prstatus_pr_cursig;
+	long ELF_Prstatus_pr_sigpend;
+	long ELF_Prstatus_pr_sighold;
+	long ELF_Prstatus_pr_pid;
+	long ELF_Prstatus_pr_ppid;
+	long ELF_Prstatus_pr_pgrp;
+	long ELF_Prstatus_pr_sid;
+	long ELF_Prstatus_pr_utime;
+	long ELF_Prstatus_pr_stime;
+	long ELF_Prstatus_pr_cutime;
+	long ELF_Prstatus_pr_cstime;
+	long ELF_Prstatus_pr_reg;
+	long ELF_Prstatus_pr_fpvalid;
+	long ELF_Timeval_tv_sec;
+	long ELF_Timeval_tv_usec;
+	/* arch_domain */
+#ifdef IA64
+	long arch_domain_mm;
+#endif
+	/* arch_shared_info */
+	long arch_shared_info_max_pfn;
+	long arch_shared_info_pfn_to_mfn_frame_list_list;
+	long arch_shared_info_nmi_reason;
+	/* cpu_info */
+	long cpu_info_guest_cpu_user_regs;
+	long cpu_info_processor_id;
+	long cpu_info_current_vcpu;
+	/* cpu_time */
+	long cpu_time_local_tsc_stamp;
+	long cpu_time_stime_local_stamp;
+	long cpu_time_stime_master_stamp;
+	long cpu_time_tsc_scale;
+	long cpu_time_calibration_timer;
+	/* cpuinfo_ia64 */
+	long cpuinfo_ia64_proc_freq;
+	long cpuinfo_ia64_vendor;
+	/* crash_note_t */
+	long crash_note_t_core;			/* elf note v2, v3 */
+	long crash_note_t_xen;			/* elf note v2 */
+	long crash_note_t_xen_regs;		/* elf note v3 */
+	long crash_note_t_xen_info;		/* elf note v3 */
+	/* crash_note_core_t elf note v2, v3 */
+	long crash_note_core_t_note;
+	long crash_note_core_t_desc;
+	/* crash_note_xen_t elf note v2 */
+	long crash_note_xen_t_note;
+	long crash_note_xen_t_desc;
+	/* crash_note_xen_core_t elf note v3 */
+	long crash_note_xen_core_t_note;
+	long crash_note_xen_core_t_desc;
+	/* crash_note_xen_info_t elf note v3 */
+	long crash_note_xen_info_t_note;
+	long crash_note_xen_info_t_desc;
+	/* domain */
+	long domain_page_list;
+	long domain_xenpage_list;
+	long domain_domain_id;
+	long domain_tot_pages;
+	long domain_max_pages;
+	long domain_xenheap_pages;
+	long domain_shared_info;
+	long domain_sched_priv;
+	long domain_next_in_list;
+	long domain_domain_flags;
+	long domain_evtchn;
+	long domain_is_hvm;
+	long domain_is_privileged;
+	long domain_debugger_attached;
+	long domain_is_polling;
+	long domain_is_dying;
+	long domain_is_paused_by_controller;
+	long domain_is_shutting_down;
+	long domain_is_shut_down;
+	long domain_vcpu;
+	long domain_arch;
+#ifdef IA64
+	/* mm_struct */
+	long mm_struct_pgd;
+#endif
+	/* schedule_data */
+	long schedule_data_schedule_lock;
+	long schedule_data_curr;
+	long schedule_data_idle;
+	long schedule_data_sched_priv;
+	long schedule_data_s_timer;
+	long schedule_data_tick;
+	/* scheduler */
+	long scheduler_name;
+	long scheduler_opt_name;
+	long scheduler_sched_id;
+	long scheduler_init;
+	long scheduler_tick;
+	long scheduler_init_vcpu;
+	long scheduler_destroy_domain;
+	long scheduler_sleep;
+	long scheduler_wake;
+	long scheduler_set_affinity;
+	long scheduler_do_schedule;
+	long scheduler_adjust;
+	long scheduler_dump_settings;
+	long scheduler_dump_cpu_state;
+	/* shared_info */
+	long shared_info_vcpu_info;
+	long shared_info_evtchn_pending;
+	long shared_info_evtchn_mask;
+	long shared_info_arch;
+	/* timer */
+	long timer_expires;
+	long timer_cpu;
+	long timer_function;
+	long timer_data;
+	long timer_heap_offset;
+	long timer_killed;
+	/* tss */
+	long tss_struct_rsp0;
+	long tss_struct_esp0;
+	long tss_struct_ist;
+	/* vcpu */
+	long vcpu_vcpu_id;
+	long vcpu_processor;
+	long vcpu_vcpu_info;
+	long vcpu_domain;
+	long vcpu_next_in_list;
+	long vcpu_timer;
+	long vcpu_sleep_tick;
+	long vcpu_poll_timer;
+	long vcpu_sched_priv;
+	long vcpu_runstate;
+	long vcpu_runstate_guest;
+	long vcpu_vcpu_flags;
+	long vcpu_pause_count;
+	long vcpu_virq_to_evtchn;
+	long vcpu_cpu_affinity;
+	long vcpu_nmi_addr;
+	long vcpu_vcpu_dirty_cpumask;
+	long vcpu_arch;
+#ifdef IA64
+	long vcpu_thread_ksp;
+#endif
+	/* vcpu_runstate_info */
+	long vcpu_runstate_info_state;
+	long vcpu_runstate_info_state_entry_time;
+	long vcpu_runstate_info_time;
+};
+
+/*
+ * offset, size
+ */
+#define XEN_HYPER_SIZE(X)		(SIZE_verify(xen_hyper_size_table.X, (char *)__FUNCTION__, __FILE__, __LINE__, #X))
+#define XEN_HYPER_OFFSET(X)		(OFFSET_verify(xen_hyper_offset_table.X, (char *)__FUNCTION__, __FILE__, __LINE__, #X))
+#define XEN_HYPER_INVALID_MEMBER(X)	(xen_hyper_offset_table.X == INVALID_OFFSET)
+#define XEN_HYPER_INVALID_SIZE(X)	(xen_hyper_size_table.X == -1)
+#define XEN_HYPER_VALID_SIZE(X)		(xen_hyper_size_table.X >= 0)
+#define XEN_HYPER_VALID_STRUCT(X)	(xen_hyper_size_table.X >= 0)
+#define XEN_HYPER_VALID_MEMBER(X)	(xen_hyper_offset_table.X >= 0)
+
+#define XEN_HYPER_ASSIGN_SIZE(X)	(xen_hyper_size_table.X)
+#define XEN_HYPER_ASSIGN_OFFSET(X)	(xen_hyper_offset_table.X)
+
+#define XEN_HYPER_STRUCT_SIZE_INIT(X, Y) (XEN_HYPER_ASSIGN_SIZE(X) = STRUCT_SIZE(Y))
+#define XEN_HYPER_MEMBER_SIZE_INIT(X, Y, Z) (XEN_HYPER_ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z))
+#define XEN_HYPER_MEMBER_OFFSET_INIT(X, Y, Z) (XEN_HYPER_ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z))
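+/*
+ * Usage sketch: the *_INIT macros are run once against the debuginfo
+ * (e.g. during xen_hyper_init()) and the SIZE/OFFSET accessors are then
+ * used when decoding raw structure buffers:
+ *
+ *	XEN_HYPER_STRUCT_SIZE_INIT(vcpu, "vcpu");
+ *	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_id, "vcpu", "vcpu_id");
+ *	...
+ *	vcpu_id = INT(vcpu_struct + XEN_HYPER_OFFSET(vcpu_vcpu_id));
+ *
+ * INT() is the existing crash buffer-decoding macro; vcpu_struct is a
+ * hypothetical buffer previously filled by readmem().
+ */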
+
+/*
+ * System
+ */
+#define XEN_HYPER_MAX_CPUS() (xht->max_cpus)
+#define XEN_HYPER_CRASHING_CPU() (xht->crashing_cpu)
+
+/*
+ * Dump information
+ */
+#define XEN_HYPER_X86_NOTE_EIP(regs) (regs[12])
+#define XEN_HYPER_X86_NOTE_ESP(regs) (regs[15])
+#define XEN_HYPER_X86_64_NOTE_RIP(regs) (regs[16])
+#define XEN_HYPER_X86_64_NOTE_RSP(regs) (regs[19])
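+/*
+ * The regs argument above is the pr_reg register array taken from an
+ * ELF_Prstatus note; a rough sketch of recovering the crashing context
+ * on x86_64, assuming prs_buf holds a copied ELF_Prstatus:
+ *
+ *	ulong *regs = (ulong *)(prs_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg));
+ *	ulong rip = XEN_HYPER_X86_64_NOTE_RIP(regs);
+ *	ulong rsp = XEN_HYPER_X86_64_NOTE_RSP(regs);
+ */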
+
+/*
+ * Domain
+ */
+#define XEN_HYPER_DOMAIN_F_INIT 0x1
+
+#define XEN_HYPER_NR_DOMAINS() (xht->domains)
+#define XEN_HYPER_RUNNING_DOMAINS() (xhdt->running_domains)
+
+/*
+ * Physical CPU
+ */
+#define XEN_HYPER_NR_PCPUS() (xht->pcpus)
+#define for_cpu_indexes(i, cpuid)		\
+	for (i = 0, cpuid = xht->cpu_idxs[i];	\
+	i < XEN_HYPER_NR_PCPUS();		\
+	cpuid = xht->cpu_idxs[++i])
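+/*
+ * Usage sketch: cpu_idxs[] holds the ids of the online physical cpus,
+ * so callers iterate them with, for example:
+ *
+ *	int i;
+ *	ulong cpuid;
+ *	for_cpu_indexes(i, cpuid)
+ *		xen_hyper_print_bt_header(fp, cpuid, TRUE);
+ */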
+#define XEN_HYPER_CURR_VCPU(pcpuid) \
+	(xen_hyper_get_active_vcpu_from_pcpuid(pcpuid))
+
+/*
+ * VCPU
+ */
+#define XEN_HYPER_VCPU_F_INIT 0x1
+
+#define XEN_HYPER_NR_VCPUS_IN_DOM(domain_context) (domain_context->vcpu_cnt)
+#define XEN_HYPER_VCPU_LAST_CONTEXT()	(xhvct->last)
+
+/*
+ * tools
+ */
+#define XEN_HYPER_PRI(fp, len, str, buf, flag, args)	\
+	sprintf args;				\
+	xen_hyper_fpr_indent(fp, len, str, buf, flag);
+#define XEN_HYPER_PRI_CONST(fp, len, str, flag)	\
+	xen_hyper_fpr_indent(fp, len, str, NULL, flag);
+
+#define XEN_HYPER_PRI_L		(0x0)
+#define XEN_HYPER_PRI_R		(0x1)
+#define XEN_HYPER_PRI_LF	(0x2)
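+/*
+ * Usage sketch: XEN_HYPER_PRI() expands "args" into an sprintf() call
+ * and then prints the result indented/aligned, so a typical call looks
+ * roughly like:
+ *
+ *	XEN_HYPER_PRI(fp, len, "vcpu_id: ", buf, flag,
+ *		(buf, "%d\n", vcc->vcpu_id));
+ *
+ * where buf is a scratch buffer and vcc a filled-in vcpu context; the
+ * member shown is only an example.
+ */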
+
+/*
+ * Global data
+ */
+extern struct xen_hyper_machdep_table *xhmachdep;
+extern struct xen_hyper_table *xht;
+extern struct xen_hyper_dumpinfo_table *xhdit;
+extern struct xen_hyper_domain_table *xhdt;
+extern struct xen_hyper_vcpu_table *xhvct;
+extern struct xen_hyper_pcpu_table *xhpct;
+extern struct xen_hyper_sched_table *xhscht;
+extern struct xen_hyper_symbol_table_data *xhsymt;
+
+extern struct xen_hyper_offset_table xen_hyper_offset_table;
+extern struct xen_hyper_size_table xen_hyper_size_table;
+
+extern struct command_table_entry xen_hyper_command_table[];
+extern struct task_context fake_tc;
+
+/*
+ * Xen Hyper command help
+ */
+extern char *xen_hyper_help_domain[];
+extern char *xen_hyper_help_doms[];
+extern char *xen_hyper_help_dumpinfo[];
+extern char *xen_hyper_help_log[];
+extern char *xen_hyper_help_pcpus[];
+extern char *xen_hyper_help_sched[];
+extern char *xen_hyper_help_sys[];
+extern char *xen_hyper_help_vcpu[];
+extern char *xen_hyper_help_vcpus[];
+
+/*
+ * Prototype
+ */
+ulonglong xen_hyper_get_uptime_hyper(void);
+
+/*
+ * x86
+ */
+int xen_hyper_x86_get_smp_cpus(void);
+uint64_t xen_hyper_x86_memory_size(void);
+
+/*
+ * IA64
+ */
+int xen_hyper_ia64_get_smp_cpus(void);
+uint64_t xen_hyper_ia64_memory_size(void);
+ulong xen_hyper_ia64_processor_speed(void);
+
+/*
+ * Xen Hyper
+ */
+void xen_hyper_init(void);
+void xen_hyper_domain_init(void);
+void xen_hyper_vcpu_init(void);
+void xen_hyper_dumpinfo_init(void);
+void xen_hyper_misc_init(void);
+void xen_hyper_post_init(void);
+struct xen_hyper_dumpinfo_context *xen_hyper_id_to_dumpinfo_context(uint id);
+struct xen_hyper_dumpinfo_context *xen_hyper_note_to_dumpinfo_context(ulong note);
+char *xen_hyper_fill_elf_notes(ulong note, char *note_buf, int type);
+
+/* domain */
+void xen_hyper_refresh_domain_context_space(void);
+int xen_hyper_get_domains(void);
+char *xen_hyper_get_domain_next(int mod, ulong *next);
+domid_t xen_hyper_domain_to_id(ulong domain);
+char *xen_hyper_id_to_domain_struct(domid_t id);
+struct xen_hyper_domain_context *
+xen_hyper_domain_to_domain_context(ulong domain);
+struct xen_hyper_domain_context *
+xen_hyper_id_to_domain_context(domid_t id);
+struct xen_hyper_domain_context *
+xen_hyper_store_domain_context(struct xen_hyper_domain_context *dc,
+		ulong domain, char *dp);
+char *xen_hyper_read_domain_from_context(struct xen_hyper_domain_context *dc);
+char *xen_hyper_read_domain(ulong domain);
+char *xen_hyper_read_domain_verify(ulong domain);
+char *xen_hyper_fill_domain_struct(ulong domain, char *domain_struct);
+void xen_hyper_alloc_domain_context_space(int domains);
+ulong xen_hyper_domain_state(struct xen_hyper_domain_context *dc);
+
+/* vcpu */
+void xen_hyper_refresh_vcpu_context_space(void);
+struct xen_hyper_vcpu_context *
+xen_hyper_vcpu_to_vcpu_context(ulong vcpu);
+struct xen_hyper_vcpu_context *
+xen_hyper_id_to_vcpu_context(ulong domain, domid_t did, int vcid);
+struct xen_hyper_vcpu_context_array *
+xen_hyper_domain_to_vcpu_context_array(ulong domain);
+struct xen_hyper_vcpu_context_array *
+xen_hyper_domid_to_vcpu_context_array(domid_t id);
+struct xen_hyper_vcpu_context *
+xen_hyper_store_vcpu_context(struct xen_hyper_vcpu_context *vcc,
+	ulong vcpu, char *vcp);
+char *
+xen_hyper_read_vcpu_from_context(struct xen_hyper_vcpu_context *vcc);
+char *xen_hyper_read_vcpu(ulong vcpu);
+char *xen_hyper_read_vcpu_verify(ulong vcpu);
+char *xen_hyper_fill_vcpu_struct(ulong vcpu, char *vcpu_struct);
+void xen_hyper_alloc_vcpu_context_arrays_space(int domains);
+void xen_hyper_alloc_vcpu_context_space(struct xen_hyper_vcpu_context_array *vcca, int vcpus);
+int xen_hyper_vcpu_state(struct xen_hyper_vcpu_context *vcc);
+
+/* pcpu */
+#if defined(X86) || defined(X86_64)
+void xen_hyper_x86_pcpu_init(void);
+#elif defined(IA64)
+void xen_hyper_ia64_pcpu_init(void);
+#endif
+struct xen_hyper_pcpu_context *xen_hyper_id_to_pcpu_context(uint id);
+struct xen_hyper_pcpu_context *xen_hyper_pcpu_to_pcpu_context(ulong pcpu);
+struct xen_hyper_pcpu_context *xen_hyper_store_pcpu_context(struct xen_hyper_pcpu_context *pcc,
+	ulong pcpu, char *pcp);
+struct xen_hyper_pcpu_context *xen_hyper_store_pcpu_context_tss(struct xen_hyper_pcpu_context *pcc,
+	ulong init_tss, char *tss);
+char *xen_hyper_read_pcpu(ulong pcpu);
+char *xen_hyper_fill_pcpu_struct(ulong pcpu, char *pcpu_struct);
+void xen_hyper_alloc_pcpu_context_space(int pcpus);
+
+/* others */
+char *xen_hyper_x86_fill_cpu_data(int idx, char *cpuinfo_x86);
+char *xen_hyper_ia64_fill_cpu_data(int idx, char *cpuinfo_ia64);
+int xen_hyper_is_vcpu_crash(struct xen_hyper_vcpu_context *vcc);
+void xen_hyper_print_bt_header(FILE *out, ulong pcpu, int newline);
+ulong xen_hyper_get_active_vcpu_from_pcpuid(ulong pcpu);
+ulong xen_hyper_pcpu_to_active_vcpu(ulong pcpu);
+void xen_hyper_get_cpu_info(void);
+int xen_hyper_test_pcpu_id(uint pcpu_id);
+
+/*
+ * Xen Hyper command
+ */
+void xen_hyper_cmd_help(void);
+void xen_hyper_cmd_domain(void);
+void xen_hyper_cmd_doms(void);
+void xen_hyper_cmd_dumpinfo(void);
+void xen_hyper_cmd_log(void);
+void xen_hyper_dump_log(void);
+void xen_hyper_cmd_pcpus(void);
+void xen_hyper_cmd_sched(void);
+void xen_hyper_cmd_sys(void);
+void xen_hyper_cmd_vcpu(void);
+void xen_hyper_cmd_vcpus(void);
+void xen_hyper_display_sys_stats(void);
+
+void xen_hyper_show_vcpu_context(struct xen_hyper_vcpu_context *vcc);
+char *xen_hyper_domain_state_string(struct xen_hyper_domain_context *dc,
+	char *buf, int verbose);
+char *xen_hyper_vcpu_state_string(struct xen_hyper_vcpu_context *vcc,
+	char *buf, int verbose);
+
+/* tools */
+void xen_hyper_fpr_indent(FILE *fp, int len, char *str1, char *str2, int flag);
+
+#else
+
+#define XEN_HYPERVISOR_NOT_SUPPORTED \
+    "Xen hypervisor mode not supported on this architecture\n"
+
+#endif
--- crash/x86_64.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/x86_64.c	2009-01-26 14:56:02.000000000 -0500
@@ -1,7 +1,7 @@
 /* x86_64.c -- core analysis suite
  *
- * Copyright (C) 2004, 2005 David Anderson
- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ * Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -14,11 +14,16 @@
  * GNU General Public License for more details.
  */
 #include "defs.h"
+#include "xen_hyper_defs.h"
 
 #ifdef X86_64
 
 static int x86_64_kvtop(struct task_context *, ulong, physaddr_t *, int);
+static int x86_64_kvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int);
 static int x86_64_uvtop(struct task_context *, ulong, physaddr_t *, int);
+static int x86_64_uvtop_level4(struct task_context *, ulong, physaddr_t *, int);
+static int x86_64_uvtop_level4_xen_wpt(struct task_context *, ulong, physaddr_t *, int);
+static int x86_64_uvtop_level4_rhel4_xen_wpt(struct task_context *, ulong, physaddr_t *, int);
 static ulong x86_64_vmalloc_start(void);
 static int x86_64_is_task_addr(ulong);
 static int x86_64_verify_symbol(const char *, ulong, char);
@@ -32,14 +37,17 @@
 #define EFRAME_VERIFY (0x2)
 #define EFRAME_CS     (0x4)
 #define EFRAME_SEARCH (0x8)
+static int x86_64_print_eframe_location(ulong, int, FILE *);
 static void x86_64_back_trace_cmd(struct bt_info *);
 static ulong x86_64_in_exception_stack(struct bt_info *);
 static ulong x86_64_in_irqstack(struct bt_info *);
 static void x86_64_low_budget_back_trace_cmd(struct bt_info *);
+static void x86_64_dwarf_back_trace_cmd(struct bt_info *);
 static void x86_64_get_dumpfile_stack_frame(struct bt_info *, ulong *, ulong *);
 static struct syment *x86_64_function_called_by(ulong);
 static int is_direct_call_target(struct bt_info *);
 static void get_x86_64_frame(struct bt_info *, ulong *, ulong *);
+static ulong text_lock_function(char *, struct bt_info *, ulong);
 static int x86_64_print_stack_entry(struct bt_info *, FILE *, int, int, ulong);
 static void x86_64_display_full_frame(struct bt_info *, ulong, FILE *);
 static void x86_64_do_bt_reference_check(struct bt_info *, ulong,char *);
@@ -56,6 +64,8 @@
 static void x86_64_display_memmap(void);
 static void x86_64_dump_line_number(ulong);
 static struct line_number_hook x86_64_line_number_hooks[];
+static void x86_64_calc_phys_base(void);
+static int x86_64_is_module_addr(ulong);
 static int x86_64_is_kvaddr(ulong);
 static int x86_64_is_uvaddr(ulong, struct task_context *);
 void x86_64_compiler_warning_stub(void);
@@ -63,7 +73,25 @@
 static void x86_64_cpu_pda_init(void);
 static void x86_64_ist_init(void);
 static void x86_64_post_init(void);
-
+static void parse_cmdline_arg(void);
+static void x86_64_clear_machdep_cache(void);
+static void x86_64_irq_eframe_link_init(void);
+static int x86_64_xendump_p2m_create(struct xendump_data *);
+static char *x86_64_xendump_load_page(ulong, struct xendump_data *);
+static int x86_64_xendump_page_index(ulong, struct xendump_data *);
+static int x86_64_xen_kdump_p2m_create(struct xen_kdump_data *);
+static char *x86_64_xen_kdump_load_page(ulong, char *);
+static ulong x86_64_xen_kdump_page_mfn(ulong);
+static void x86_64_debug_dump_page(FILE *, char *, char *);
+static void x86_64_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *);
+static ulong x86_64_xendump_panic_task(struct xendump_data *);
+static void x86_64_init_hyper(int);
+static ulong x86_64_get_stackbase_hyper(ulong);
+static ulong x86_64_get_stacktop_hyper(ulong);
+static int x86_64_framesize_cache_resize(void);
+static int x86_64_framesize_cache_func(int, ulong, int *);
+static int x86_64_get_framesize(struct bt_info *, ulong);
+static void x86_64_framesize_debug(struct bt_info *);
 
 struct machine_specific x86_64_machine_specific = { 0 };
 
@@ -74,6 +102,11 @@
 void
 x86_64_init(int when)
 {
+        if (XEN_HYPER_MODE()) {
+                x86_64_init_hyper(when);
+                return;
+        }
+
 	switch (when)
 	{
 	case PRE_SYMTAB:
@@ -86,6 +119,8 @@
                 machdep->pageoffset = machdep->pagesize - 1;
                 machdep->pagemask = ~((ulonglong)machdep->pageoffset);
 		machdep->stacksize = machdep->pagesize * 2;
+                if ((machdep->machspec->upml = (char *)malloc(PAGESIZE())) == NULL)
+                        error(FATAL, "cannot malloc upml space.");
                 if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
                         error(FATAL, "cannot malloc pgd space.");
                 if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
@@ -93,17 +128,99 @@
                 if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
                         error(FATAL, "cannot malloc ptbl space.");
 		if ((machdep->machspec->pml4 = 
-			(char *)malloc(PAGESIZE())) == NULL)
+			(char *)malloc(PAGESIZE()*2)) == NULL)
                         error(FATAL, "cannot malloc pml4 space.");
+                machdep->machspec->last_upml_read = 0;
+                machdep->machspec->last_pml4_read = 0;
                 machdep->last_pgd_read = 0;
                 machdep->last_pmd_read = 0;
                 machdep->last_ptbl_read = 0;
 		machdep->verify_paddr = generic_verify_paddr;
 		machdep->ptrs_per_pgd = PTRS_PER_PGD;
 		machdep->flags |= MACHDEP_BT_TEXT;
+		machdep->flags |= FRAMESIZE_DEBUG;
+		machdep->machspec->irq_eframe_link = UNINITIALIZED;
+                if (machdep->cmdline_arg)
+                        parse_cmdline_arg();
 		break;
 
 	case PRE_GDB:
+		if (!(machdep->flags & VM_FLAGS)) {
+			if (symbol_exists("xen_start_info")) {
+				if (PVOPS())
+					machdep->flags |= VM_2_6_11;
+				else if (symbol_exists("low_pml4") && 
+				    symbol_exists("swap_low_mappings"))
+					machdep->flags |= VM_XEN_RHEL4;
+				else
+					machdep->flags |= VM_XEN;
+			} else if (symbol_exists("boot_vmalloc_pgt"))
+				machdep->flags |= VM_ORIG;
+			else
+				machdep->flags |= VM_2_6_11;
+		}
+
+		switch (machdep->flags & VM_FLAGS) 
+		{
+		case VM_ORIG:
+		        /* pre-2.6.11 layout */
+                        machdep->machspec->userspace_top = USERSPACE_TOP_ORIG;
+                        machdep->machspec->page_offset = PAGE_OFFSET_ORIG;
+                        machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_ORIG;
+                        machdep->machspec->vmalloc_end = VMALLOC_END_ORIG;
+                        machdep->machspec->modules_vaddr = MODULES_VADDR_ORIG;
+                        machdep->machspec->modules_end = MODULES_END_ORIG;
+
+			free(machdep->machspec->upml);
+			machdep->machspec->upml = NULL;
+
+	        	machdep->uvtop = x86_64_uvtop;
+			break;
+		
+		case VM_2_6_11:
+			/* 2.6.11 layout */
+			machdep->machspec->userspace_top = USERSPACE_TOP_2_6_11;
+			machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_2_6_11;
+			machdep->machspec->vmalloc_end = VMALLOC_END_2_6_11;
+			machdep->machspec->modules_vaddr = MODULES_VADDR_2_6_11;
+			machdep->machspec->modules_end = MODULES_END_2_6_11;
+
+			/* 2.6.24 layout */
+			machdep->machspec->vmemmap_vaddr = VMEMMAP_VADDR_2_6_24;
+			machdep->machspec->vmemmap_end = VMEMMAP_END_2_6_24;
+			if (symbol_exists("vmemmap_populate"))
+				machdep->flags |= VMEMMAP;
+
+			if (kernel_symbol_exists("end_pfn"))
+				/* 2.6.11 layout */
+				machdep->machspec->page_offset = PAGE_OFFSET_2_6_11;
+			else
+				/* 2.6.27 layout */
+				machdep->machspec->page_offset = PAGE_OFFSET_2_6_27;
+
+	        	machdep->uvtop = x86_64_uvtop_level4;
+			break;
+
+                case VM_XEN:
+                        /* Xen layout */
+                        machdep->machspec->userspace_top = USERSPACE_TOP_XEN;
+                        machdep->machspec->page_offset = PAGE_OFFSET_XEN;
+                        machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_XEN;
+                        machdep->machspec->vmalloc_end = VMALLOC_END_XEN;
+                        machdep->machspec->modules_vaddr = MODULES_VADDR_XEN;
+                        machdep->machspec->modules_end = MODULES_END_XEN;
+                        break;
+
+		case VM_XEN_RHEL4:
+			/* RHEL4 Xen layout */
+                        machdep->machspec->userspace_top = USERSPACE_TOP_XEN_RHEL4;
+                        machdep->machspec->page_offset = PAGE_OFFSET_XEN_RHEL4;
+                        machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_XEN_RHEL4;
+                        machdep->machspec->vmalloc_end = VMALLOC_END_XEN_RHEL4;
+                        machdep->machspec->modules_vaddr = MODULES_VADDR_XEN_RHEL4;
+                        machdep->machspec->modules_end = MODULES_END_XEN_RHEL4;
+			break;
+		}
 	        machdep->kvbase = (ulong)PAGE_OFFSET;
 		machdep->identity_map_base = (ulong)PAGE_OFFSET;
                 machdep->is_kvaddr = x86_64_is_kvaddr;
@@ -111,7 +228,6 @@
 	        machdep->eframe_search = x86_64_eframe_search;
 	        machdep->back_trace = x86_64_low_budget_back_trace_cmd;
 	        machdep->processor_speed = x86_64_processor_speed;
-	        machdep->uvtop = x86_64_uvtop;
 	        machdep->kvtop = x86_64_kvtop;
 	        machdep->get_task_pgd = x86_64_get_task_pgd;
 		machdep->get_stack_frame = x86_64_get_stack_frame;
@@ -126,6 +242,12 @@
 		machdep->line_number_hooks = x86_64_line_number_hooks;
 		machdep->value_to_symbol = generic_machdep_value_to_symbol;
 		machdep->init_kernel_pgd = x86_64_init_kernel_pgd;
+		machdep->clear_machdep_cache = x86_64_clear_machdep_cache;
+		machdep->xendump_p2m_create = x86_64_xendump_p2m_create;
+		machdep->get_xendump_regs = x86_64_get_xendump_regs;
+		machdep->xen_kdump_p2m_create = x86_64_xen_kdump_p2m_create;
+		machdep->xendump_panic_task = x86_64_xendump_panic_task;
+		x86_64_calc_phys_base();
 		break;
 
 	case POST_GDB:
@@ -140,8 +262,23 @@
 		MEMBER_OFFSET_INIT(thread_struct_rip, "thread_struct", "rip");
 		MEMBER_OFFSET_INIT(thread_struct_rsp, "thread_struct", "rsp");
 		MEMBER_OFFSET_INIT(thread_struct_rsp0, "thread_struct", "rsp0");
+		if (INVALID_MEMBER(thread_struct_rip))
+			MEMBER_OFFSET_INIT(thread_struct_rip, "thread_struct", "ip");
+		if (INVALID_MEMBER(thread_struct_rsp))
+			MEMBER_OFFSET_INIT(thread_struct_rsp, "thread_struct", "sp");
+		if (INVALID_MEMBER(thread_struct_rsp0))
+			MEMBER_OFFSET_INIT(thread_struct_rsp0, "thread_struct", "sp0");
 		STRUCT_SIZE_INIT(tss_struct, "tss_struct");
 		MEMBER_OFFSET_INIT(tss_struct_ist, "tss_struct", "ist");
+		if (INVALID_MEMBER(tss_struct_ist)) {
+			long x86_tss_offset, ist_offset;
+			x86_tss_offset = MEMBER_OFFSET("tss_struct", "x86_tss");
+			ist_offset = MEMBER_OFFSET("x86_hw_tss", "ist");
+			if ((x86_tss_offset != INVALID_OFFSET) &&
+			    (ist_offset != INVALID_OFFSET))
+				ASSIGN_OFFSET(tss_struct_ist) = x86_tss_offset + 
+					ist_offset;
+		}
 		MEMBER_OFFSET_INIT(user_regs_struct_rip,
 			"user_regs_struct", "rip");
 		MEMBER_OFFSET_INIT(user_regs_struct_rsp,
@@ -158,16 +295,49 @@
                 if ((machdep->machspec->irqstack = (char *)
 		    malloc(machdep->machspec->stkinfo.isize)) == NULL)
                         error(FATAL, "cannot malloc irqstack space.");
-               if (symbol_exists("irq_desc"))
-                        ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
-                                "irq_desc", NULL, 0);
-                else
-                        machdep->nr_irqs = 224;  /* NR_IRQS (at least) */
+		if (symbol_exists("irq_desc")) {
+			if (LKCD_KERNTYPES())
+				ARRAY_LENGTH_INIT_ALT(machdep->nr_irqs,
+				    "irq_desc", "kernel_stat.irqs", NULL, 0);
+			else
+				ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
+					"irq_desc", NULL, 0);
+		} else
+			machdep->nr_irqs = 224; /* NR_IRQS (at least) */
 		machdep->vmalloc_start = x86_64_vmalloc_start;
 		machdep->dump_irq = x86_64_dump_irq;
-		machdep->hz = HZ;
-		if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
-			machdep->hz = 1000;
+		if (!machdep->hz) {
+			machdep->hz = HZ;
+			if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
+				machdep->hz = 1000;
+		}
+		machdep->section_size_bits = _SECTION_SIZE_BITS;
+		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
+                if (XEN()) {
+			if (kt->xen_flags & WRITABLE_PAGE_TABLES) {
+				switch (machdep->flags & VM_FLAGS)
+				{
+				case VM_XEN: 
+                        		machdep->uvtop = x86_64_uvtop_level4_xen_wpt;
+					break;
+				case VM_XEN_RHEL4:
+                        		machdep->uvtop = x86_64_uvtop_level4_rhel4_xen_wpt;
+					break;
+				}
+			} else
+                        	machdep->uvtop = x86_64_uvtop_level4;
+                        MEMBER_OFFSET_INIT(vcpu_guest_context_user_regs,
+                                "vcpu_guest_context", "user_regs");
+			ASSIGN_OFFSET(cpu_user_regs_rsp) = 
+				MEMBER_OFFSET("cpu_user_regs", "ss") - sizeof(ulong);
+			ASSIGN_OFFSET(cpu_user_regs_rip) = 
+				MEMBER_OFFSET("cpu_user_regs", "cs") - sizeof(ulong);
+                }
+		x86_64_irq_eframe_link_init();
+		break;
+
+	case POST_VM:
+                init_unwind_table();
 		break;
 
 	case POST_INIT:
@@ -191,10 +361,26 @@
 		fprintf(fp, "%sKSYMS_START", others++ ? "|" : "");
 	if (machdep->flags & PT_REGS_INIT)
 		fprintf(fp, "%sPT_REGS_INIT", others++ ? "|" : "");
-	if (machdep->flags & SYSRQ)
-		fprintf(fp, "%sSYSRQ", others++ ? "|" : "");
 	if (machdep->flags & MACHDEP_BT_TEXT)
 		fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : "");
+	if (machdep->flags & VM_ORIG)
+		fprintf(fp, "%sVM_ORIG", others++ ? "|" : "");
+	if (machdep->flags & VM_2_6_11)
+		fprintf(fp, "%sVM_2_6_11", others++ ? "|" : "");
+	if (machdep->flags & VM_XEN)
+		fprintf(fp, "%sVM_XEN", others++ ? "|" : "");
+	if (machdep->flags & VM_XEN_RHEL4)
+		fprintf(fp, "%sVM_XEN_RHEL4", others++ ? "|" : "");
+	if (machdep->flags & VMEMMAP)
+		fprintf(fp, "%sVMEMMAP", others++ ? "|" : "");
+	if (machdep->flags & NO_TSS)
+		fprintf(fp, "%sNO_TSS", others++ ? "|" : "");
+	if (machdep->flags & SCHED_TEXT)
+		fprintf(fp, "%sSCHED_TEXT", others++ ? "|" : "");
+	if (machdep->flags & PHYS_BASE)
+		fprintf(fp, "%sPHYS_BASE", others++ ? "|" : "");
+	if (machdep->flags & FRAMESIZE_DEBUG)
+		fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? "|" : "");
         fprintf(fp, ")\n");
 
 	fprintf(fp, "             kvbase: %lx\n", machdep->kvbase);
@@ -215,13 +401,32 @@
         	fprintf(fp, "         back_trace: x86_64_back_trace_cmd()\n");
 	else if (machdep->back_trace == x86_64_low_budget_back_trace_cmd)
         	fprintf(fp, 
-		   "         back_trace: x86_64_low_budget_back_trace_cmd()\n");
+		   "         back_trace: x86_64_low_budget_back_trace_cmd() %s\n",
+			kt->flags & DWARF_UNWIND ?
+			"-> x86_64_dwarf_back_trace_cmd()" : "");
+	else if (machdep->back_trace == x86_64_dwarf_back_trace_cmd)
+        	fprintf(fp, 
+		   "         back_trace: x86_64_dwarf_back_trace_cmd() %s\n",
+			kt->flags & DWARF_UNWIND ? 
+			"" : "->x86_64_low_budget_back_trace_cmd()");
 	else
 		fprintf(fp, "         back_trace: %lx\n",
 			(ulong)machdep->back_trace);
         fprintf(fp, "    processor_speed: x86_64_processor_speed()\n");
-        fprintf(fp, "              uvtop: x86_64_uvtop()\n");
-        fprintf(fp, "              kvtop: x86_64_kvtop()\n");
+	if (machdep->uvtop == x86_64_uvtop)
+        	fprintf(fp, "              uvtop: x86_64_uvtop()\n");
+	else if (machdep->uvtop == x86_64_uvtop_level4)
+        	fprintf(fp, "              uvtop: x86_64_uvtop_level4()\n");
+	else if (machdep->uvtop == x86_64_uvtop_level4_xen_wpt)
+        	fprintf(fp, "              uvtop: x86_64_uvtop_level4_xen_wpt()\n");
+	else if (machdep->uvtop == x86_64_uvtop_level4_rhel4_xen_wpt)
+        	fprintf(fp, "              uvtop: x86_64_uvtop_level4_rhel4_xen_wpt()\n");
+	else
+        	fprintf(fp, "              uvtop: %lx\n", (ulong)machdep->uvtop);
+        fprintf(fp, "              kvtop: x86_64_kvtop()");
+        if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES))
+                fprintf(fp, " -> x86_64_kvtop_xen_wpt()");
+	fprintf(fp, "\n");
         fprintf(fp, "       get_task_pgd: x86_64_get_task_pgd()\n");
 	fprintf(fp, "           dump_irq: x86_64_dump_irq()\n");
         fprintf(fp, "    get_stack_frame: x86_64_get_stack_frame()\n");
@@ -239,6 +444,11 @@
         fprintf(fp, "          is_uvaddr: x86_64_is_uvaddr()\n");
         fprintf(fp, "       verify_paddr: generic_verify_paddr()\n");
         fprintf(fp, "    init_kernel_pgd: x86_64_init_kernel_pgd()\n");
+        fprintf(fp, "clear_machdep_cache: x86_64_clear_machdep_cache()\n");
+	fprintf(fp, " xendump_p2m_create: x86_64_xendump_p2m_create()\n");
+	fprintf(fp, "   get_xendump_regs: x86_64_get_xendump_regs()\n");
+	fprintf(fp, " xendump_panic_task: x86_64_xendump_panic_task()\n");
+	fprintf(fp, "xen_kdump_p2m_create: x86_64_xen_kdump_p2m_create()\n");
         fprintf(fp, "  line_number_hooks: x86_64_line_number_hooks\n");
         fprintf(fp, "    value_to_symbol: generic_machdep_value_to_symbol()\n");
         fprintf(fp, "      last_pgd_read: %lx\n", machdep->last_pgd_read);
@@ -248,9 +458,33 @@
         fprintf(fp, "                pmd: %lx\n", (ulong)machdep->pmd);
         fprintf(fp, "               ptbl: %lx\n", (ulong)machdep->ptbl);
 	fprintf(fp, "       ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd);
-	fprintf(fp, "           machspec: %lx\n", (ulong)machdep->machspec);
+	fprintf(fp, "  section_size_bits: %ld\n", machdep->section_size_bits);
+        fprintf(fp, "   max_physmem_bits: %ld\n", machdep->max_physmem_bits);
+        fprintf(fp, "  sections_per_root: %ld\n", machdep->sections_per_root);
+
+	fprintf(fp, "           machspec: %016lx\n", (ulong)machdep->machspec);
+	fprintf(fp, "            userspace_top: %016lx\n", (ulong)ms->userspace_top);
+	fprintf(fp, "              page_offset: %016lx\n", (ulong)ms->page_offset);
+	fprintf(fp, "       vmalloc_start_addr: %016lx\n", (ulong)ms->vmalloc_start_addr);
+	fprintf(fp, "              vmalloc_end: %016lx\n", (ulong)ms->vmalloc_end);
+	fprintf(fp, "            modules_vaddr: %016lx\n", (ulong)ms->modules_vaddr);
+	fprintf(fp, "              modules_end: %016lx\n", (ulong)ms->modules_end);
+	fprintf(fp, "            vmemmap_vaddr: %016lx %s\n", (ulong)ms->vmemmap_vaddr,
+		machdep->flags & VMEMMAP ? "" : "(unused)");
+	fprintf(fp, "              vmemmap_end: %016lx %s\n", (ulong)ms->vmemmap_end,
+		machdep->flags & VMEMMAP ? "" : "(unused)");
+	fprintf(fp, "                phys_base: %lx\n", (ulong)ms->phys_base);
 	fprintf(fp, "                     pml4: %lx\n", (ulong)ms->pml4);
+	fprintf(fp, "           last_pml4_read: %lx\n", (ulong)ms->last_pml4_read);
+	if (ms->upml) {
+		fprintf(fp, "                     upml: %lx\n", (ulong)ms->upml);
+		fprintf(fp, "           last_upml_read: %lx\n", (ulong)ms->last_upml_read);
+	} else {
+		fprintf(fp, "                     upml: (unused)\n");
+		fprintf(fp, "           last_upml_read: (unused)\n");
+	}
 	fprintf(fp, "                 irqstack: %lx\n", (ulong)ms->irqstack);
+	fprintf(fp, "          irq_eframe_link: %ld\n", ms->irq_eframe_link);
 	fprintf(fp, "                      pto: %s",
 		machdep->flags & PT_REGS_INIT ? "\n" : "(uninitialized)\n");
 	if (machdep->flags & PT_REGS_INIT) {
@@ -276,8 +510,10 @@
 	fprintf(fp, "                           rsp: %ld\n", ms->pto.rsp);
 	fprintf(fp, "                            ss: %ld\n", ms->pto.ss);
 	}
-	fprintf(fp, "                  stkinfo: esize: %d isize: %d\n", 
-		ms->stkinfo.esize, ms->stkinfo.isize);
+	fprintf(fp, "                  stkinfo: esize: %d%sisize: %d\n", 
+		ms->stkinfo.esize, 
+		machdep->flags & NO_TSS ? " (NO TSS) " : " ",
+		ms->stkinfo.isize);
 	fprintf(fp, "                           ebase[%s][7]:",
 		arg ? "NR_CPUS" : "cpus");
 	cpus = arg ? NR_CPUS : kt->cpus;
@@ -306,9 +542,9 @@
 static void 
 x86_64_cpu_pda_init(void)
 {
-	int i, cpus, nr_pda, cpunumber;
+	int i, cpus, nr_pda, cpunumber, _cpu_pda, _boot_cpu_pda;
 	char *cpu_pda_buf;
-	ulong level4_pgt, data_offset;
+	ulong level4_pgt, data_offset, cpu_pda_addr;
 	struct syment *sp, *nsp;
 	ulong offset, istacksize;
 
@@ -320,18 +556,54 @@
 	MEMBER_OFFSET_INIT(x8664_pda_irqstackptr, "x8664_pda", "irqstackptr");
 	MEMBER_OFFSET_INIT(x8664_pda_level4_pgt, "x8664_pda", "level4_pgt");
 	MEMBER_OFFSET_INIT(x8664_pda_cpunumber, "x8664_pda", "cpunumber");
+	MEMBER_OFFSET_INIT(x8664_pda_me, "x8664_pda", "me");
 
 	cpu_pda_buf = GETBUF(SIZE(x8664_pda));
 
-	if (!(nr_pda = get_array_length("cpu_pda", NULL, 0)))
-		nr_pda = NR_CPUS;
-
+	if (LKCD_KERNTYPES()) {
+		if (symbol_exists("_cpu_pda"))
+			_cpu_pda = TRUE;
+		else
+ 			_cpu_pda = FALSE;
+		nr_pda = get_cpus_possible();
+	} else {
+		if (symbol_exists("_cpu_pda")) {
+			if (!(nr_pda = get_array_length("_cpu_pda", NULL, 0)))
+				nr_pda = NR_CPUS;
+			_cpu_pda = TRUE;
+		} else {
+			if (!(nr_pda = get_array_length("cpu_pda", NULL, 0)))
+				nr_pda = NR_CPUS;
+			_cpu_pda = FALSE;
+		}
+	}
+	if (_cpu_pda) {
+		if (symbol_exists("_boot_cpu_pda"))
+			_boot_cpu_pda = TRUE;
+		else
+			_boot_cpu_pda = FALSE;
+	}
 	for (i = cpus = 0; i < nr_pda; i++) {
-		if (!CPU_PDA_READ(i, cpu_pda_buf))
-			break;
-		level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt));
+		if (_cpu_pda) {
+			if (_boot_cpu_pda) {
+				if (!_CPU_PDA_READ2(i, cpu_pda_buf))
+					break;
+			} else {
+				if (!_CPU_PDA_READ(i, cpu_pda_buf))
+					break;
+			}
+		} else {
+			if (!CPU_PDA_READ(i, cpu_pda_buf))
+				break;
+		}
+
+		if (VALID_MEMBER(x8664_pda_level4_pgt)) {
+			level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt));
+			if (!VALID_LEVEL4_PGT_ADDR(level4_pgt))
+				break;
+		}
 		cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber));
-		if (!VALID_LEVEL4_PGT_ADDR(level4_pgt) || (cpunumber != cpus))
+		if (cpunumber != cpus)
 			break;
 		cpus++;
 
@@ -351,8 +623,8 @@
 				i, level4_pgt, data_offset);
 	}
 
-
-	if ((i = get_array_length("boot_cpu_stack", NULL, 0))) {
+	if (!LKCD_KERNTYPES() &&
+	    (i = get_array_length("boot_cpu_stack", NULL, 0))) {
 		istacksize = i;
 	} else if ((sp = symbol_search("boot_cpu_stack")) &&
  	    (nsp = next_symbol(NULL, sp))) {
@@ -381,8 +653,9 @@
 	 *  the address of &boot_cpu_stack[0].
 	 */
 	sp = value_search(machdep->machspec->stkinfo.ibase[0], &offset);
-	if (!sp || offset || !STREQ(sp->name, "boot_cpu_stack")) {
-		if (symbol_value("boot_cpu_stack")) {
+	nsp = symbol_search("boot_cpu_stack");
+	if (!sp || offset || !nsp || (sp->value != nsp->value)) {
+		if (symbol_exists("boot_cpu_stack")) {
 			error(WARNING, 
 		       "cpu 0 IRQ stack: %lx\n         boot_cpu_stack: %lx\n\n",
 				machdep->machspec->stkinfo.ibase[0], 
@@ -448,6 +721,13 @@
                         if (ms->stkinfo.ebase[c][0] == 0)
                                 break;
 		}
+	} else if (!symbol_exists("boot_exception_stacks")) {
+		machdep->flags |= NO_TSS;
+
+		if (CRASHDEBUG(1))
+			error(NOTE, "CONFIG_X86_NO_TSS\n");
+
+		return;
 	}
 
 	if (ms->stkinfo.ebase[0][0] && ms->stkinfo.ebase[0][1])
@@ -535,6 +815,10 @@
 		if (clues >= 2) 
 			kt->cpu_flags[c] |= NMI;
         }
+
+	if (symbol_exists("__sched_text_start") && 
+	    (symbol_value("__sched_text_start") == symbol_value("schedule")))
+		machdep->flags |= SCHED_TEXT;
 }
 
 /*
@@ -576,7 +860,7 @@
 ulong x86_64_VTOP(ulong vaddr) 
 {
 	if (vaddr >= __START_KERNEL_map)
-		return ((vaddr) - (ulong)__START_KERNEL_map);
+		return ((vaddr) - (ulong)__START_KERNEL_map + machdep->machspec->phys_base);
 	else
 		return ((vaddr) - PAGE_OFFSET);
 }
@@ -584,12 +868,21 @@
 /*
  *  Include both vmalloc'd and module address space as VMALLOC space.
  */
-int x86_64_IS_VMALLOC_ADDR(ulong vaddr)
+int 
+x86_64_IS_VMALLOC_ADDR(ulong vaddr)
 {
 	return ((vaddr >= VMALLOC_START && vaddr <= VMALLOC_END) ||
+                ((machdep->flags & VMEMMAP) && 
+		 (vaddr >= VMEMMAP_VADDR && vaddr <= VMEMMAP_END)) ||
                 (vaddr >= MODULES_VADDR && vaddr <= MODULES_END));
 }
 
+static int 
+x86_64_is_module_addr(ulong vaddr)
+{
+	return (vaddr >= MODULES_VADDR && vaddr <= MODULES_END);
+}
+
 /*
  *  Refining this may cause more problems than just doing it this way.
  */
@@ -616,43 +909,52 @@
  */
 
 static int
-x86_64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose)
+x86_64_uvtop_level4(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose)
 {
-       	ulong mm;
-        ulong *pgd;
+	ulong mm;
+	ulong *pml;
+	ulong pml_paddr;
+	ulong pml_pte;
+	ulong *pgd;
 	ulong pgd_paddr;
 	ulong pgd_pte;
 	ulong *pmd;
 	ulong pmd_paddr;
 	ulong pmd_pte;
-        ulong *ptep;
-        ulong pte_paddr;
-        ulong pte;
-        physaddr_t physpage;
+	ulong *ptep;
+	ulong pte_paddr;
+	ulong pte;
+	physaddr_t physpage;
 
-        if (!tc)
-                error(FATAL, "current context invalid\n");
+	if (!tc)
+		error(FATAL, "current context invalid\n");
 
-        *paddr = 0;
+	*paddr = 0;
 
-        if (IS_KVADDR(uvaddr))
-                return x86_64_kvtop(tc, uvaddr, paddr, verbose);
+	if (IS_KVADDR(uvaddr))
+		return x86_64_kvtop(tc, uvaddr, paddr, verbose);
 
-        /*
-         *  pgd = pgd_offset(mm, address);
-         */
-        if ((mm = task_mm(tc->task, TRUE)))
-                pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd));
-        else
-                readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd,
-                        sizeof(long), "mm_struct pgd", FAULT_ON_ERROR);
+	if ((mm = task_mm(tc->task, TRUE)))
+		pml = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd));
+	else
+		readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pml,
+			sizeof(long), "mm_struct pgd", FAULT_ON_ERROR);
 
-        pgd_paddr = x86_64_VTOP((ulong)pgd);
-        FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE());
+	pml_paddr = x86_64_VTOP((ulong)pml);
+	FILL_UPML(pml_paddr, PHYSADDR, PAGESIZE());
+	pml = ((ulong *)pml_paddr) + pml4_index(uvaddr); 
+	pml_pte = ULONG(machdep->machspec->upml + PAGEOFFSET(pml));
+	if (verbose) 
+		fprintf(fp, "   PML: %lx => %lx\n", (ulong)pml, pml_pte);
+	if (!(pml_pte & _PAGE_PRESENT))
+		goto no_upage;
+
+	pgd_paddr = pml_pte & PHYSICAL_PAGE_MASK;
+	FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE());
 	pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); 
 	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd));
-        if (verbose) 
-                fprintf(fp, "   PGD: %lx => %lx\n", (ulong)pgd, pgd_pte);
+	if (verbose) 
+                fprintf(fp, "   PUD: %lx => %lx\n", (ulong)pgd, pgd_pte);
 	if (!(pgd_pte & _PAGE_PRESENT))
 		goto no_upage;
 
@@ -682,29 +984,31 @@
 
         /*
 	 *  ptep = pte_offset_map(pmd, address);
-         *  pte = *ptep;
+	 *  pte = *ptep;
 	 */
-        pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK;
-        FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE());
-        ptep = ((ulong *)pte_paddr) + pte_index(uvaddr);
-        pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep));
-        if (verbose)
-                fprintf(fp, "   PTE: %lx => %lx\n", (ulong)ptep, pte);
-        if (!(pte & (_PAGE_PRESENT))) {
-                if (pte && verbose) {
-                        fprintf(fp, "\n");
-                        x86_64_translate_pte(pte, 0, 0);
-                }
-                goto no_upage;
-        }
+	pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK;
+	FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE());
+	ptep = ((ulong *)pte_paddr) + pte_index(uvaddr);
+	pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep));
+	if (verbose)
+		fprintf(fp, "   PTE: %lx => %lx\n", (ulong)ptep, pte);
+	if (!(pte & (_PAGE_PRESENT))) {
+		*paddr = pte;
+
+		if (pte && verbose) {
+			fprintf(fp, "\n");
+			x86_64_translate_pte(pte, 0, 0);
+		}
+		goto no_upage;
+	}
 
-        *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr);
+	*paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr);
 
-        if (verbose) {
-                fprintf(fp, "  PAGE: %lx\n\n", 
+	if (verbose) {
+		fprintf(fp, "  PAGE: %lx\n\n", 
 			PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK);
-                x86_64_translate_pte(pte, 0, 0);
-        }
+		x86_64_translate_pte(pte, 0, 0);
+	}
 
 	return TRUE;
 
@@ -713,1982 +1017,4872 @@
 	return FALSE;
 }
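
The level-4 walker above (x86_64_uvtop_level4) derives one 9-bit table index per level from the virtual address via pml4_index(), pgd_index(), pmd_index() and pte_index(). A minimal standalone sketch of that index arithmetic, using the architectural x86_64 shift values rather than anything defined in this patch:

#include <stdio.h>

typedef unsigned long ulong;

#define PML4_SHIFT 39		/* bits 47..39 select the PML4 entry */
#define PUD_SHIFT  30		/* bits 38..30 select the upper pgd (PUD) entry */
#define PMD_SHIFT  21		/* bits 29..21 select the PMD entry */
#define PTE_SHIFT  12		/* bits 20..12 select the PTE */
#define PTRS_PER_LEVEL 512	/* 9 index bits per level */

static ulong level_index(ulong vaddr, int shift)
{
	return (vaddr >> shift) & (PTRS_PER_LEVEL - 1);
}

int main(void)
{
	ulong uvaddr = 0x00007f8e12345678UL;	/* arbitrary user address */

	printf("PML4 %lu  PUD %lu  PMD %lu  PTE %lu  page offset %lu\n",
		level_index(uvaddr, PML4_SHIFT),
		level_index(uvaddr, PUD_SHIFT),
		level_index(uvaddr, PMD_SHIFT),
		level_index(uvaddr, PTE_SHIFT),
		uvaddr & 0xfffUL);
	return 0;
}

A present PSE entry at the PMD level short-circuits the last step and maps a 2MB page, which is why the walkers above check _PAGE_PSE before descending to the PTE.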
 
-
-/*
- *  Translates a kernel virtual address to its physical address.  cmd_vtop()
- *  sets the verbose flag so that the pte translation gets displayed; all
- *  other callers quietly accept the translation.
- */
 static int
-x86_64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
+x86_64_uvtop_level4_xen_wpt(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose)
 {
-	ulong *pml4;
-        ulong *pgd;
+	ulong mm;
+	ulong *pml;
+	ulong pml_paddr;
+	ulong pml_pte;
+	ulong *pgd;
 	ulong pgd_paddr;
 	ulong pgd_pte;
 	ulong *pmd;
 	ulong pmd_paddr;
 	ulong pmd_pte;
+	ulong pseudo_pmd_pte;
 	ulong *ptep;
 	ulong pte_paddr;
 	ulong pte;
+	ulong pseudo_pte;
 	physaddr_t physpage;
+	char buf[BUFSIZE];
 
-        if (!IS_KVADDR(kvaddr))
-                return FALSE;
+	if (!tc)
+		error(FATAL, "current context invalid\n");
 
-        if (!vt->vmalloc_start) {
-                *paddr = x86_64_VTOP(kvaddr);
-                return TRUE;
-        }
+	*paddr = 0;
 
-        if (!IS_VMALLOC_ADDR(kvaddr)) {
-                *paddr = x86_64_VTOP(kvaddr);
-                if (!verbose)
-                        return TRUE;
-        }
-	
- 	/*	
-	 *  pgd = pgd_offset_k(addr);
-	 */
-	FILL_PML4();
-	pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);  
-        if (verbose) {
-		fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]);
-                fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4);
-	}
-	if (!(*pml4) & _PAGE_PRESENT)
-		goto no_kpage;
-	pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK;
+	if (IS_KVADDR(uvaddr))
+		return x86_64_kvtop(tc, uvaddr, paddr, verbose);
+
+	if ((mm = task_mm(tc->task, TRUE)))
+		pml = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd));
+	else
+		readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pml,
+			sizeof(long), "mm_struct pgd", FAULT_ON_ERROR);
+
+	pml_paddr = x86_64_VTOP((ulong)pml);
+	FILL_UPML(pml_paddr, PHYSADDR, PAGESIZE());
+	pml = ((ulong *)pml_paddr) + pml4_index(uvaddr); 
+	pml_pte = ULONG(machdep->machspec->upml + PAGEOFFSET(pml));
+	if (verbose) 
+		fprintf(fp, "   PML: %lx => %lx [machine]\n", (ulong)pml, pml_pte);
+	if (!(pml_pte & _PAGE_PRESENT))
+		goto no_upage;
+
+	pgd_paddr = pml_pte & PHYSICAL_PAGE_MASK;
+	pgd_paddr = xen_m2p(pgd_paddr);
+	if (verbose)
+		fprintf(fp, "   PML: %lx\n", pgd_paddr);
 	FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE());
-	pgd = ((ulong *)pgd_paddr) + pgd_index(kvaddr); 
+	pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); 
 	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd));
-        if (verbose) 
-                fprintf(fp, "   PGD: %lx => %lx\n", (ulong)pgd, pgd_pte);
+	if (verbose) 
+                fprintf(fp, "   PUD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte);
 	if (!(pgd_pte & _PAGE_PRESENT))
-		goto no_kpage;
+		goto no_upage;
 
 	/*
-	 *  pmd = pmd_offset(pgd, addr); 
+         *  pmd = pmd_offset(pgd, address);
 	 */
 	pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK;
+	pmd_paddr = xen_m2p(pmd_paddr);
+	if (verbose)
+                fprintf(fp, "   PUD: %lx\n", pmd_paddr);
 	FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE());
-	pmd = ((ulong *)pmd_paddr) + pmd_index(kvaddr);
+	pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr);
 	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd));
         if (verbose) 
-                fprintf(fp, "   PMD: %lx => %lx\n", (ulong)pmd, pmd_pte);
+                fprintf(fp, "   PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte);
 	if (!(pmd_pte & _PAGE_PRESENT))
-		goto no_kpage;
-	if (pmd_pte & _PAGE_PSE) {
-		if (verbose) {
-			fprintf(fp, "  PAGE: %lx  (2MB)\n\n", 
+		goto no_upage;
+        if (pmd_pte & _PAGE_PSE) {
+                if (verbose)
+                        fprintf(fp, "  PAGE: %lx  (2MB) [machine]\n", 
 				PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK);
-                       	x86_64_translate_pte(pmd_pte, 0, 0);
+
+		pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte));
+
+                if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) {
+                        if (verbose)
+                                fprintf(fp, " PAGE: page not available\n");
+                        *paddr = PADDR_NOT_AVAILABLE;
+                        return FALSE;
                 }
 
-                physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + 
-			(kvaddr & ~_2MB_PAGE_MASK);
+		pseudo_pmd_pte |= PAGEOFFSET(pmd_pte);
+
+                if (verbose) {
+                        fprintf(fp, " PAGE: %s  (2MB)\n\n",
+                                mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                                MKSTR(PAGEBASE(pseudo_pmd_pte) & 
+				PHYSICAL_PAGE_MASK)));
+
+                        x86_64_translate_pte(pseudo_pmd_pte, 0, 0);
+                }
+
+                physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + 
+			(uvaddr & ~_2MB_PAGE_MASK);
+
                 *paddr = physpage;
                 return TRUE;
-	}
+        }
 
-	/*
-	 *  ptep = pte_offset_map(pmd, addr);
+        /*
+	 *  ptep = pte_offset_map(pmd, address);
 	 *  pte = *ptep;
 	 */
 	pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK;
+	pte_paddr = xen_m2p(pte_paddr);
+	if (verbose)
+		fprintf(fp, "   PMD: %lx\n", pte_paddr);
 	FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE());
-	ptep = ((ulong *)pte_paddr) + pte_index(kvaddr);
+	ptep = ((ulong *)pte_paddr) + pte_index(uvaddr);
 	pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep));
-        if (verbose) 
-                fprintf(fp, "   PTE: %lx => %lx\n", (ulong)ptep, pte);
-        if (!(pte & (_PAGE_PRESENT))) {
-                if (pte && verbose) {
-                        fprintf(fp, "\n");
-                        x86_64_translate_pte(pte, 0, 0);
-                }
-                goto no_kpage;
-        }
-
-        *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr);
-
-        if (verbose) {
-                fprintf(fp, "  PAGE: %lx\n\n", 
+	if (verbose)
+		fprintf(fp, "   PTE: %lx => %lx [machine]\n", (ulong)ptep, pte);
+	if (!(pte & (_PAGE_PRESENT))) {
+		*paddr = pte;
+
+		if (pte && verbose) {
+			fprintf(fp, "\n");
+			x86_64_translate_pte(pte, 0, 0);
+		}
+		goto no_upage;
+	}
+	
+	pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK);
+	if (verbose)
+		fprintf(fp, "   PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte));
+
+	*paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr);
+
+	if (verbose) {
+		fprintf(fp, "  PAGE: %lx [machine]\n", 
+			PAGEBASE(pte) & PHYSICAL_PAGE_MASK);
+		fprintf(fp, "  PAGE: %lx\n\n", 
 			PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK);
-                x86_64_translate_pte(pte, 0, 0);
-        }
+		x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0);
+	}
 
-        return TRUE;
+	return TRUE;
 
-no_kpage:
-        return FALSE;
-}
+no_upage:
 
-/*
- *  Determine where vmalloc'd memory starts.
- */
-static ulong
-x86_64_vmalloc_start(void)
-{
-	return ((ulong)VMALLOC_START);
+	return FALSE;
 }
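
The _xen_wpt variants above differ from the bare-metal walk in one respect: with Xen writable page tables every table entry holds a machine address, so each level is passed through xen_m2p() to recover a pseudo-physical address before the next table can be read. A sketch of that idea over a hypothetical lookup table; the table layout and entries are invented for illustration, and crash's real machine-to-pseudo mapping is built elsewhere:

#include <stdio.h>
#include <stddef.h>

typedef unsigned long ulong;

#define PAGE_SHIFT 12
#define MACHADDR_NOT_FOUND (~0UL)

struct m2p_entry { ulong mfn; ulong pfn; };	/* hypothetical pair */

static struct m2p_entry m2p_table[] = {		/* hypothetical sample data */
	{ 0x1a2b3, 0x00042 },
	{ 0x1a2b4, 0x00043 },
};

/* translate a machine address to a pseudo-physical address, page by page */
static ulong m2p(ulong machine_addr)
{
	ulong mfn = machine_addr >> PAGE_SHIFT;
	size_t i;

	for (i = 0; i < sizeof(m2p_table)/sizeof(m2p_table[0]); i++)
		if (m2p_table[i].mfn == mfn)
			return (m2p_table[i].pfn << PAGE_SHIFT) |
				(machine_addr & ((1UL << PAGE_SHIFT) - 1));

	return MACHADDR_NOT_FOUND;	/* caller reports "page not available" */
}

int main(void)
{
	printf("machine 0x%lx -> pseudo-physical 0x%lx\n",
		0x1a2b3f00UL, m2p(0x1a2b3f00UL));
	return 0;
}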
 
-/*
- *  thread_info implementation makes for less accurate results here.
- */
 static int
-x86_64_is_task_addr(ulong task)
+x86_64_uvtop_level4_rhel4_xen_wpt(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose)
 {
-        if (tt->flags & THREAD_INFO)
-                return IS_KVADDR(task);
-        else
-                return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0));
-}
+	ulong mm;
+	ulong *pgd;
+	ulong pgd_paddr;
+	ulong pgd_pte;
+	ulong *pmd;
+	ulong pmd_paddr;
+	ulong pmd_pte;
+	ulong pseudo_pmd_pte;
+	ulong *ptep;
+	ulong pte_paddr;
+	ulong pte;
+	ulong pseudo_pte;
+	physaddr_t physpage;
+	char buf[BUFSIZE];
 
+	if (!tc)
+		error(FATAL, "current context invalid\n");
 
-/*
- *  easy enough...
- */
-static ulong
-x86_64_processor_speed(void)
-{
-        unsigned long cpu_khz;
+	*paddr = 0;
 
-        if (machdep->mhz)
-                return (machdep->mhz);
+	if (IS_KVADDR(uvaddr))
+		return x86_64_kvtop(tc, uvaddr, paddr, verbose);
 
-        if (symbol_exists("cpu_khz")) {
-                get_symbol_data("cpu_khz", sizeof(long), &cpu_khz);
-                if (cpu_khz)
-                        return(machdep->mhz = cpu_khz/1000);
-        }
+	if ((mm = task_mm(tc->task, TRUE)))
+		pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd));
+	else
+		readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd,
+			sizeof(long), "mm_struct pgd", FAULT_ON_ERROR);
 
-        return 0;
-}
+	pgd_paddr = x86_64_VTOP((ulong)pgd);
+	FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE());
+	pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); 
+	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd));
+	if (verbose) 
+                fprintf(fp, "   PGD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte);
+	if (!(pgd_pte & _PAGE_PRESENT))
+		goto no_upage;
+
+	/*
+         *  pmd = pmd_offset(pgd, address);
+	 */
+	pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK;
+	pmd_paddr = xen_m2p(pmd_paddr);
+	if (verbose)
+                fprintf(fp, "   PGD: %lx\n", pmd_paddr);
+	FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE());
+	pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr);
+	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd));
+        if (verbose) 
+                fprintf(fp, "   PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte);
+	if (!(pmd_pte & _PAGE_PRESENT))
+		goto no_upage;
+        if (pmd_pte & _PAGE_PSE) {
+                if (verbose)
+                        fprintf(fp, "  PAGE: %lx  (2MB) [machine]\n", 
+				PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK);
 
+		pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte));
 
-/*
- *  Accept or reject a symbol from the kernel namelist.
- */
-static int
-x86_64_verify_symbol(const char *name, ulong value, char type)
-{
-        if (STREQ(name, "_text") || STREQ(name, "_stext"))
-                machdep->flags |= KSYMS_START;
+                if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) {
+                        if (verbose)
+                                fprintf(fp, " PAGE: page not available\n");
+                        *paddr = PADDR_NOT_AVAILABLE;
+                        return FALSE;
+                }
 
-        if (!name || !strlen(name) || !(machdep->flags & KSYMS_START))
-                return FALSE;
+		pseudo_pmd_pte |= PAGEOFFSET(pmd_pte);
+
+                if (verbose) {
+                        fprintf(fp, " PAGE: %s  (2MB)\n\n",
+                                mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                                MKSTR(PAGEBASE(pseudo_pmd_pte) & 
+				PHYSICAL_PAGE_MASK)));
+
+                        x86_64_translate_pte(pseudo_pmd_pte, 0, 0);
+                }
+
+                physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + 
+			(uvaddr & ~_2MB_PAGE_MASK);
+
+                *paddr = physpage;
+                return TRUE;
+        }
+
+        /*
+	 *  ptep = pte_offset_map(pmd, address);
+	 *  pte = *ptep;
+	 */
+	pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK;
+	pte_paddr = xen_m2p(pte_paddr);
+	if (verbose)
+		fprintf(fp, "   PMD: %lx\n", pte_paddr);
+	FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE());
+	ptep = ((ulong *)pte_paddr) + pte_index(uvaddr);
+	pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep));
+	if (verbose)
+		fprintf(fp, "   PTE: %lx => %lx [machine]\n", (ulong)ptep, pte);
+	if (!(pte & (_PAGE_PRESENT))) {
+		*paddr = pte;
+
+		if (pte && verbose) {
+			fprintf(fp, "\n");
+			x86_64_translate_pte(pte, 0, 0);
+		}
+		goto no_upage;
+	}
+	
+	pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK);
+	if (verbose)
+		fprintf(fp, "   PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte));
+
+	*paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr);
+
+	if (verbose) {
+		fprintf(fp, "  PAGE: %lx [machine]\n", 
+			PAGEBASE(pte) & PHYSICAL_PAGE_MASK);
+		fprintf(fp, "  PAGE: %lx\n\n", 
+			PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK);
+		x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0);
+	}
 
 	return TRUE;
-}
 
+no_upage:
 
-/*
- *  Get the relevant page directory pointer from a task structure.
- */
-static ulong
-x86_64_get_task_pgd(ulong task)
-{
-	return (error(FATAL, "x86_64_get_task_pgd: N/A\n"));
+	return FALSE;
 }
 
-
-/*
- *  Translate a PTE, returning TRUE if the page is present.
- *  If a physaddr pointer is passed in, don't print anything.
- */
 static int
-x86_64_translate_pte(ulong pte, void *physaddr, ulonglong unused)
+x86_64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose)
 {
-	int c, others, len1, len2, len3;
-	ulong paddr;
-	char buf[BUFSIZE];
-        char buf2[BUFSIZE];
-        char buf3[BUFSIZE];
-	char ptebuf[BUFSIZE];
-	char physbuf[BUFSIZE];
-        char *arglist[MAXARGS];
-	int page_present;
+       	ulong mm;
+        ulong *pgd;
+	ulong pgd_paddr;
+	ulong pgd_pte;
+	ulong *pmd;
+	ulong pmd_paddr;
+	ulong pmd_pte;
+        ulong *ptep;
+        ulong pte_paddr;
+        ulong pte;
+        physaddr_t physpage;
 
-        paddr = pte & PHYSICAL_PAGE_MASK;
-        page_present = pte & _PAGE_PRESENT;
+        if (!tc)
+                error(FATAL, "current context invalid\n");
 
-        if (physaddr) {
-		*((ulong *)physaddr) = paddr;
-		return page_present;
-	}
-        
-	sprintf(ptebuf, "%lx", pte);
-	len1 = MAX(strlen(ptebuf), strlen("PTE"));
-	fprintf(fp, "%s  ", mkstring(buf, len1, CENTER|LJUST, "PTE"));
+        *paddr = 0;
 
-        if (!page_present && pte) {
-                swap_location(pte, buf);
-                if ((c = parse_line(buf, arglist)) != 3)
-                        error(FATAL, "cannot determine swap location\n");
+        if (IS_KVADDR(uvaddr))
+                return x86_64_kvtop(tc, uvaddr, paddr, verbose);
 
-                len2 = MAX(strlen(arglist[0]), strlen("SWAP"));
-                len3 = MAX(strlen(arglist[2]), strlen("OFFSET"));
+        /*
+         *  pgd = pgd_offset(mm, address);
+         */
+        if ((mm = task_mm(tc->task, TRUE)))
+                pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd));
+        else
+                readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd,
+                        sizeof(long), "mm_struct pgd", FAULT_ON_ERROR);
 
-                fprintf(fp, "%s  %s\n",
-                        mkstring(buf2, len2, CENTER|LJUST, "SWAP"),
-                        mkstring(buf3, len3, CENTER|LJUST, "OFFSET"));
+        pgd_paddr = x86_64_VTOP((ulong)pgd);
+        FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE());
+	pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); 
+	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd));
+        if (verbose) 
+                fprintf(fp, "   PGD: %lx => %lx\n", (ulong)pgd, pgd_pte);
+	if (!(pgd_pte & _PAGE_PRESENT))
+		goto no_upage;
 
-                strcpy(buf2, arglist[0]);
-                strcpy(buf3, arglist[2]);
-                fprintf(fp, "%s  %s  %s\n",
-                        mkstring(ptebuf, len1, CENTER|RJUST, NULL),
-                        mkstring(buf2, len2, CENTER|RJUST, NULL),
-                        mkstring(buf3, len3, CENTER|RJUST, NULL));
+	/*
+         *  pmd = pmd_offset(pgd, address);
+	 */
+	pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK;
+	FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE());
+	pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr);
+	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd));
+        if (verbose) 
+                fprintf(fp, "   PMD: %lx => %lx\n", (ulong)pmd, pmd_pte);
+	if (!(pmd_pte & _PAGE_PRESENT))
+		goto no_upage;
+        if (pmd_pte & _PAGE_PSE) {
+                if (verbose) {
+                        fprintf(fp, "  PAGE: %lx  (2MB)\n\n", 
+				PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK);
+                        x86_64_translate_pte(pmd_pte, 0, 0);
+                }
 
-                return page_present;
+                physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + 
+			(uvaddr & ~_2MB_PAGE_MASK);
+                *paddr = physpage;
+                return TRUE;
         }
 
-        sprintf(physbuf, "%lx", paddr);
-        len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
-        fprintf(fp, "%s  ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL"));
+        /*
+	 *  ptep = pte_offset_map(pmd, address);
+         *  pte = *ptep;
+	 */
+        pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK;
+        FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE());
+        ptep = ((ulong *)pte_paddr) + pte_index(uvaddr);
+        pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep));
+        if (verbose)
+                fprintf(fp, "   PTE: %lx => %lx\n", (ulong)ptep, pte);
+        if (!(pte & (_PAGE_PRESENT))) {
+		*paddr = pte;
 
-        fprintf(fp, "FLAGS\n");
+                if (pte && verbose) {
+                        fprintf(fp, "\n");
+                        x86_64_translate_pte(pte, 0, 0);
+                }
+                goto no_upage;
+        }
 
-        fprintf(fp, "%s  %s  ",
-                mkstring(ptebuf, len1, CENTER|RJUST, NULL),
-                mkstring(physbuf, len2, CENTER|RJUST, NULL));
-        fprintf(fp, "(");
-        others = 0;
+        *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr);
 
-	if (pte) {
-		if (pte & _PAGE_PRESENT)
-			fprintf(fp, "%sPRESENT", others++ ? "|" : "");
-		if (pte & _PAGE_RW)
-			fprintf(fp, "%sRW", others++ ? "|" : "");
-		if (pte & _PAGE_USER)
-			fprintf(fp, "%sUSER", others++ ? "|" : "");
-		if (pte & _PAGE_PWT)
-			fprintf(fp, "%sPWT", others++ ? "|" : "");
-		if (pte & _PAGE_PCD)
-			fprintf(fp, "%sPCD", others++ ? "|" : "");
-		if (pte & _PAGE_ACCESSED)
-			fprintf(fp, "%sACCESSED", others++ ? "|" : "");
-		if (pte & _PAGE_DIRTY)
-			fprintf(fp, "%sDIRTY", others++ ? "|" : "");
-		if ((pte & _PAGE_PSE) && (pte & _PAGE_PRESENT))
-			fprintf(fp, "%sPSE", others++ ? "|" : "");
-		if ((pte & _PAGE_PROTNONE) && !(pte & _PAGE_PRESENT))
-			fprintf(fp, "%sPROTNONE", others++ ? "|" : "");
-		if (pte & _PAGE_GLOBAL)
-			fprintf(fp, "%sGLOBAL", others++ ? "|" : "");
-		if (pte & _PAGE_NX)
-			fprintf(fp, "%sNX", others++ ? "|" : "");
-	} else {
-                fprintf(fp, "no mapping");
+        if (verbose) {
+                fprintf(fp, "  PAGE: %lx\n\n", 
+			PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK);
+                x86_64_translate_pte(pte, 0, 0);
         }
 
-        fprintf(fp, ")\n");
+	return TRUE;
 
-	return (page_present);
+no_upage:
+
+	return FALSE;
 }
 
-static char *
-x86_64_exception_stacks[7] = {
-	"STACKFAULT",
-	"DOUBLEFAULT",
-	"NMI",
-	"DEBUG",
-	"MCE",
-	"(unknown)",
-	"(unknown)"
-};
 
 /*
- *  Look for likely exception frames in a stack.
+ *  Translates a kernel virtual address to its physical address.  cmd_vtop()
+ *  sets the verbose flag so that the pte translation gets displayed; all
+ *  other callers quietly accept the translation.
  */
-static int 
-x86_64_eframe_search(struct bt_info *bt)
+static int
+x86_64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
 {
-	int i, c, cnt;
-        ulong estack, irqstack, stacksize;
-	ulong *up;
-        struct machine_specific *ms;
-	struct bt_info bt_local;
+	ulong *pml4;
+        ulong *pgd;
+	ulong pgd_paddr;
+	ulong pgd_pte;
+	ulong *pmd;
+	ulong pmd_paddr;
+	ulong pmd_pte;
+	ulong *ptep;
+	ulong pte_paddr;
+	ulong pte;
+	physaddr_t physpage;
 
-	if (bt->flags & BT_EFRAME_SEARCH2) {
-		BCOPY(bt, &bt_local, sizeof(struct bt_info));
+        if (!IS_KVADDR(kvaddr))
+                return FALSE;
+
+	if (XEN_HYPER_MODE()) {
+		if (XEN_VIRT_ADDR(kvaddr)) {
+			*paddr = kvaddr - XEN_VIRT_START + xen_phys_start();
+			return TRUE;
+		}
+		if (DIRECTMAP_VIRT_ADDR(kvaddr)) {
+			*paddr = kvaddr - DIRECTMAP_VIRT_START;
+			return TRUE;
+		}
+		FILL_PML4_HYPER();
+		pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);  
+        	if (verbose) {
+			fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]);
+               		fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4);
+		}
+	} else {
+        	if (!vt->vmalloc_start) {
+                	*paddr = x86_64_VTOP(kvaddr);
+                	return TRUE;
+        	}
+
+        	if (!IS_VMALLOC_ADDR(kvaddr)) {
+                	*paddr = x86_64_VTOP(kvaddr);
+                	if (!verbose)
+                        	return TRUE;
+        	}
+
+		if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES))
+			return (x86_64_kvtop_xen_wpt(tc, kvaddr, paddr, verbose));
+
+ 		/*	
+		 *  pgd = pgd_offset_k(addr);
+		 */
+		FILL_PML4();
+		pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);  
+        	if (verbose) {
+			fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]);
+               		fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4);
+		}
+	}
+	if (!(*pml4 & _PAGE_PRESENT))
+		goto no_kpage;
+	pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK;
+	FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE());
+	pgd = ((ulong *)pgd_paddr) + pgd_index(kvaddr); 
+	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd));
+        if (verbose) 
+                fprintf(fp, "   PUD: %lx => %lx\n", (ulong)pgd, pgd_pte);
+	if (!(pgd_pte & _PAGE_PRESENT))
+		goto no_kpage;
+
+	/*
+	 *  pmd = pmd_offset(pgd, addr); 
+	 */
+	pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK;
+	FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE());
+	pmd = ((ulong *)pmd_paddr) + pmd_index(kvaddr);
+	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd));
+        if (verbose) 
+                fprintf(fp, "   PMD: %lx => %lx\n", (ulong)pmd, pmd_pte);
+	if (!(pmd_pte & _PAGE_PRESENT))
+		goto no_kpage;
+	if (pmd_pte & _PAGE_PSE) {
+		if (verbose) {
+			fprintf(fp, "  PAGE: %lx  (2MB)\n\n", 
+				PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK);
+                       	x86_64_translate_pte(pmd_pte, 0, 0);
+                }
+
+                physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + 
+			(kvaddr & ~_2MB_PAGE_MASK);
+                *paddr = physpage;
+                return TRUE;
+	}
+
+	/*
+	 *  ptep = pte_offset_map(pmd, addr);
+	 *  pte = *ptep;
+	 */
+	pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK;
+	FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE());
+	ptep = ((ulong *)pte_paddr) + pte_index(kvaddr);
+	pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep));
+        if (verbose) 
+                fprintf(fp, "   PTE: %lx => %lx\n", (ulong)ptep, pte);
+        if (!(pte & (_PAGE_PRESENT))) {
+                if (pte && verbose) {
+                        fprintf(fp, "\n");
+                        x86_64_translate_pte(pte, 0, 0);
+                }
+                goto no_kpage;
+        }
+
+        *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr);
+
+        if (verbose) {
+                fprintf(fp, "  PAGE: %lx\n\n", 
+			PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK);
+                x86_64_translate_pte(pte, 0, 0);
+        }
+
+        return TRUE;
+
+no_kpage:
+        return FALSE;
+}
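
Before falling into the page-table walk, x86_64_kvtop() above resolves identity-mapped addresses directly through x86_64_VTOP(), which this patch extends to add the relocated phys_base for addresses in the kernel-text mapping. A minimal sketch of that arithmetic; the two base constants are stand-ins of the kind used by kernels of this era, not values taken from the patch:

#include <stdio.h>

typedef unsigned long ulong;

static const ulong start_kernel_map = 0xffffffff80000000UL;	/* assumed __START_KERNEL_map */
static const ulong page_offset      = 0xffff810000000000UL;	/* assumed PAGE_OFFSET */

static ulong vtop(ulong vaddr, ulong phys_base)
{
	if (vaddr >= start_kernel_map)		/* kernel text/data mapping */
		return vaddr - start_kernel_map + phys_base;
	return vaddr - page_offset;		/* direct (identity) mapping */
}

int main(void)
{
	/* a kernel-text address on a kernel relocated to phys_base 0x200000 */
	printf("0x%lx -> 0x%lx\n",
		0xffffffff80200000UL, vtop(0xffffffff80200000UL, 0x200000UL));
	return 0;
}

In the function above only vmalloc, vmemmap and module addresses (or a Xen writable-page-table session) take the slower table walk.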
+
+
+static int
+x86_64_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
+{
+	ulong *pml4;
+        ulong *pgd;
+	ulong pgd_paddr;
+	ulong pgd_pte;
+	ulong *pmd;
+	ulong pmd_paddr;
+	ulong pmd_pte;
+	ulong pseudo_pmd_pte;
+	ulong *ptep;
+	ulong pte_paddr;
+	ulong pte;
+	ulong pseudo_pte;
+	physaddr_t physpage;
+	char buf[BUFSIZE];
+
+ 	/*	
+	 *  pgd = pgd_offset_k(addr);
+	 */
+	FILL_PML4();
+	pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);  
+        if (verbose) {
+		fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]);
+                fprintf(fp, "PAGE DIRECTORY: %lx [machine]\n", *pml4);
+	}
+	if (!(*pml4 & _PAGE_PRESENT))
+		goto no_kpage;
+	pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK;
+	pgd_paddr = xen_m2p(pgd_paddr);
+	if (verbose)
+                fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd_paddr);
+	FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE());
+	pgd = ((ulong *)pgd_paddr) + pgd_index(kvaddr); 
+	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd));
+        if (verbose) 
+                fprintf(fp, "   PUD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte);
+	if (!(pgd_pte & _PAGE_PRESENT))
+		goto no_kpage;
+
+	/*
+	 *  pmd = pmd_offset(pgd, addr); 
+	 */
+	pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK;
+	pmd_paddr = xen_m2p(pmd_paddr);
+	if (verbose)
+                fprintf(fp, "   PUD: %lx\n", pmd_paddr);
+	FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE());
+	pmd = ((ulong *)pmd_paddr) + pmd_index(kvaddr);
+	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd));
+        if (verbose) 
+                fprintf(fp, "   PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte);
+	if (!(pmd_pte & _PAGE_PRESENT))
+		goto no_kpage;
+	if (pmd_pte & _PAGE_PSE) {
+		if (verbose)
+			fprintf(fp, "  PAGE: %lx  (2MB) [machine]\n", 
+				PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK);
+
+                pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte));
+
+                if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) {
+                        if (verbose)
+                                fprintf(fp, " PAGE: page not available\n");
+                        *paddr = PADDR_NOT_AVAILABLE;
+                        return FALSE;
+                }
+
+                pseudo_pmd_pte |= PAGEOFFSET(pmd_pte);
+
+                if (verbose) {
+                        fprintf(fp, " PAGE: %s  (2MB)\n\n",
+                                mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
+                                MKSTR(PAGEBASE(pseudo_pmd_pte) &
+                                PHYSICAL_PAGE_MASK)));
+
+                        x86_64_translate_pte(pseudo_pmd_pte, 0, 0);
+                }
+
+                physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) +
+                        (kvaddr & ~_2MB_PAGE_MASK);
+
+                *paddr = physpage;
+                return TRUE;
+	}
+
+	/*
+	 *  ptep = pte_offset_map(pmd, addr);
+	 *  pte = *ptep;
+	 */
+	pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK;
+	pte_paddr = xen_m2p(pte_paddr);
+	if (verbose)
+		fprintf(fp, "   PMD: %lx\n", pte_paddr); 
+	FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE());
+	ptep = ((ulong *)pte_paddr) + pte_index(kvaddr);
+	pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep));
+        if (verbose) 
+                fprintf(fp, "   PTE: %lx => %lx [machine]\n", (ulong)ptep, pte);
+        if (!(pte & (_PAGE_PRESENT))) {
+                if (pte && verbose) {
+                        fprintf(fp, "\n");
+                        x86_64_translate_pte(pte, 0, 0);
+                }
+                goto no_kpage;
+        }
+
+	pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK);
+	if (verbose)
+                fprintf(fp, "   PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte));
+
+        *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr);
+
+        if (verbose) {
+                fprintf(fp, "  PAGE: %lx [machine]\n", 
+			PAGEBASE(pte) & PHYSICAL_PAGE_MASK);
+                fprintf(fp, "  PAGE: %lx\n\n", 
+			PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK);
+                x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0);
+        }
+
+        return TRUE;
+
+no_kpage:
+        return FALSE;
+}
+
+
+/*
+ *  Determine where vmalloc'd memory starts.
+ */
+static ulong
+x86_64_vmalloc_start(void)
+{
+	return ((ulong)VMALLOC_START);
+}
+
+/*
+ *  thread_info implementation makes for less accurate results here.
+ */
+static int
+x86_64_is_task_addr(ulong task)
+{
+        if (tt->flags & THREAD_INFO)
+                return IS_KVADDR(task);
+        else
+                return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0));
+}
+
+
+/*
+ *  easy enough...
+ */
+static ulong
+x86_64_processor_speed(void)
+{
+        unsigned long cpu_khz = 0;
+
+        if (machdep->mhz)
+                return (machdep->mhz);
+
+        if (symbol_exists("cpu_khz")) {
+                get_symbol_data("cpu_khz", sizeof(int), &cpu_khz);
+                if (cpu_khz)
+                        return(machdep->mhz = cpu_khz/1000);
+        }
+
+        return 0;
+}
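
The processor-speed routine above now reads cpu_khz with sizeof(int): the kernel's cpu_khz is a 32-bit quantity, so only its four bytes are copied into a zero-initialized unsigned long. A toy demonstration of why the zero initialization matters for that partial read on a little-endian machine such as x86_64 (the value below is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int kernel_cpu_khz = 2394123;	/* pretend symbol contents */
	unsigned long cpu_khz = 0;		/* upper 4 bytes must already be zero */

	/* copy only sizeof(int) bytes, as the patched code does */
	memcpy(&cpu_khz, &kernel_cpu_khz, sizeof(int));
	printf("cpu_khz=%lu -> %lu MHz\n", cpu_khz, cpu_khz / 1000);
	return 0;
}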
+
+
+/*
+ *  Accept or reject a symbol from the kernel namelist.
+ */
+static int
+x86_64_verify_symbol(const char *name, ulong value, char type)
+{
+        if (STREQ(name, "_text") || STREQ(name, "_stext"))
+                machdep->flags |= KSYMS_START;
+
+        if (!name || !strlen(name) || !(machdep->flags & KSYMS_START))
+                return FALSE;
+	return TRUE;
+}
+
+
+/*
+ *  Get the relevant page directory pointer from a task structure.
+ */
+static ulong
+x86_64_get_task_pgd(ulong task)
+{
+	return (error(FATAL, "x86_64_get_task_pgd: N/A\n"));
+}
+
+
+/*
+ *  Translate a PTE, returning TRUE if the page is present.
+ *  If a physaddr pointer is passed in, don't print anything.
+ */
+static int
+x86_64_translate_pte(ulong pte, void *physaddr, ulonglong unused)
+{
+	int c, others, len1, len2, len3;
+	ulong paddr;
+	char buf[BUFSIZE];
+        char buf2[BUFSIZE];
+        char buf3[BUFSIZE];
+	char ptebuf[BUFSIZE];
+	char physbuf[BUFSIZE];
+        char *arglist[MAXARGS];
+	int page_present;
+
+        paddr = pte & PHYSICAL_PAGE_MASK;
+        page_present = pte & _PAGE_PRESENT;
+
+        if (physaddr) {
+		*((ulong *)physaddr) = paddr;
+		return page_present;
+	}
+        
+	sprintf(ptebuf, "%lx", pte);
+	len1 = MAX(strlen(ptebuf), strlen("PTE"));
+	fprintf(fp, "%s  ", mkstring(buf, len1, CENTER|LJUST, "PTE"));
+
+        if (!page_present && pte) {
+                swap_location(pte, buf);
+                if ((c = parse_line(buf, arglist)) != 3)
+                        error(FATAL, "cannot determine swap location\n");
+
+                len2 = MAX(strlen(arglist[0]), strlen("SWAP"));
+                len3 = MAX(strlen(arglist[2]), strlen("OFFSET"));
+
+                fprintf(fp, "%s  %s\n",
+                        mkstring(buf2, len2, CENTER|LJUST, "SWAP"),
+                        mkstring(buf3, len3, CENTER|LJUST, "OFFSET"));
+
+                strcpy(buf2, arglist[0]);
+                strcpy(buf3, arglist[2]);
+                fprintf(fp, "%s  %s  %s\n",
+                        mkstring(ptebuf, len1, CENTER|RJUST, NULL),
+                        mkstring(buf2, len2, CENTER|RJUST, NULL),
+                        mkstring(buf3, len3, CENTER|RJUST, NULL));
+
+                return page_present;
+        }
+
+        sprintf(physbuf, "%lx", paddr);
+        len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
+        fprintf(fp, "%s  ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL"));
+
+        fprintf(fp, "FLAGS\n");
+
+        fprintf(fp, "%s  %s  ",
+                mkstring(ptebuf, len1, CENTER|RJUST, NULL),
+                mkstring(physbuf, len2, CENTER|RJUST, NULL));
+        fprintf(fp, "(");
+        others = 0;
+
+	if (pte) {
+		if (pte & _PAGE_PRESENT)
+			fprintf(fp, "%sPRESENT", others++ ? "|" : "");
+		if (pte & _PAGE_RW)
+			fprintf(fp, "%sRW", others++ ? "|" : "");
+		if (pte & _PAGE_USER)
+			fprintf(fp, "%sUSER", others++ ? "|" : "");
+		if (pte & _PAGE_PWT)
+			fprintf(fp, "%sPWT", others++ ? "|" : "");
+		if (pte & _PAGE_PCD)
+			fprintf(fp, "%sPCD", others++ ? "|" : "");
+		if (pte & _PAGE_ACCESSED)
+			fprintf(fp, "%sACCESSED", others++ ? "|" : "");
+		if (pte & _PAGE_DIRTY)
+			fprintf(fp, "%sDIRTY", others++ ? "|" : "");
+		if ((pte & _PAGE_PSE) && (pte & _PAGE_PRESENT))
+			fprintf(fp, "%sPSE", others++ ? "|" : "");
+		if ((pte & _PAGE_PROTNONE) && !(pte & _PAGE_PRESENT))
+			fprintf(fp, "%sPROTNONE", others++ ? "|" : "");
+		if (pte & _PAGE_GLOBAL)
+			fprintf(fp, "%sGLOBAL", others++ ? "|" : "");
+		if (pte & _PAGE_NX)
+			fprintf(fp, "%sNX", others++ ? "|" : "");
+	} else {
+                fprintf(fp, "no mapping");
+        }
+
+        fprintf(fp, ")\n");
+
+	return (page_present);
+}
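
x86_64_translate_pte() above decodes the flag bits of an x86_64 page-table entry. A compact standalone decoder using the architectural bit positions (hardware-defined values, not symbols from this patch), built around the same others++ separator idiom:

#include <stdio.h>
#include <stddef.h>

typedef unsigned long ulong;

#define PTE_PRESENT  (1UL << 0)
#define PTE_RW       (1UL << 1)
#define PTE_USER     (1UL << 2)
#define PTE_PWT      (1UL << 3)
#define PTE_PCD      (1UL << 4)
#define PTE_ACCESSED (1UL << 5)
#define PTE_DIRTY    (1UL << 6)
#define PTE_PSE      (1UL << 7)
#define PTE_GLOBAL   (1UL << 8)
#define PTE_NX       (1UL << 63)

static void show_pte_flags(ulong pte)
{
	static const struct { ulong bit; const char *name; } flags[] = {
		{ PTE_PRESENT, "PRESENT" }, { PTE_RW, "RW" },
		{ PTE_USER, "USER" }, { PTE_PWT, "PWT" }, { PTE_PCD, "PCD" },
		{ PTE_ACCESSED, "ACCESSED" }, { PTE_DIRTY, "DIRTY" },
		{ PTE_PSE, "PSE" }, { PTE_GLOBAL, "GLOBAL" }, { PTE_NX, "NX" },
	};
	int others = 0;
	size_t i;

	printf("%lx (", pte);
	for (i = 0; i < sizeof(flags)/sizeof(flags[0]); i++)
		if (pte & flags[i].bit)
			printf("%s%s", others++ ? "|" : "", flags[i].name);
	printf("%s)\n", others ? "" : "no mapping");
}

int main(void)
{
	/* prints: 8000000012345063 (PRESENT|RW|ACCESSED|DIRTY|NX) */
	show_pte_flags(0x8000000012345063UL);
	return 0;
}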
+
+static char *
+x86_64_exception_stacks[7] = {
+	"STACKFAULT",
+	"DOUBLEFAULT",
+	"NMI",
+	"DEBUG",
+	"MCE",
+	"(unknown)",
+	"(unknown)"
+};
+
+/*
+ *  Look for likely exception frames in a stack.
+ */
+static int 
+x86_64_eframe_search(struct bt_info *bt)
+{
+	int i, c, cnt;
+        ulong estack, irqstack, stacksize;
+	ulong *up;
+        struct machine_specific *ms;
+	struct bt_info bt_local;
+
+	if (bt->flags & BT_EFRAME_SEARCH2) {
+		BCOPY(bt, &bt_local, sizeof(struct bt_info));
 		bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2;
 
-        	ms = machdep->machspec;
+        	ms = machdep->machspec;
+
+        	for (c = 0; c < kt->cpus; c++) {
+                	if (ms->stkinfo.ibase[c] == 0)
+                        	break;
+                	bt->hp->esp = ms->stkinfo.ibase[c];
+                	fprintf(fp, "CPU %d IRQ STACK:\n", c);
+                	if ((cnt = x86_64_eframe_search(bt)))
+                        	fprintf(fp, "\n");
+                	else
+                        	fprintf(fp, "(none found)\n\n");
+                }
+
+        	for (c = 0; c < kt->cpus; c++) {
+                	for (i = 0; i < 7; i++) {
+                        	if (ms->stkinfo.ebase[c][i] == 0)
+                                	break;
+                                bt->hp->esp = ms->stkinfo.ebase[c][i];
+                                fprintf(fp, "CPU %d %s EXCEPTION STACK:\n", 
+					c, x86_64_exception_stacks[i]);
+                                if ((cnt = x86_64_eframe_search(bt)))
+					fprintf(fp, "\n");
+				else
+                                        fprintf(fp, "(none found)\n\n");
+                	}
+        	}
+
+		return 0;
+        }
+
+        if (bt->hp && bt->hp->esp) {
+        	ms = machdep->machspec;
+		bt->stkptr = bt->hp->esp;
+		if ((estack = x86_64_in_exception_stack(bt))) {
+			stacksize = ms->stkinfo.esize;
+			bt->stackbase = estack;
+			bt->stacktop = estack + ms->stkinfo.esize;
+                	bt->stackbuf = ms->irqstack;
+                	alter_stackbuf(bt);
+		} else if ((irqstack = x86_64_in_irqstack(bt))) {
+			stacksize = ms->stkinfo.isize;
+			bt->stackbase = irqstack;
+			bt->stacktop = irqstack + ms->stkinfo.isize;
+                	bt->stackbuf = ms->irqstack;
+                	alter_stackbuf(bt);
+		} else if (!INSTACK(bt->stkptr, bt))
+			error(FATAL, 
+			    "unrecognized stack address for this task: %lx\n",
+				bt->hp->esp);
+	} 
+
+	stacksize = bt->stacktop - bt->stackbase - SIZE(pt_regs);
+
+	if (bt->stkptr)
+		i = (bt->stkptr - bt->stackbase)/sizeof(ulong);
+	else
+		i = 0;
+
+	for (cnt = 0; i <= stacksize/sizeof(ulong); i++) {
+		up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);
+
+                if (x86_64_exception_frame(EFRAME_SEARCH|EFRAME_PRINT|
+		    EFRAME_VERIFY, 0, (char *)up, bt, fp)) 
+			cnt++;
+	}
+
+	return cnt;
+}
+
+static void
+x86_64_display_full_frame(struct bt_info *bt, ulong rsp, FILE *ofp)
+{
+	int i, u_idx;
+	ulong *up;
+	ulong words, addr;
+
+	if (rsp < bt->frameptr)
+		return;
+
+        words = (rsp - bt->frameptr) / sizeof(ulong) + 1;
+
+	addr = bt->frameptr;
+	u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong);
+	for (i = 0; i < words; i++, u_idx++) {
+		if (!(i & 1)) 
+			fprintf(ofp, "%s    %lx: ", i ? "\n" : "", addr);
+		
+		up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]);
+		fprintf(ofp, "%016lx ", *up);
+		addr += sizeof(ulong);
+	}
+	fprintf(ofp, "\n");
+}
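
x86_64_display_full_frame() above dumps every 8-byte stack word between the previous frame boundary and the current frame address, two words per line, each line prefixed by the stack address of its first word. The same output layout in isolation, over made-up data:

#include <stdio.h>

typedef unsigned long ulong;

static void dump_words(ulong base_addr, const ulong *words, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (!(i & 1))	/* start a new line every two words */
			printf("%s    %lx: ", i ? "\n" : "",
				base_addr + i * sizeof(ulong));
		printf("%016lx ", words[i]);
	}
	printf("\n");
}

int main(void)
{
	ulong fake_stack[5] = { 0x1, 0x2, 0x3, 0x4, 0x5 };	/* sample data */

	dump_words(0xffff810012345e80UL, fake_stack, 5);
	return 0;
}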
+
+/*
+ *  Check a frame for a requested reference.
+ */
+static void
+x86_64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name)
+{
+	struct syment *sp;
+	ulong offset;
+
+	if (!name)
+		sp = value_search(text, &offset); 
+	else if (!text)
+		sp = symbol_search(name);
+
+        switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL))
+        {
+        case BT_REF_SYMBOL:
+                if (name) {
+			if (STREQ(name, bt->ref->str))
+                        	bt->ref->cmdflags |= BT_REF_FOUND;
+		} else {
+			if (sp && !offset && STREQ(sp->name, bt->ref->str))
+                        	bt->ref->cmdflags |= BT_REF_FOUND;
+		}
+                break;
+
+        case BT_REF_HEXVAL:
+                if (text) {
+			if (bt->ref->hexval == text) 
+                        	bt->ref->cmdflags |= BT_REF_FOUND;
+		} else if (sp && (bt->ref->hexval == sp->value))
+                       	bt->ref->cmdflags |= BT_REF_FOUND;
+		else if (!name && !text && (bt->ref->hexval == 0))
+			bt->ref->cmdflags |= BT_REF_FOUND;
+                break;
+        }
+}
+
+/*
+ *  Determine the function containing a .text.lock. reference.
+ */
+static ulong
+text_lock_function(char *name, struct bt_info *bt, ulong locktext)
+{
+	int c, reterror, instr, arg;
+	char buf[BUFSIZE];
+	char *arglist[MAXARGS];
+	char *p1;
+	ulong locking_func;
+	
+	instr = arg = -1;
+	locking_func = 0;
+
+        open_tmpfile2();
+
+	if (STREQ(name, ".text.lock.spinlock"))
+        	sprintf(buf, "x/4i 0x%lx", locktext);
+	else
+        	sprintf(buf, "x/1i 0x%lx", locktext);
+
+        if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) {
+                close_tmpfile2();
+                bt->flags |= BT_FRAMESIZE_DISABLE;
+                return 0;
+        }
+
+        rewind(pc->tmpfile2);
+        while (fgets(buf, BUFSIZE, pc->tmpfile2)) {
+                c = parse_line(buf, arglist);
+
+                if (instr == -1) {
+                        /*
+                         *  Check whether <function+offset> are
+                         *  in the output string.
+                         */
+                        if (LASTCHAR(arglist[0]) == ':') {
+                                instr = 1;
+                                arg = 2;
+                        } else {
+                                instr = 2;
+                                arg = 3;
+                        }
+                }
+
+                if (c < (arg+1))
+                        break;
+
+		if (STREQ(arglist[instr], "jmpq") || STREQ(arglist[instr], "jmp")) {
+                        p1 = arglist[arg];
+                        reterror = 0;
+                        locking_func = htol(p1, RETURN_ON_ERROR, &reterror);
+                        if (reterror)
+				locking_func = 0;
+			break;
+                }
+	}
+	close_tmpfile2();
+
+	if (!locking_func)
+                bt->flags |= BT_FRAMESIZE_DISABLE;
+
+	return locking_func;
+
+}
+
+
+/*
+ *  print one entry of a stack trace
+ */
+#define BACKTRACE_COMPLETE                   (1)
+#define BACKTRACE_ENTRY_IGNORED              (2)
+#define BACKTRACE_ENTRY_DISPLAYED            (3)
+#define BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED (4)
+
+static int
+x86_64_print_stack_entry(struct bt_info *bt, FILE *ofp, int level, 
+	int stkindex, ulong text)
+{
+	ulong rsp, offset, locking_func;
+	struct syment *sp, *spl;
+	char *name;
+	int result; 
+	long eframe_check;
+	char buf[BUFSIZE];
+
+	eframe_check = -1;
+	offset = 0;
+	sp = value_search(text, &offset);
+	if (!sp)
+		return BACKTRACE_ENTRY_IGNORED;
+
+	name = sp->name;
+
+	if (bt->flags & BT_TEXT_SYMBOLS) {
+		if (bt->flags & BT_EXCEPTION_FRAME)
+			rsp = bt->stkptr;
+		else
+			rsp = bt->stackbase + (stkindex * sizeof(long));
+                fprintf(ofp, "  [%s] %s at %lx\n",
+                	mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(rsp)),
+                        name, text);
+		if (BT_REFERENCE_CHECK(bt))
+			x86_64_do_bt_reference_check(bt, text, name);
+		return BACKTRACE_ENTRY_DISPLAYED;
+	}
+
+	if (!offset && !(bt->flags & BT_EXCEPTION_FRAME) &&
+	    !(bt->flags & BT_START)) { 
+		if (STREQ(name, "child_rip")) {
+			if (symbol_exists("kernel_thread"))
+				name = "kernel_thread";
+			else if (symbol_exists("arch_kernel_thread"))
+				name = "arch_kernel_thread";
+		}
+		else if (!(bt->flags & BT_SCHEDULE)) {
+			if (STREQ(name, "error_exit")) 
+				eframe_check = 8;
+			else {
+				if (CRASHDEBUG(2))
+					fprintf(ofp, 
+		              "< ignoring text symbol with no offset: %s() >\n",
+						sp->name);
+				return BACKTRACE_ENTRY_IGNORED;
+			}
+		}
+	}
+
+	if (bt->flags & BT_SCHEDULE)
+		name = "schedule";
+
+        if (STREQ(name, "child_rip")) {
+                if (symbol_exists("kernel_thread"))
+                        name = "kernel_thread";
+                else if (symbol_exists("arch_kernel_thread"))
+                        name = "arch_kernel_thread";
+		result = BACKTRACE_COMPLETE;
+        } else if (STREQ(name, "cpu_idle"))
+		result = BACKTRACE_COMPLETE;
+	else
+		result = BACKTRACE_ENTRY_DISPLAYED;
+
+	if (bt->flags & BT_EXCEPTION_FRAME)
+		rsp = bt->stkptr;
+	else if (bt->flags & BT_START)
+		rsp = bt->stkptr;
+	else
+		rsp = bt->stackbase + (stkindex * sizeof(long));
+
+	if ((bt->flags & BT_FULL)) {
+		if (bt->frameptr) 
+			x86_64_display_full_frame(bt, rsp, ofp);
+		bt->frameptr = rsp + sizeof(ulong);
+	}
+
+        fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level,
+		rsp, name, text);
+
+	if (STREQ(name, "tracesys"))
+		fprintf(ofp, " (via system_call)");
+	else if (STRNEQ(name, ".text.lock.")) {
+		if ((locking_func = text_lock_function(name, bt, text)) &&
+		    (spl = value_search(locking_func, &offset)))
+			fprintf(ofp, " (via %s)", spl->name);
+	}
+
+	if (bt->flags & BT_FRAMESIZE_DISABLE)
+		fprintf(ofp, " *");
+
+	fprintf(ofp, "\n");
+
+        if (bt->flags & BT_LINE_NUMBERS) {
+                get_line_number(text, buf, FALSE);
+                if (strlen(buf))
+                        fprintf(ofp, "    %s\n", buf);
+	}
+
+	if (eframe_check >= 0) {
+		if (x86_64_exception_frame(EFRAME_PRINT|EFRAME_VERIFY, 
+		    bt->stackbase + (stkindex*sizeof(long)) + eframe_check,
+		    NULL, bt, ofp))
+			result = BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED;
+	}
+
+	if (BT_REFERENCE_CHECK(bt))
+		x86_64_do_bt_reference_check(bt, text, name);
+
+	bt->call_target = name;
+
+	if (is_direct_call_target(bt)) {
+		if (CRASHDEBUG(2))
+			fprintf(ofp, "< enable BT_CHECK_CALLER for %s >\n", 
+				bt->call_target);
+		bt->flags |= BT_CHECK_CALLER;
+	} else {
+		if (CRASHDEBUG(2) && (bt->flags & BT_CHECK_CALLER))
+			fprintf(ofp, "< disable BT_CHECK_CALLER for %s >\n", 
+				bt->call_target);
+		if (bt->flags & BT_CHECK_CALLER) {
+			if (CRASHDEBUG(2))
+			    	fprintf(ofp, "< set BT_NO_CHECK_CALLER >\n");
+			bt->flags |= BT_NO_CHECK_CALLER;
+		}
+		bt->flags &= ~(ulonglong)BT_CHECK_CALLER;
+	}
+
+	return result;
+}
+
+/*
+ *  Unroll a kernel stack.
+ */
+static void
+x86_64_back_trace_cmd(struct bt_info *bt)
+{
+	error(FATAL, "x86_64_back_trace_cmd: TBD\n");
+}
+
+
+
+/*
+ *  Determine whether the initial stack pointer is located in one of the
+ *  exception stacks.
+ */
+static ulong
+x86_64_in_exception_stack(struct bt_info *bt) 
+{
+	int c, i;
+	ulong rsp;
+	ulong estack;
+	struct machine_specific *ms;
+
+	rsp = bt->stkptr;
+	ms = machdep->machspec;
+	estack = 0;
+
+        for (c = 0; !estack && (c < kt->cpus); c++) {
+		for (i = 0; i < 7; i++) {
+			if (ms->stkinfo.ebase[c][i] == 0)
+				break;
+			if ((rsp >= ms->stkinfo.ebase[c][i]) &&
+			    (rsp < (ms->stkinfo.ebase[c][i] + 
+			    ms->stkinfo.esize))) {
+				estack = ms->stkinfo.ebase[c][i]; 
+				if (CRASHDEBUG(1) && (c != bt->tc->processor)) 
+					error(INFO, 
+      		                      "task cpu: %d  exception stack cpu: %d\n",
+						bt->tc->processor, c);
+				break;
+			}
+		}
+        }
+
+	return estack;
+}
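
The stack classification above (x86_64_in_exception_stack(), and x86_64_in_irqstack() just below) reduces to a range test of the saved stack pointer against each cpu's recorded stack base and size. A standalone sketch with hypothetical bases and an assumed 4KB exception-stack size:

#include <stdio.h>

typedef unsigned long ulong;

#define NCPUS   2
#define NSTACKS 7
#define ESIZE   4096UL				/* assumed exception stack size */

static ulong ebase[NCPUS][NSTACKS] = {		/* hypothetical per-cpu stack bases */
	{ 0xffff810001000000UL, 0xffff810001002000UL },
	{ 0xffff810001010000UL, 0xffff810001012000UL },
};

/* return the base of the exception stack containing rsp, or 0 if none */
static ulong in_exception_stack(ulong rsp)
{
	int c, i;

	for (c = 0; c < NCPUS; c++)
		for (i = 0; i < NSTACKS && ebase[c][i]; i++)
			if (rsp >= ebase[c][i] && rsp < ebase[c][i] + ESIZE)
				return ebase[c][i];
	return 0;
}

int main(void)
{
	printf("stack base: %lx\n", in_exception_stack(0xffff810001002100UL));
	return 0;
}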
+
+/*
+ *  Determine whether the current stack pointer is in a cpu's irqstack.
+ */
+static ulong
+x86_64_in_irqstack(struct bt_info *bt) 
+{
+        int c;
+        ulong rsp;
+        ulong irqstack;
+        struct machine_specific *ms;
+
+        rsp = bt->stkptr;
+        ms = machdep->machspec;
+        irqstack = 0;
+
+        for (c = 0; !irqstack && (c < kt->cpus); c++) {
+                if (ms->stkinfo.ibase[c] == 0)
+                 	break;
+                if ((rsp >= ms->stkinfo.ibase[c]) &&
+                    (rsp < (ms->stkinfo.ibase[c] + ms->stkinfo.isize))) {
+                	irqstack = ms->stkinfo.ibase[c];
+                        if (CRASHDEBUG(1) && (c != bt->tc->processor)) 
+                                error(INFO, 
+			          "task cpu: %d  IRQ stack cpu: %d\n",
+                                	bt->tc->processor, c);
+                        break;
+                }
+        }
+
+        return irqstack;
+}
+
+#define STACK_TRANSITION_ERRMSG_E_I_P \
+"cannot transition from exception stack to IRQ stack to current process stack:\n    exception stack pointer: %lx\n          IRQ stack pointer: %lx\n      process stack pointer: %lx\n         current stack base: %lx\n" 
+#define STACK_TRANSITION_ERRMSG_E_P \
+"cannot transition from exception stack to current process stack:\n    exception stack pointer: %lx\n      process stack pointer: %lx\n         current stack base: %lx\n"
+#define STACK_TRANSITION_ERRMSG_I_P \
+"cannot transition from IRQ stack to current process stack:\n        IRQ stack pointer: %lx\n    process stack pointer: %lx\n       current stack base: %lx\n"
+
+/*
+ *  Low-budget back tracer -- dump text return addresses, following call chain
+ *  when possible, along with any verifiable exception frames.
+ */
+static void
+x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in)
+{
+	int i, level, done, framesize;
+	ulong rsp, offset, stacktop;
+	ulong *up;
+	long cs;
+	struct syment *sp, *spt;
+	FILE *ofp;
+	ulong estack, irqstack;
+	ulong irq_eframe;
+	struct bt_info bt_local, *bt;
+	struct machine_specific *ms;
+	ulong last_process_stack_eframe;
+	ulong user_mode_eframe;
+
+        /*
+         *  User may have made a run-time switch.
+         */
+	if (kt->flags & DWARF_UNWIND) {
+		machdep->back_trace = x86_64_dwarf_back_trace_cmd;
+		x86_64_dwarf_back_trace_cmd(bt_in);
+		return;
+	}
+
+	bt = &bt_local;
+	BCOPY(bt_in, bt, sizeof(struct bt_info));
+
+	if (bt->flags & BT_FRAMESIZE_DEBUG) {
+		x86_64_framesize_debug(bt);
+		return;
+	}
+
+	level = 0;
+	done = FALSE;
+	irq_eframe = 0;
+	last_process_stack_eframe = 0;
+	bt->call_target = NULL;
+	rsp = bt->stkptr;
+	if (!rsp) {
+		error(INFO, "cannot determine starting stack pointer\n");
+		return;
+	}
+	ms = machdep->machspec;
+	if (BT_REFERENCE_CHECK(bt))
+		ofp = pc->nullfp;
+	else
+		ofp = fp;
+
+        if (bt->flags & BT_TEXT_SYMBOLS) {
+		if (!(bt->flags & BT_TEXT_SYMBOLS_ALL))
+                	fprintf(ofp, "%sSTART: %s%s at %lx\n",
+                	    space(VADDR_PRLEN > 8 ? 14 : 6),
+                	    closest_symbol(bt->instptr), 
+			    STREQ(closest_symbol(bt->instptr), "thread_return") ?
+			    " (schedule)" : "",
+			    bt->instptr);
+        } else if (bt->flags & BT_START) {
+                x86_64_print_stack_entry(bt, ofp, level,
+                        0, bt->instptr);
+		bt->flags &= ~BT_START;
+		level++;
+	}
+
+
+        if ((estack = x86_64_in_exception_stack(bt))) {
+in_exception_stack:
+		bt->flags |= BT_EXCEPTION_STACK;
+		/*
+	 	 *  The stack buffer will have been loaded with the process
+		 *  stack, so switch to the indicated exception stack.
+		 */
+                bt->stackbase = estack;
+                bt->stacktop = estack + ms->stkinfo.esize;
+                bt->stackbuf = ms->irqstack;
+
+                if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
+                    bt->stacktop - bt->stackbase,
+		    bt->hp && (bt->hp->esp == bt->stkptr) ? 
+	 	    "irqstack contents via hook" : "irqstack contents", 
+		    RETURN_ON_ERROR))
+                    	error(FATAL, "read of exception stack at %lx failed\n",
+                        	bt->stackbase);
+
+		/*
+	 	 *  If irq_eframe is set, we've jumped back here from the
+		 *  IRQ stack dump below.  Do basically the same thing as if
+		 *  we had come from the process stack, but presume that we
+		 *  must have been in kernel mode, i.e., took an exception
+	 	 *  while operating on an IRQ stack.  (untested)
+		 */
+                if (irq_eframe) {
+                        bt->flags |= BT_EXCEPTION_FRAME;
+                        i = (irq_eframe - bt->stackbase)/sizeof(ulong);
+                        x86_64_print_stack_entry(bt, ofp, level, i, 
+				bt->instptr);
+                        bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME;
+                        cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0,
+                        	bt->stackbuf + (irq_eframe - bt->stackbase), 
+				bt, ofp);
+                        rsp += SIZE(pt_regs);  /* guaranteed kernel mode */
+                        level++;
+                        irq_eframe = 0;
+                }
+
+		stacktop = bt->stacktop - SIZE(pt_regs);
+
+		bt->flags &= ~BT_FRAMESIZE_DISABLE;
+
+        	for (i = (rsp - bt->stackbase)/sizeof(ulong);
+	     	    !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) {
+
+			up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);
+
+			if (!is_kernel_text(*up))
+		        	continue;
+
+	                switch (x86_64_print_stack_entry(bt, ofp, level, i,*up))
+	                {
+	                case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED:
+				rsp += SIZE(pt_regs);
+				i += SIZE(pt_regs)/sizeof(ulong);
+	                case BACKTRACE_ENTRY_DISPLAYED:
+	                        level++;
+				if ((framesize = x86_64_get_framesize(bt, *up)) >= 0) {
+					rsp += framesize;
+					i += framesize/sizeof(ulong);
+				}
+	                        break;
+	                case BACKTRACE_ENTRY_IGNORED:
+	                        break;
+	                case BACKTRACE_COMPLETE:
+	                        done = TRUE;
+	                        break;
+	                }
+		}
+
+                cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, 
+			bt->stackbuf + (bt->stacktop - bt->stackbase) - 
+			SIZE(pt_regs), bt, ofp);
+
+		if (!BT_REFERENCE_CHECK(bt))
+			fprintf(fp, "--- <exception stack> ---\n");
+
+                /* 
+		 *  stack = (unsigned long *) estack_end[-2]; 
+		 */
+		up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]);
+		up -= 2;
+		rsp = bt->stkptr = *up;
+		up -= 3;
+		bt->instptr = *up;  
+		if (cs & 3)
+			done = TRUE;   /* user-mode exception */
+		else
+			done = FALSE;  /* kernel-mode exception */
+		bt->frameptr = 0;
+
+		/*
+		 *  Print the return values from the estack end.
+		 */
+		if (!done) {
+                	bt->flags |= BT_START;
+                	x86_64_print_stack_entry(bt, ofp, level,
+                        	0, bt->instptr);
+                	bt->flags &= ~(BT_START|BT_FRAMESIZE_DISABLE);
+			level++;
+			if ((framesize = x86_64_get_framesize(bt, bt->instptr)) >= 0)
+				rsp += framesize;
+		}
+	}
+
+	/*
+	 *  IRQ stack entry always comes in via the process stack, regardless
+	 *  of whether it happened while running in user or kernel space.
+	 */
+        if (!done && (irqstack = x86_64_in_irqstack(bt))) {
+		bt->flags |= BT_IRQSTACK;
+		/*
+		 *  Until coded otherwise, the stackbase will be pointing to
+		 *  either the exception stack or, more likely, the process
+		 *  stack base.  Switch it to the IRQ stack.
+		 */
+                bt->stackbase = irqstack;
+                bt->stacktop = irqstack + ms->stkinfo.isize;
+                bt->stackbuf = ms->irqstack;
+
+                if (!readmem(bt->stackbase, KVADDR, 
+	  	    bt->stackbuf, bt->stacktop - bt->stackbase,
+                    bt->hp && (bt->hp->esp == bt_in->stkptr) ?
+		    "irqstack contents via hook" : "irqstack contents", 
+		    RETURN_ON_ERROR))
+                    	error(FATAL, "read of IRQ stack at %lx failed\n",
+				bt->stackbase);
+
+		stacktop = bt->stacktop - 64; /* from kernel code */
+
+		bt->flags &= ~BT_FRAMESIZE_DISABLE;
+
+                for (i = (rsp - bt->stackbase)/sizeof(ulong);
+                    !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) {
+
+                        up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);
+
+                        if (!is_kernel_text(*up))
+                                continue;
+
+                        switch (x86_64_print_stack_entry(bt, ofp, level, i,*up))
+                        {
+			case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED:
+				rsp += SIZE(pt_regs);
+				i += SIZE(pt_regs)/sizeof(ulong);
+                        case BACKTRACE_ENTRY_DISPLAYED:
+                                level++;
+				if ((framesize = x86_64_get_framesize(bt, *up)) >= 0) {
+					rsp += framesize;
+					i += framesize/sizeof(ulong);
+				}
+                                break;
+                        case BACKTRACE_ENTRY_IGNORED:
+                                break;
+                        case BACKTRACE_COMPLETE:
+                                done = TRUE;
+                                break;
+                        }
+                }
+
+		if (!BT_REFERENCE_CHECK(bt))
+                	fprintf(fp, "--- <IRQ stack> ---\n");
+
+                /*
+		 *  stack = (unsigned long *) (irqstack_end[-1]);
+		 *  (where irqstack_end is 64 bytes below page end)
+                 */
+                up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]);
+                up -= 1;
+                irq_eframe = rsp = bt->stkptr = (*up) - ms->irq_eframe_link;
+		up -= 1;
+                bt->instptr = *up;
+		/*
+		 *  No exception frame when coming from call_softirq.
+		 */
+		if ((sp = value_search(bt->instptr, &offset)) && 
+		    STREQ(sp->name, "call_softirq"))
+			irq_eframe = 0;
+                bt->frameptr = 0;
+                done = FALSE;
+        } else
+		irq_eframe = 0;
+
+        if (!done && (estack = x86_64_in_exception_stack(bt))) 
+		goto in_exception_stack;
+
+	if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) {
+		/*
+		 *  Verify that the rsp pointer taken from either the
+		 *  exception or IRQ stack points into the process stack.
+		 */
+		bt->stackbase = GET_STACKBASE(bt->tc->task);
+		bt->stacktop = GET_STACKTOP(bt->tc->task);
+
+		if (!INSTACK(rsp, bt)) {
+			switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))
+			{
+			case (BT_EXCEPTION_STACK|BT_IRQSTACK):
+				error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P,
+					bt_in->stkptr, bt->stkptr, rsp,
+					bt->stackbase);
+
+			case BT_EXCEPTION_STACK:
+				error(FATAL, STACK_TRANSITION_ERRMSG_E_P,
+					bt_in->stkptr, rsp, bt->stackbase);
+
+			case BT_IRQSTACK:
+				error(FATAL, STACK_TRANSITION_ERRMSG_I_P,
+					bt_in->stkptr, rsp, bt->stackbase);
+			}
+		}
+
+		/*
+	 	 *  Now fill the local stack buffer from the process stack.
+	  	 */
+               	if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
+                    bt->stacktop - bt->stackbase, 
+		    "irqstack contents", RETURN_ON_ERROR))
+                	error(FATAL, "read of process stack at %lx failed\n",
+				bt->stackbase);
+	}
+
+	/*
+	 *  For a normally blocked task, hand-create the first level.
+	 */
+        if (!done && 
+	    !(bt->flags & (BT_TEXT_SYMBOLS|BT_EXCEPTION_STACK|BT_IRQSTACK)) &&
+	    STREQ(closest_symbol(bt->instptr), "thread_return")) {
+		bt->flags |= BT_SCHEDULE;
+		i = (rsp - bt->stackbase)/sizeof(ulong);
+		x86_64_print_stack_entry(bt, ofp, level, 
+			i, bt->instptr);
+		bt->flags &= ~(ulonglong)BT_SCHEDULE;
+		rsp += sizeof(ulong);
+		level++;
+	}
+
+	/*
+	 *  Dump the IRQ exception frame from the process stack.
+	 *  If the CS register indicates a user exception frame,
+	 *  then set done to TRUE to avoid the process stack walk-through.
+	 *  Otherwise, bump up the rsp past the kernel-mode eframe.
+	 */
+        if (irq_eframe) {
+                bt->flags |= BT_EXCEPTION_FRAME;
+                i = (irq_eframe - bt->stackbase)/sizeof(ulong);
+                x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr);
+                bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME;
+                cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, 
+			bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp);
+		if (cs & 3)
+			done = TRUE;   /* IRQ from user-mode */
+		else {
+			if (x86_64_print_eframe_location(rsp, level, ofp))
+				level++;
+			rsp += SIZE(pt_regs);
+			irq_eframe = 0;
+		}
+		level++;
+        }
+
+	/*
+	 *  Walk the process stack.  
+	 */
+
+	bt->flags &= ~BT_FRAMESIZE_DISABLE;
+
+        for (i = (rsp - bt->stackbase)/sizeof(ulong);
+	     !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) {
+
+		up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);
+
+		if (!is_kernel_text(*up))
+			continue;
+
+		if ((bt->flags & BT_CHECK_CALLER)) {
+			/*
+			 *  A non-zero offset value from the value_search() 
+			 *  lets us know if it's a real text return address.
+			 */
+			spt = value_search(*up, &offset);
+			if (!offset && !(bt->flags & BT_FRAMESIZE_DISABLE))
+				continue;
+
+			/*
+		         *  sp gets the syment of the function that the text 
+			 *  routine above called before leaving its return 
+			 *  address on the stack -- if it can be determined.
+			 */
+			sp = x86_64_function_called_by((*up)-5); 
+
+			if (sp == NULL) {
+				/* 
+				 *  We were unable to get the called function.
+				 *  If the text address had an offset, then
+				 *  it must have made an indirect call, and
+				 *  can't have called our target function.
+				 */
+				if (offset) {
+					if (CRASHDEBUG(1))
+						fprintf(ofp, 
+                       "< ignoring %s() -- makes indirect call and NOT %s()>\n",
+						    	spt->name, 
+						    	bt->call_target);
+					continue;
+				}
+			} else if ((machdep->flags & SCHED_TEXT) &&
+				STREQ(bt->call_target, "schedule") &&
+				STREQ(sp->name, "__sched_text_start")) {
+				;  /*  bait and switch */
+			} else if (!STREQ(sp->name, bt->call_target)) {
+				/*
+				 *  We got the function called by the text routine,
+				 *  but it's not our target function.
+				 */
+				if (CRASHDEBUG(2))
+					fprintf(ofp, 
+ 		                "< ignoring %s() -- calls %s() and NOT %s()>\n",
+						spt->name, sp->name, 
+						bt->call_target);
+				continue;
+			}
+		}
+
+		switch (x86_64_print_stack_entry(bt, ofp, level, i,*up))
+		{
+		case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED:
+			last_process_stack_eframe = rsp + 8;
+			if (x86_64_print_eframe_location(last_process_stack_eframe, level, ofp))
+				level++;
+			rsp += SIZE(pt_regs);
+			i += SIZE(pt_regs)/sizeof(ulong);
+		case BACKTRACE_ENTRY_DISPLAYED:
+			level++;
+			if ((framesize = x86_64_get_framesize(bt, *up)) >= 0) {
+				rsp += framesize;
+				i += framesize/sizeof(ulong);
+			}
+			break;
+		case BACKTRACE_ENTRY_IGNORED:	
+			break;
+		case BACKTRACE_COMPLETE:
+			done = TRUE;
+			break;
+		}
+        }
+
+        if (!irq_eframe && !is_kernel_thread(bt->tc->task) &&
+            (GET_STACKBASE(bt->tc->task) == bt->stackbase)) {
+		user_mode_eframe = bt->stacktop - SIZE(pt_regs);
+		if (last_process_stack_eframe < user_mode_eframe)
+                	x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf +
+                        	(bt->stacktop - bt->stackbase) - SIZE(pt_regs),
+                        	bt, ofp);
+	}
+
+        if (bt->flags & BT_TEXT_SYMBOLS) {
+        	if (BT_REFERENCE_FOUND(bt)) {
+                	print_task_header(fp, task_to_context(bt->task), 0);
+			BCOPY(bt_in, bt, sizeof(struct bt_info));
+                	bt->ref = NULL;
+                	machdep->back_trace(bt);
+                	fprintf(fp, "\n");
+        	}
+	}
+}
+
+/*
+ *  Use dwarf CFI encodings to correctly follow the call chain.
+ */
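+/*
+ *  Rough idea (a hedged summary added for readability, not a description of
+ *  dwarf_backtrace()'s internals, which are outside this hunk): instead of
+ *  scanning for kernel-text values, the DWARF CFI data is used to compute
+ *  each caller's frame from the current rip/rsp, so only genuine return
+ *  addresses are reported.  This routine only handles the stack transitions
+ *  and delegates each stack's walk to dwarf_backtrace(bt, level, stacktop).
+ */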
+static void
+x86_64_dwarf_back_trace_cmd(struct bt_info *bt_in)
+{
+	int i, level, done;
+	ulong rsp, offset, stacktop;
+	ulong *up;
+	long cs;
+	struct syment *sp;
+	FILE *ofp;
+	ulong estack, irqstack;
+	ulong irq_eframe;
+	struct bt_info bt_local, *bt;
+	struct machine_specific *ms;
+	ulong last_process_stack_eframe;
+	ulong user_mode_eframe;
+
+	/*
+	 *  User may have made a run-time switch.
+	 */
+        if (!(kt->flags & DWARF_UNWIND)) {
+                machdep->back_trace = x86_64_low_budget_back_trace_cmd;
+                x86_64_low_budget_back_trace_cmd(bt_in);
+                return;
+        }
+
+	bt = &bt_local;
+	BCOPY(bt_in, bt, sizeof(struct bt_info));
+
+        if (bt->flags & BT_FRAMESIZE_DEBUG) {
+		dwarf_debug(bt);
+		return;
+	}
+
+	level = 0;
+	done = FALSE;
+	irq_eframe = 0;
+	last_process_stack_eframe = 0;
+	bt->call_target = NULL;
+	bt->bptr = 0;
+	rsp = bt->stkptr;
+	if (!rsp) {
+		error(INFO, "cannot determine starting stack pointer\n");
+		return;
+	}
+	ms = machdep->machspec;
+	if (BT_REFERENCE_CHECK(bt))
+		ofp = pc->nullfp;
+	else
+		ofp = fp;
+
+        if (bt->flags & BT_TEXT_SYMBOLS) {
+		if (!(bt->flags & BT_TEXT_SYMBOLS_ALL))
+                	fprintf(ofp, "%sSTART: %s%s at %lx\n",
+                	    space(VADDR_PRLEN > 8 ? 14 : 6),
+                	    closest_symbol(bt->instptr), 
+			    STREQ(closest_symbol(bt->instptr), "thread_return") ?
+			    " (schedule)" : "",
+			    bt->instptr);
+        } else if (bt->flags & BT_START) {
+                x86_64_print_stack_entry(bt, ofp, level,
+                        0, bt->instptr);
+		bt->flags &= ~BT_START;
+		level++;
+	}
+
+
+        if ((estack = x86_64_in_exception_stack(bt))) {
+in_exception_stack:
+		bt->flags |= BT_EXCEPTION_STACK;
+		/*
+	 	 *  The stack buffer will have been loaded with the process
+		 *  stack, so switch to the indicated exception stack.
+		 */
+                bt->stackbase = estack;
+                bt->stacktop = estack + ms->stkinfo.esize;
+                bt->stackbuf = ms->irqstack;
+
+                if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
+                    bt->stacktop - bt->stackbase,
+		    bt->hp && (bt->hp->esp == bt->stkptr) ? 
+	 	    "irqstack contents via hook" : "irqstack contents", 
+		    RETURN_ON_ERROR))
+                    	error(FATAL, "read of exception stack at %lx failed\n",
+                        	bt->stackbase);
+
+		/*
+	 	 *  If irq_eframe is set, we've jumped back here from the
+		 *  IRQ stack dump below.  Do basically the same thing as if
+		 *  we had come from the process stack, but presume that we
+		 *  must have been in kernel mode, i.e., took an exception
+	 	 *  while operating on an IRQ stack.  (untested)
+		 */
+                if (irq_eframe) {
+                        bt->flags |= BT_EXCEPTION_FRAME;
+                        i = (irq_eframe - bt->stackbase)/sizeof(ulong);
+                        x86_64_print_stack_entry(bt, ofp, level, i, 
+				bt->instptr);
+                        bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME;
+                        cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0,
+                        	bt->stackbuf + (irq_eframe - bt->stackbase), 
+				bt, ofp);
+                        rsp += SIZE(pt_regs);  /* guaranteed kernel mode */
+                        level++;
+                        irq_eframe = 0;
+                }
+
+		stacktop = bt->stacktop - SIZE(pt_regs);
+		
+		if (!done) {
+			level = dwarf_backtrace(bt, level, stacktop);
+			done = TRUE;
+		}
+
+                cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, 
+			bt->stackbuf + (bt->stacktop - bt->stackbase) - 
+			SIZE(pt_regs), bt, ofp);
+
+		if (!BT_REFERENCE_CHECK(bt))
+			fprintf(fp, "--- <exception stack> ---\n");
+
+                /* 
+		 *  stack = (unsigned long *) estack_end[-2]; 
+		 */
+		up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]);
+		up -= 2;
+		rsp = bt->stkptr = *up;
+		up -= 3;
+		bt->instptr = *up;  
+		if (cs & 3)
+			done = TRUE;   /* user-mode exception */
+		else
+			done = FALSE;  /* kernel-mode exception */
+		bt->frameptr = 0;
+
+		/*
+		 *  Print the return values from the estack end.
+		 */
+		if (!done) {
+                	bt->flags |= BT_START;
+                	x86_64_print_stack_entry(bt, ofp, level,
+                        	0, bt->instptr);
+                	bt->flags &= ~BT_START;
+			level++;
+		}
+	}
+
+	/*
+	 *  IRQ stack entry always comes in via the process stack, regardless
+	 *  of whether it happened while running in user or kernel space.
+	 */
+        if (!done && (irqstack = x86_64_in_irqstack(bt))) {
+		bt->flags |= BT_IRQSTACK;
+		/*
+		 *  Until coded otherwise, the stackbase will be pointing to
+		 *  either the exception stack or, more likely, the process
+		 *  stack base.  Switch it to the IRQ stack.
+		 */
+                bt->stackbase = irqstack;
+                bt->stacktop = irqstack + ms->stkinfo.isize;
+                bt->stackbuf = ms->irqstack;
+
+                if (!readmem(bt->stackbase, KVADDR, 
+	  	    bt->stackbuf, bt->stacktop - bt->stackbase,
+                    bt->hp && (bt->hp->esp == bt_in->stkptr) ?
+		    "irqstack contents via hook" : "irqstack contents", 
+		    RETURN_ON_ERROR))
+                    	error(FATAL, "read of IRQ stack at %lx failed\n",
+				bt->stackbase);
+
+		stacktop = bt->stacktop - 64; /* from kernel code */
+
+		if (!done) {
+			level = dwarf_backtrace(bt, level, stacktop);
+			done = TRUE;
+		}
+
+		if (!BT_REFERENCE_CHECK(bt))
+                	fprintf(fp, "--- <IRQ stack> ---\n");
+
+                /*
+		 *  stack = (unsigned long *) (irqstack_end[-1]);
+		 *  (where irqstack_end is 64 bytes below page end)
+                 */
+                up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]);
+                up -= 1;
+                irq_eframe = rsp = bt->stkptr = (*up) - ms->irq_eframe_link;
+		up -= 1;
+                bt->instptr = *up;
+                /*
+                 *  No exception frame when coming from call_softirq.
+                 */
+                if ((sp = value_search(bt->instptr, &offset)) &&
+                    STREQ(sp->name, "call_softirq"))
+                        irq_eframe = 0;
+                bt->frameptr = 0;
+                done = FALSE;
+        } else
+		irq_eframe = 0;
+
+        if (!done && (estack = x86_64_in_exception_stack(bt))) 
+		goto in_exception_stack;
+
+	if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) {
+		/*
+		 *  Verify that the rsp pointer taken from either the
+		 *  exception or IRQ stack points into the process stack.
+		 */
+		bt->stackbase = GET_STACKBASE(bt->tc->task);
+		bt->stacktop = GET_STACKTOP(bt->tc->task);
+
+		if (!INSTACK(rsp, bt)) {
+			switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))
+			{
+			case (BT_EXCEPTION_STACK|BT_IRQSTACK):
+				error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P,
+					bt_in->stkptr, bt->stkptr, rsp,
+					bt->stackbase);
+
+			case BT_EXCEPTION_STACK:
+				error(FATAL, STACK_TRANSITION_ERRMSG_E_P,
+					bt_in->stkptr, rsp, bt->stackbase);
+
+			case BT_IRQSTACK:
+				error(FATAL, STACK_TRANSITION_ERRMSG_I_P,
+					bt_in->stkptr, rsp, bt->stackbase);
+			}
+		}
+
+		/*
+	 	 *  Now fill the local stack buffer from the process stack.
+	  	 */
+               	if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
+                    bt->stacktop - bt->stackbase, 
+		    "irqstack contents", RETURN_ON_ERROR))
+                	error(FATAL, "read of process stack at %lx failed\n",
+				bt->stackbase);
+	}
+
+	/*
+	 *  Dump the IRQ exception frame from the process stack.
+	 *  If the CS register indicates a user exception frame,
+	 *  then set done to TRUE to avoid the process stack walk-through.
+	 *  Otherwise, bump up the rsp past the kernel-mode eframe.
+	 */
+        if (irq_eframe) {
+                bt->flags |= BT_EXCEPTION_FRAME;
+		level = dwarf_print_stack_entry(bt, level);
+                bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME;
+                cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, 
+			bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp);
+		if (cs & 3)
+			done = TRUE;   /* IRQ from user-mode */
+		else {
+			if (x86_64_print_eframe_location(rsp, level, ofp))
+				level++;
+			rsp += SIZE(pt_regs);
+			irq_eframe = 0;
+		}
+		level++;
+        }
+
+	/*
+	 *  Walk the process stack.  
+	 */
+	if (!done) {
+		level = dwarf_backtrace(bt, level, bt->stacktop);
+		done = TRUE;
+	}
+
+        if (!irq_eframe && !is_kernel_thread(bt->tc->task) &&
+            (GET_STACKBASE(bt->tc->task) == bt->stackbase)) {
+		user_mode_eframe = bt->stacktop - SIZE(pt_regs);
+		if (last_process_stack_eframe < user_mode_eframe)
+                	x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf +
+                        	(bt->stacktop - bt->stackbase) - SIZE(pt_regs),
+                        	bt, ofp);
+	}
+
+        if (bt->flags & BT_TEXT_SYMBOLS) {
+        	if (BT_REFERENCE_FOUND(bt)) {
+                	print_task_header(fp, task_to_context(bt->task), 0);
+			BCOPY(bt_in, bt, sizeof(struct bt_info));
+                	bt->ref = NULL;
+                	machdep->back_trace(bt);
+                	fprintf(fp, "\n");
+        	}
+	}
+}
+
+/*
+ *  Functions that won't be called indirectly.
+ *  Add more to this as they are discovered.
+ */
+static const char *direct_call_targets[] = {
+        "schedule",
+        "schedule_timeout",
+	NULL
+};
+
+static int
+is_direct_call_target(struct bt_info *bt)
+{
+	int i;
+
+	if (!bt->call_target || (bt->flags & BT_NO_CHECK_CALLER))
+		return FALSE;
+
+	if (strstr(bt->call_target, "schedule") &&
+	    is_task_active(bt->task))
+		return FALSE;
+
+	for (i = 0; direct_call_targets[i]; i++) {
+		if (STREQ(direct_call_targets[i], bt->call_target)) 
+			return TRUE;
+	}
+
+	return FALSE;
+}
+
+static struct syment *
+x86_64_function_called_by(ulong rip)
+{
+	struct syment *sp;
+	char buf[BUFSIZE], *p1;
+	ulong value, offset;
+	unsigned char byte;
+
+	value = 0;
+	sp = NULL;
+
+        if (!readmem(rip, KVADDR, &byte, sizeof(unsigned char), "call byte",
+            RETURN_ON_ERROR)) 
+		return sp;
+
+        if (byte != 0xe8) 
+		return sp;
+
+        sprintf(buf, "x/i 0x%lx", rip);
+
+        open_tmpfile2();
+	if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) {
+	        rewind(pc->tmpfile2);
+	        while (fgets(buf, BUFSIZE, pc->tmpfile2)) {
+			if ((p1 = strstr(buf, "callq")) &&
+			    whitespace(*(p1-1))) { 
+				if (extract_hex(p1, &value, NULLCHAR, TRUE)) 
+					break;
+			}
+		}
+	}
+        close_tmpfile2();
+
+	if (value)
+		sp = value_search(value, &offset);
+
+	/*
+	 *  Functions that jmp to schedule() or schedule_timeout().
+	 */
+	if (sp) {
+	    	if ((STREQ(sp->name, "schedule_timeout_interruptible") ||
+	             STREQ(sp->name, "schedule_timeout_uninterruptible")))
+			sp = symbol_search("schedule_timeout");
+
+		if (STREQ(sp->name, "__cond_resched"))
+			sp = symbol_search("schedule");
+	}
+
+	return sp;
+}
+
+/*
+ *  Unroll the kernel stack using a minimal amount of gdb services.
+ */
+static void
+x86_64_back_trace(struct gnu_request *req, struct bt_info *bt)
+{
+	error(FATAL, "x86_64_back_trace: unused\n");
+}
+
+
+/*
+ *  Print exception frame information for x86_64.
+ *
+ *    Pid: 0, comm: swapper Not tainted 2.6.5-1.360phro.rootsmp
+ *    RIP: 0010:[<ffffffff8010f534>] <ffffffff8010f534>{default_idle+36}
+ *    RSP: 0018:ffffffff8048bfd8  EFLAGS: 00000246
+ *    RAX: 0000000000000000 RBX: ffffffff8010f510 RCX: 0000000000000018
+ *    RDX: 0000010001e37280 RSI: ffffffff803ac0a0 RDI: 000001007f43c400
+ *    RBP: 0000000000000000 R08: ffffffff8048a000 R09: 0000000000000000
+ *    R10: ffffffff80482188 R11: 0000000000000001 R12: 0000000000000000
+ *    R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+ *    FS:  0000002a96e14fc0(0000) GS:ffffffff80481d80(0000) GS:0000000055578aa0
+ *    CS:  0010 DS: 0018 ES: 0018 CR0: 000000008005003b
+ *    CR2: 0000002a9556b000 CR3: 0000000000101000 CR4: 00000000000006e0
+ *
+ */
+
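+/*
+ *  (Added note) x86_64_exception_frame() below reads a pt_regs image either
+ *  from a kernel address or from a caller-supplied stack buffer, resolving
+ *  the member offsets once at runtime so that both the old rax/rip/eflags
+ *  and the post-merge ax/ip/flags member names are handled.
+ */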
+static long 
+x86_64_exception_frame(ulong flags, ulong kvaddr, char *local, 
+	struct bt_info *bt, FILE *ofp)
+{
+        long rip, rsp, cs, ss, rflags, orig_rax, rbp; 
+	long rax, rbx, rcx, rdx, rsi, rdi;
+        long r8, r9, r10, r11, r12, r13, r14, r15;
+	struct machine_specific *ms;
+	struct syment *sp;
+	ulong offset;
+	char *pt_regs_buf;
+	long verified;
+	int err;
+
+        ms = machdep->machspec;
+
+	if (!(machdep->flags & PT_REGS_INIT)) {
+		err = 0;
+		err |= ((ms->pto.r15 = MEMBER_OFFSET("pt_regs", "r15")) == 
+			INVALID_OFFSET);
+		err |= ((ms->pto.r14 = MEMBER_OFFSET("pt_regs", "r14")) == 
+			INVALID_OFFSET);
+		err |= ((ms->pto.r13 = MEMBER_OFFSET("pt_regs", "r13")) == 
+			INVALID_OFFSET);
+		err |= ((ms->pto.r12 = MEMBER_OFFSET("pt_regs", "r12")) == 
+			INVALID_OFFSET);
+		err |= ((ms->pto.r11 = MEMBER_OFFSET("pt_regs", "r11")) == 
+			INVALID_OFFSET);
+		err |= ((ms->pto.r10 = MEMBER_OFFSET("pt_regs", "r10")) == 
+			INVALID_OFFSET);
+		err |= ((ms->pto.r9 = MEMBER_OFFSET("pt_regs", "r9")) == 
+			INVALID_OFFSET);
+		err |= ((ms->pto.r8 = MEMBER_OFFSET("pt_regs", "r8")) == 
+			INVALID_OFFSET);
+		err |= ((ms->pto.cs = MEMBER_OFFSET("pt_regs", "cs")) == 
+			INVALID_OFFSET);
+		err |= ((ms->pto.ss = MEMBER_OFFSET("pt_regs", "ss")) == 
+			INVALID_OFFSET);
+		/*
+		 *  x86/x86_64 merge changed traditional register names.
+		 */
+		if (((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "rbp")) == 
+		    INVALID_OFFSET) &&
+		    ((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "bp")) == 
+		    INVALID_OFFSET))
+			err++; 
+		if (((ms->pto.rax = MEMBER_OFFSET("pt_regs", "rax")) == 
+		    INVALID_OFFSET) &&
+		    ((ms->pto.rax = MEMBER_OFFSET("pt_regs", "ax")) == 
+		    INVALID_OFFSET))
+			err++; 
+		if (((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "rbx")) == 
+		    INVALID_OFFSET) &&
+		    ((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "bx")) == 
+		    INVALID_OFFSET))
+			err++; 
+		if (((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "rcx")) == 
+		    INVALID_OFFSET) &&
+		    ((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "cx")) == 
+		    INVALID_OFFSET))
+			err++; 
+		if (((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "rdx")) == 
+		    INVALID_OFFSET) &&
+		    ((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "dx")) == 
+		    INVALID_OFFSET))
+			err++; 
+		if (((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "rsi")) == 
+		    INVALID_OFFSET) &&
+		    ((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "si")) == 
+		    INVALID_OFFSET))
+			err++; 
+		if (((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "rdi")) == 
+		    INVALID_OFFSET) &&
+		    ((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "di")) == 
+		    INVALID_OFFSET))
+			err++; 
+		if (((ms->pto.rip = MEMBER_OFFSET("pt_regs", "rip")) == 
+		    INVALID_OFFSET) &&
+		    ((ms->pto.rip = MEMBER_OFFSET("pt_regs", "ip")) == 
+		    INVALID_OFFSET))
+			err++; 
+		if (((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "rsp")) == 
+		    INVALID_OFFSET) &&
+		    ((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "sp")) == 
+		    INVALID_OFFSET))
+			err++; 
+		if (((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "eflags")) == 
+		    INVALID_OFFSET) &&
+		    ((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "flags")) == 
+		    INVALID_OFFSET))
+			err++; 
+		if (((ms->pto.orig_rax = MEMBER_OFFSET("pt_regs", "orig_rax")) == 
+		    INVALID_OFFSET) &&
+		    ((ms->pto.orig_rax = MEMBER_OFFSET("pt_regs", "orig_ax")) == 
+		    INVALID_OFFSET))
+			err++; 
+
+		if (err)
+			error(WARNING, "pt_regs structure has changed\n");
+
+		machdep->flags |= PT_REGS_INIT;
+	}
+
+	if (kvaddr) {
+		pt_regs_buf = GETBUF(SIZE(pt_regs));
+        	readmem(kvaddr, KVADDR, pt_regs_buf,
+                	SIZE(pt_regs), "pt_regs", FAULT_ON_ERROR);
+	} else
+		pt_regs_buf = local;
+
+	rip = ULONG(pt_regs_buf + ms->pto.rip);
+	rsp = ULONG(pt_regs_buf + ms->pto.rsp);
+	cs = ULONG(pt_regs_buf + ms->pto.cs);
+	ss = ULONG(pt_regs_buf + ms->pto.ss);
+	rflags = ULONG(pt_regs_buf + ms->pto.eflags);
+	orig_rax = ULONG(pt_regs_buf + ms->pto.orig_rax);
+	rbp = ULONG(pt_regs_buf + ms->pto.rbp);
+	rax = ULONG(pt_regs_buf + ms->pto.rax);
+	rbx = ULONG(pt_regs_buf + ms->pto.rbx);
+	rcx = ULONG(pt_regs_buf + ms->pto.rcx);
+	rdx = ULONG(pt_regs_buf + ms->pto.rdx);
+	rsi = ULONG(pt_regs_buf + ms->pto.rsi);
+	rdi = ULONG(pt_regs_buf + ms->pto.rdi);
+	r8 = ULONG(pt_regs_buf + ms->pto.r8);
+	r9 = ULONG(pt_regs_buf + ms->pto.r9);
+	r10 = ULONG(pt_regs_buf + ms->pto.r10);
+	r11 = ULONG(pt_regs_buf + ms->pto.r11);
+	r12 = ULONG(pt_regs_buf + ms->pto.r12);
+	r13 = ULONG(pt_regs_buf + ms->pto.r13);
+	r14 = ULONG(pt_regs_buf + ms->pto.r14);
+	r15 = ULONG(pt_regs_buf + ms->pto.r15);
+
+        verified = x86_64_eframe_verify(bt, 
+		kvaddr ? kvaddr : (local - bt->stackbuf) + bt->stackbase,
+		cs, ss, rip, rsp, rflags);
+
+	/*
+	 *  If it's a print-if-verified request, don't print bogus eframes.
+	 */
+        if (!verified && ((flags & (EFRAME_VERIFY|EFRAME_PRINT)) == 
+	    (EFRAME_VERIFY|EFRAME_PRINT))) 
+		flags &= ~EFRAME_PRINT;
+
+	if (CRASHDEBUG(2)) 
+		fprintf(ofp, "< exception frame at: %lx >\n", kvaddr ?  kvaddr :
+			(local - bt->stackbuf) + bt->stackbase);
+
+	if (flags & EFRAME_PRINT) {
+		if (flags & EFRAME_SEARCH) {
+			fprintf(ofp, "\n  %s-MODE EXCEPTION FRAME AT: %lx\n",
+				cs & 3 ? "USER" : "KERNEL", 
+				kvaddr ?  kvaddr : 
+				(local - bt->stackbuf) + bt->stackbase);
+		} else if (!(cs & 3)) {
+			fprintf(ofp, "    [exception RIP: ");
+			if ((sp = value_search(rip, &offset))) {
+                		fprintf(ofp, "%s", sp->name);
+                		if (offset)
+                        		fprintf(ofp, (output_radix == 16) ? 
+						"+0x%lx" : "+%ld", offset);
+			} else
+                		fprintf(ofp, "unknown or invalid address");
+			fprintf(ofp, "]\n");
+		}
+		fprintf(ofp, "    RIP: %016lx  RSP: %016lx  RFLAGS: %08lx\n", 
+			rip, rsp, rflags);
+		fprintf(ofp, "    RAX: %016lx  RBX: %016lx  RCX: %016lx\n", 
+			rax, rbx, rcx);
+		fprintf(ofp, "    RDX: %016lx  RSI: %016lx  RDI: %016lx\n", 
+	 		rdx, rsi, rdi);
+		fprintf(ofp, "    RBP: %016lx   R8: %016lx   R9: %016lx\n", 
+			rbp, r8, r9);
+		fprintf(ofp, "    R10: %016lx  R11: %016lx  R12: %016lx\n", 
+			r10, r11, r12);
+		fprintf(ofp, "    R13: %016lx  R14: %016lx  R15: %016lx\n", 
+			r13, r14, r15);
+		fprintf(ofp, "    ORIG_RAX: %016lx  CS: %04lx  SS: %04lx\n", 
+			orig_rax, cs, ss);
+
+		if (!verified && CRASHDEBUG((pc->flags & RUNTIME) ? 0 : 1))
+			error(WARNING, "possibly bogus exception frame\n");
+	}
+
+        if ((flags & EFRAME_PRINT) && BT_REFERENCE_CHECK(bt)) {
+                x86_64_do_bt_reference_check(bt, rip, NULL);
+                x86_64_do_bt_reference_check(bt, rsp, NULL);
+                x86_64_do_bt_reference_check(bt, cs, NULL);
+                x86_64_do_bt_reference_check(bt, ss, NULL);
+                x86_64_do_bt_reference_check(bt, rflags, NULL);
+                x86_64_do_bt_reference_check(bt, orig_rax, NULL);
+                x86_64_do_bt_reference_check(bt, rbp, NULL);
+                x86_64_do_bt_reference_check(bt, rax, NULL);
+                x86_64_do_bt_reference_check(bt, rbx, NULL);
+                x86_64_do_bt_reference_check(bt, rcx, NULL);
+                x86_64_do_bt_reference_check(bt, rdx, NULL);
+                x86_64_do_bt_reference_check(bt, rsi, NULL);
+                x86_64_do_bt_reference_check(bt, rdi, NULL);
+                x86_64_do_bt_reference_check(bt, r8, NULL);
+                x86_64_do_bt_reference_check(bt, r9, NULL);
+                x86_64_do_bt_reference_check(bt, r10, NULL);
+                x86_64_do_bt_reference_check(bt, r11, NULL);
+                x86_64_do_bt_reference_check(bt, r12, NULL);
+                x86_64_do_bt_reference_check(bt, r13, NULL);
+                x86_64_do_bt_reference_check(bt, r14, NULL);
+                x86_64_do_bt_reference_check(bt, r15, NULL);
+        }
+
+	/* Remember the rip and rsp for unwinding the process stack */
+	if (kt->flags & DWARF_UNWIND){
+		bt->instptr = rip;
+		bt->stkptr = rsp;
+		bt->bptr = rbp;
+	}
+
+	if (kvaddr)
+		FREEBUF(pt_regs_buf);
+
+	if (flags & EFRAME_CS)
+		return cs;
+	else if (flags & EFRAME_VERIFY)
+		return verified;
+
+	return 0;
+}
+
+static int 
+x86_64_print_eframe_location(ulong eframe, int level, FILE *ofp)
+{
+	return FALSE;
+
+#ifdef NOTDEF
+	ulong rip;
+	char *pt_regs_buf;
+        struct machine_specific *ms;
+        struct syment *sp;
+
+        ms = machdep->machspec;
+
+        pt_regs_buf = GETBUF(SIZE(pt_regs));
+        if (!readmem(eframe, KVADDR, pt_regs_buf, SIZE(pt_regs), 
+	    "pt_regs", RETURN_ON_ERROR|QUIET)) {
+		FREEBUF(pt_regs_buf);
+		return FALSE;
+	}
+
+        rip = ULONG(pt_regs_buf + ms->pto.rip);
+	FREEBUF(pt_regs_buf);
+
+        if (!(sp = value_search(rip, NULL)))
+                return FALSE;
+
+        fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level+1,
+		eframe, sp->name, rip);
+
+	return TRUE;
+#endif
+}
+
+/*
+ *  Check that the verifiable registers contain reasonable data.
+ */
+#define RAZ_MASK 0xffffffffffc08028    /* return-as-zero bits */
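+/*
+ *  Reading of the check below (descriptive note, not part of the original
+ *  comment): a plausible RFLAGS value must have every bit in RAZ_MASK clear
+ *  and must have bit 1 (0x2) set, since that bit always reads as 1 on x86.
+ *  The CS/SS selector pairs tested afterwards (0x10/0x18 kernel, 0x33/0x2b
+ *  64-bit user, 0x23/0x2b 32-bit user, plus the Xen variants) are the values
+ *  this file expects for those modes.
+ */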
+
+static int 
+x86_64_eframe_verify(struct bt_info *bt, long kvaddr, long cs, long ss,
+	long rip, long rsp, long rflags)
+{
+	if ((rflags & RAZ_MASK) || !(rflags & 0x2))
+		return FALSE;
+
+        if ((cs == 0x10) && (ss == 0x18)) {
+                if (is_kernel_text(rip) && IS_KVADDR(rsp))
+                        return TRUE;
+
+                if (x86_64_is_module_addr(rip) &&
+		    IS_KVADDR(rsp) &&
+		    (rsp == (kvaddr + SIZE(pt_regs))))
+                        return TRUE;
+        }
+
+        if ((cs == 0x10) && kvaddr) {
+                if (is_kernel_text(rip) && IS_KVADDR(rsp) &&
+		    (rsp == (kvaddr + SIZE(pt_regs) + 8)))
+                        return TRUE;
+	}
+
+        if ((cs == 0x10) && kvaddr) {
+                if (is_kernel_text(rip) && IS_KVADDR(rsp) &&
+		    (rsp == (kvaddr + SIZE(pt_regs))))
+                        return TRUE;
+	}
+
+	if ((cs == 0x10) && kvaddr) {
+                if (is_kernel_text(rip) && IS_KVADDR(rsp) &&
+		    x86_64_in_exception_stack(bt))
+			return TRUE;
+	}
+
+        if ((cs == 0x33) && (ss == 0x2b)) {
+                if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc))
+                        return TRUE;
+        }
+
+        if (XEN() && ((cs == 0x33) || (cs == 0xe033)) && 
+	    ((ss == 0x2b) || (ss == 0xe02b))) {
+                if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc))
+                        return TRUE;
+        }
+
+	if (XEN() && ((cs == 0x10000e030) || (cs == 0xe030)) && 
+	    (ss == 0xe02b)) {
+                if (is_kernel_text(rip) && IS_KVADDR(rsp))
+                        return TRUE;
+	}
+
+	/* 
+	 *  32-bit segments 
+	 */
+        if ((cs == 0x23) && (ss == 0x2b)) {
+                if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc))
+                        return TRUE;
+        }
+
+	return FALSE;
+}
+
+/*
+ *  Get a stack frame combination of pc and ra from the most relevant spot.
+ */
+static void
+x86_64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
+{
+	if (bt->flags & BT_DUMPFILE_SEARCH)
+		return x86_64_get_dumpfile_stack_frame(bt, pcp, spp);
+
+        if (pcp)
+                *pcp = x86_64_get_pc(bt);
+        if (spp)
+                *spp = x86_64_get_sp(bt);
+}
+
+/*
+ *  Get the starting point for the active cpus in a diskdump/netdump.
+ */
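+/*
+ *  Search order, as implemented below (summary added for readability): the
+ *  process stack is scanned first for a crash/panic signature such as
+ *  crash_kexec(), panic(), die() or sysrq_handle_crash(); failing that, the
+ *  cpu's IRQ stack and then each of its seven exception stacks are scanned;
+ *  as a last resort the rip/rsp captured in the ELF header (or an idle/halt
+ *  location) is used, or BT_DUMPFILE_SEARCH is dropped and the generic
+ *  machdep->get_stack_frame() path is retried.
+ */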
+static void
+x86_64_get_dumpfile_stack_frame(struct bt_info *bt_in, ulong *rip, ulong *rsp) 
+{
+	int panic_task;
+        int i, estack, panic, stage;
+        char *sym;
+	struct syment *sp;
+        ulong *up;
+	struct bt_info bt_local, *bt;
+        struct machine_specific *ms;
+	char *user_regs;
+	ulong ur_rip, ur_rsp;
+	ulong halt_rip, halt_rsp;
+	ulong crash_kexec_rip, crash_kexec_rsp;
+
+        bt = &bt_local;
+        BCOPY(bt_in, bt, sizeof(struct bt_info));
+        ms = machdep->machspec;
+	ur_rip = ur_rsp = 0;
+	halt_rip = halt_rsp = 0;
+	crash_kexec_rip = crash_kexec_rsp = 0;
+	stage = 0;
+	estack = -1;
+
+	panic_task = tt->panic_task == bt->task ? TRUE : FALSE;
+
+	if (panic_task && bt->machdep) {
+		user_regs = bt->machdep;
+
+		if (x86_64_eframe_verify(bt, 
+		    0,
+		    ULONG(user_regs + OFFSET(user_regs_struct_cs)),
+		    ULONG(user_regs + OFFSET(user_regs_struct_ss)),
+		    ULONG(user_regs + OFFSET(user_regs_struct_rip)),
+        	    ULONG(user_regs + OFFSET(user_regs_struct_rsp)),
+		    ULONG(user_regs + OFFSET(user_regs_struct_eflags)))) {
+			bt->stkptr = ULONG(user_regs + 
+				OFFSET(user_regs_struct_rsp));
+			if (x86_64_in_irqstack(bt)) {
+				ur_rip = ULONG(user_regs + 
+					OFFSET(user_regs_struct_rip));
+				ur_rsp = ULONG(user_regs + 
+					OFFSET(user_regs_struct_rsp));
+				goto skip_stage;
+			}
+		}
+	}
+
+	panic = FALSE;
+
+	/*
+	 *  Check the process stack first.
+	 */
+next_stack:
+        for (i = 0, up = (ulong *)bt->stackbuf; 
+	     i < (bt->stacktop - bt->stackbase)/sizeof(ulong); i++, up++) {
+                sym = closest_symbol(*up);
+		if (XEN_CORE_DUMPFILE()) {
+			if (STREQ(sym, "crash_kexec")) {
+				sp = x86_64_function_called_by((*up)-5);
+				if (sp && STREQ(sp->name, "machine_kexec")) {
+					*rip = *up;
+					*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+					return;
+				}
+			}
+			if (STREQ(sym, "xen_machine_kexec")) {
+                       		*rip = *up;
+                       		*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+				return;
+			}
+		} else if (STREQ(sym, "netconsole_netdump") || 
+		    STREQ(sym, "netpoll_start_netdump") ||
+		    STREQ(sym, "start_disk_dump") ||
+		    STREQ(sym, "disk_dump") ||
+		    STREQ(sym, "crash_kexec") ||
+		    STREQ(sym, "machine_kexec") ||
+		    STREQ(sym, "try_crashdump")) {
+			if (STREQ(sym, "crash_kexec")) {
+				sp = x86_64_function_called_by((*up)-5);
+				if (sp && STREQ(sp->name, "machine_kexec")) {
+					*rip = *up;
+					*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+					return;
+				}
+			}
+			/*
+			 *  Use second instance of crash_kexec if it exists.
+			 */
+			if (!(bt->flags & BT_TEXT_SYMBOLS) && 
+			    STREQ(sym, "crash_kexec") && !crash_kexec_rip) {
+				crash_kexec_rip = *up;
+				crash_kexec_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+				continue;
+			}
+                       	*rip = *up;
+                       	*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+                       	return;
+                }
+
+                if ((estack >= 0) && 
+                    (STREQ(sym, "nmi_watchdog_tick") ||
+                     STREQ(sym, "default_do_nmi"))) {
+			sp = x86_64_function_called_by((*up)-5);
+			if (!sp || !STREQ(sp->name, "die_nmi")) 
+				continue;
+                        *rip = *up;
+                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+			bt_in->flags |= BT_START;
+			*rip = symbol_value("die_nmi");
+			*rsp = (*rsp) - (7*sizeof(ulong));
+                        return;
+                }
+
+                if (STREQ(sym, "panic")) {
+                        *rip = *up;
+                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+                        panic = TRUE;
+                        continue;   /* keep looking for die */
+                }
+
+                if (STREQ(sym, "die")) {
+                        *rip = *up;
+                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+                        for (i++, up++; i < LONGS_PER_STACK; i++, up++) {
+                                sym = closest_symbol(*up);
+                                if (STREQ(sym, "sysrq_handle_crash"))
+                                        goto next_sysrq;
+                        }
+                        return;
+                }
+
+                if (STREQ(sym, "sysrq_handle_crash")) {
+next_sysrq:
+                        *rip = *up;
+                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+                        pc->flags |= SYSRQ;
+                        for (i++, up++; i < LONGS_PER_STACK; i++, up++) {
+                                sym = closest_symbol(*up);
+                                if (STREQ(sym, "sysrq_handle_crash"))
+                                        goto next_sysrq;
+                        }
+                        return;
+                }
+
+                if (!panic_task && (stage > 0) && 
+		    STREQ(sym, "smp_call_function_interrupt")) {
+                        *rip = *up;
+                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+			return;
+                }
+
+                if (!panic_task && STREQ(sym, "crash_nmi_callback")) {
+                        *rip = *up;
+                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+                        return;
+                }
+
+		if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) &&
+		    (stage == 0) && STREQ(sym, "safe_halt")) {
+			halt_rip = *up;
+			halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+		}
+
+		if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) &&
+		    !halt_rip && (stage == 0) && STREQ(sym, "xen_idle")) {
+			halt_rip = *up;
+			halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+		}
+
+		if (!XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && 
+		    !halt_rip && (stage == 0) && STREQ(sym, "cpu_idle")) { 
+			halt_rip = *up;
+			halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+		}
+	}
+
+	if (panic) 
+		return;
+
+	if (crash_kexec_rip) {
+		*rip = crash_kexec_rip;
+		*rsp = crash_kexec_rsp;
+		return;
+	}
+
+skip_stage:
+	switch (stage) 
+	{
+	/*
+         *  Now check the processor's interrupt stack.
+         */
+	case 0:
+		bt->stackbase = ms->stkinfo.ibase[bt->tc->processor];
+		bt->stacktop = ms->stkinfo.ibase[bt->tc->processor] + 
+			ms->stkinfo.isize;
+		console("x86_64_get_dumpfile_stack_frame: searching IRQ stack at %lx\n", 
+			bt->stackbase);
+		bt->stackbuf = ms->irqstack;
+		alter_stackbuf(bt);
+		stage = 1;
+		goto next_stack;
+
+        /*
+         *  Check the exception stacks.
+         */
+	case 1:
+		if (++estack == 7)
+			break;
+		bt->stackbase = ms->stkinfo.ebase[bt->tc->processor][estack];
+		bt->stacktop = ms->stkinfo.ebase[bt->tc->processor][estack] +
+                	ms->stkinfo.esize;
+		console("x86_64_get_dumpfile_stack_frame: searching %s estack at %lx\n", 
+			x86_64_exception_stacks[estack], bt->stackbase);
+		if (!(bt->stackbase)) 
+			goto skip_stage;
+		bt->stackbuf = ms->irqstack;
+		alter_stackbuf(bt);
+		goto next_stack;
+
+	}
+
+	/*
+	 *  We didn't find what we were looking for, so just use what was
+	 *  passed in from the ELF header.
+	 */
+	if (ur_rip && ur_rsp) {
+        	*rip = ur_rip;
+		*rsp = ur_rsp;
+		return;
+	}
+
+	if (halt_rip && halt_rsp) {
+        	*rip = halt_rip;
+		*rsp = halt_rsp;
+		return;
+	}
+
+	if (CRASHDEBUG(1)) 
+        	error(INFO, 
+		    "x86_64_get_dumpfile_stack_frame: cannot find anything useful (task: %lx)\n",
+			bt->task);
+
+	bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH;
+
+        machdep->get_stack_frame(bt, rip, rsp);
+}
+
+/*
+ *  Get the saved RSP from the task's thread_struct.
+ */
+static ulong
+x86_64_get_sp(struct bt_info *bt)
+{
+        ulong offset, rsp;
+
+        if (tt->flags & THREAD_INFO) {
+                readmem(bt->task + OFFSET(task_struct_thread) +
+			OFFSET(thread_struct_rsp), KVADDR,
+                        &rsp, sizeof(void *),
+                        "thread_struct rsp", FAULT_ON_ERROR);
+                return rsp;
+        }
+
+        offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rsp); 
+
+        return GET_STACK_ULONG(offset);
+}
+
+/*
+ *  Get the saved PC from the task's thread_struct if it exists;
+ *  otherwise just use the "thread_return" label value.
+ */
+static ulong
+x86_64_get_pc(struct bt_info *bt)
+{
+        ulong offset, rip;
+
+	if (INVALID_MEMBER(thread_struct_rip))
+		return symbol_value("thread_return");
+
+        if (tt->flags & THREAD_INFO) {
+                readmem(bt->task + OFFSET(task_struct_thread) +
+                        OFFSET(thread_struct_rip), KVADDR,
+                        &rip, sizeof(void *),
+                        "thread_struct rip", FAULT_ON_ERROR);
+		if (rip)
+			return rip;
+		else
+			return symbol_value("thread_return");
+        }
+
+        offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rip);
+
+        return GET_STACK_ULONG(offset);
+}
+
+
+/*
+ *  Do the work for x86_64_get_sp() and x86_64_get_pc().
+ */
+static void
+get_x86_64_frame(struct bt_info *bt, ulong *getpc, ulong *getsp)
+{
+	error(FATAL, "get_x86_64_frame: TBD\n");
+}
+
+/*
+ *  Do the work for cmd_irq().
+ */
+static void 
+x86_64_dump_irq(int irq)
+{
+        if (symbol_exists("irq_desc")) {
+                machdep->dump_irq = generic_dump_irq;
+                return(generic_dump_irq(irq));
+        }
+
+        error(FATAL, "x86_64_dump_irq: irq_desc[] does not exist?\n");
+}
+
+/* 
+ *  Do the work for irq -d
+ */
+void 
+x86_64_display_idt_table(void)
+{
+	int i;
+	char *idt_table_buf;
+	char buf[BUFSIZE];
+	ulong *ip;
+
+	if (INVALID_SIZE(gate_struct)) {
+		option_not_supported('d');
+		return;
+	}
+	idt_table_buf = GETBUF(SIZE(gate_struct) * 256);
+        readmem(symbol_value("idt_table"), KVADDR, idt_table_buf, 
+		SIZE(gate_struct) * 256, "idt_table", FAULT_ON_ERROR);
+	ip = (ulong *)idt_table_buf;
+
+	for (i = 0; i < 256; i++, ip += 2) {
+                if (i < 10)
+                        fprintf(fp, "  ");
+                else if (i < 100)
+                        fprintf(fp, " ");
+                fprintf(fp, "[%d] %s\n",
+                        i, x86_64_extract_idt_function(ip, buf, NULL));
+	}
+
+	FREEBUF(idt_table_buf);
+}
+
+/*
+ *  Extract the function name out of the IDT entry.
+ */
+static char *
+x86_64_extract_idt_function(ulong *ip, char *buf, ulong *retaddr)
+{
+	ulong i1, i2, addr;
+	char locbuf[BUFSIZE];
+	physaddr_t phys;
+
+	if (buf)
+		BZERO(buf, BUFSIZE);
+
+	i1 = *ip;
+	i2 = *(ip+1);
+
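+	/*
+	 *  Layout note (added for clarity): an x86_64 IDT entry is 16 bytes;
+	 *  the handler offset is split across it as offset[15:0] in bits 0-15
+	 *  of the first quadword, offset[31:16] in bits 48-63 of the first
+	 *  quadword, and offset[63:32] in bits 0-31 of the second quadword.
+	 *  The shifting and masking below reassembles those three pieces.
+	 */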
+	i2 <<= 32;
+	addr = i2 & 0xffffffff00000000;
+	addr |= (i1 & 0xffff);
+	i1 >>= 32;
+	addr |= (i1 & 0xffff0000);
+
+	if (retaddr)
+		*retaddr = addr;
+
+	if (!buf)
+		return NULL;
+
+	value_to_symstr(addr, locbuf, 0);
+	if (strlen(locbuf))
+		sprintf(buf, "%s", locbuf);
+	else {
+		sprintf(buf, "%016lx", addr);
+		if (kvtop(NULL, addr, &phys, 0)) {
+			addr = machdep->kvbase + (ulong)phys;
+			if (value_to_symstr(addr, locbuf, 0)) {
+				strcat(buf, "  <");
+				strcat(buf, locbuf);
+				strcat(buf, ">");
+			}
+		}
+	}
+
+	return buf;
+}
+
+/*
+ *  Filter disassembly output if the output radix is not gdb's default 10
+ */
+static int 
+x86_64_dis_filter(ulong vaddr, char *inbuf)
+{
+        char buf1[BUFSIZE];
+        char buf2[BUFSIZE];
+        char *colon, *p1;
+        int argc;
+        char *argv[MAXARGS];
+        ulong value;
+
+	if (!inbuf) 
+		return TRUE;
+/*
+ *  For some reason gdb can go off into the weeds translating text addresses
+ *  (seen on alpha -- not necessarily on x86_64), so this routine both fixes
+ *  the references and imposes the current output radix on the translations.
+ */
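+/*
+ *  Illustrative only (the exact gdb text varies by version): a line such as
+ *
+ *      0xffffffff8010f534 <default_idle+36>:   sti
+ *
+ *  has its leading address re-rendered below as "0x<vaddr> <symbol+offset>"
+ *  in the current output radix, a trailing "<symbol+offset>" token is
+ *  re-translated via value_to_symstr(), and a bare "callq 0x..." to a module
+ *  address gets a bracketed symbol appended.
+ */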
+	console("IN: %s", inbuf);
 
-        	for (c = 0; c < kt->cpus; c++) {
-                	if (ms->stkinfo.ibase[c] == 0)
-                        	break;
-                                bt->hp->esp = ms->stkinfo.ibase[c];
-                                fprintf(fp, "CPU %d IRQ STACK:\n", c);
-                                if ((cnt = x86_64_eframe_search(bt)))
-					fprintf(fp, "\n");
-				else
-                                        fprintf(fp, "(none found)\n\n");
+	colon = strstr(inbuf, ":");
+
+	if (colon) {
+		sprintf(buf1, "0x%lx <%s>", vaddr,
+			value_to_symstr(vaddr, buf2, pc->output_radix));
+		sprintf(buf2, "%s%s", buf1, colon);
+		strcpy(inbuf, buf2);
+	}
+
+	strcpy(buf1, inbuf);
+	argc = parse_line(buf1, argv);
+
+	if ((FIRSTCHAR(argv[argc-1]) == '<') && 
+	    (LASTCHAR(argv[argc-1]) == '>')) {
+		p1 = rindex(inbuf, '<');
+		while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) 
+			p1--;
+
+		if (!STRNEQ(p1, " 0x"))
+			return FALSE;
+		p1++;
+
+		if (!extract_hex(p1, &value, NULLCHAR, TRUE))
+			return FALSE;
+
+		sprintf(buf1, "0x%lx <%s>\n", value,	
+			value_to_symstr(value, buf2, pc->output_radix));
+
+		sprintf(p1, "%s", buf1);
+	
+        } else if (STREQ(argv[argc-2], "callq") &&
+            hexadecimal(argv[argc-1], 0)) {
+            	/*
+             	 *  Update module code of the form:
+             	 *
+             	 *    callq  0xffffffffa0017aa0
+	      	 *
+             	 *  to show a bracketed direct call target.
+             	 */
+                p1 = &LASTCHAR(inbuf);
+
+                if (extract_hex(argv[argc-1], &value, NULLCHAR, TRUE)) {
+                        sprintf(buf1, " <%s>\n",
+                                value_to_symstr(value, buf2,
+                                pc->output_radix));
+                        if (IS_MODULE_VADDR(value) &&
+                            !strstr(buf2, "+"))
+                                sprintf(p1, "%s", buf1);
                 }
+        }
 
-        	for (c = 0; c < kt->cpus; c++) {
-                	for (i = 0; i < 7; i++) {
-                        	if (ms->stkinfo.ebase[c][i] == 0)
-                                	break;
-                                bt->hp->esp = ms->stkinfo.ebase[c][i];
-                                fprintf(fp, "CPU %d %s EXCEPTION STACK:\n", 
-					c, x86_64_exception_stacks[i]);
-                                if ((cnt = x86_64_eframe_search(bt)))
-					fprintf(fp, "\n");
-				else
-                                        fprintf(fp, "(none found)\n\n");
-                	}
-        	}
+	console("    %s", inbuf);
 
-		return 0;
+	return TRUE;
+}
+
+
+/*
+ *   Override smp_num_cpus if possible and necessary.
+ */
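+/*
+ *  Method (summary of the loop below, added for readability): each per-cpu
+ *  x8664_pda is read in turn -- via the _cpu_pda[] pointer array, its
+ *  _boot_cpu_pda variant, or the older static cpu_pda[] array -- and cpus
+ *  are counted for as long as the entries read successfully, carry a sane
+ *  level4_pgt (when that member exists), and have cpunumber values that
+ *  increase 0, 1, 2, ...
+ */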
+int
+x86_64_get_smp_cpus(void)
+{
+	int i, cpus, nr_pda, cpunumber, _cpu_pda, _boot_cpu_pda;
+	char *cpu_pda_buf;
+	ulong level4_pgt, cpu_pda_addr;
+
+	if (!VALID_STRUCT(x8664_pda))
+		return 1;
+
+	cpu_pda_buf = GETBUF(SIZE(x8664_pda));
+
+	if (LKCD_KERNTYPES()) {
+		if (symbol_exists("_cpu_pda"))
+ 			_cpu_pda = TRUE;
+		else
+	 		_cpu_pda = FALSE;
+		nr_pda = get_cpus_possible();
+	} else {
+		if (symbol_exists("_cpu_pda")) {
+			if (!(nr_pda = get_array_length("_cpu_pda", NULL, 0)))
+				nr_pda = NR_CPUS;
+			_cpu_pda = TRUE;
+		} else {
+			if (!(nr_pda = get_array_length("cpu_pda", NULL, 0)))
+				nr_pda = NR_CPUS;
+			_cpu_pda = FALSE;
+		}
+	}
+	if (_cpu_pda) {
+		if (symbol_exists("_boot_cpu_pda"))
+			_boot_cpu_pda = TRUE;
+		else
+			_boot_cpu_pda = FALSE;
+	}
+	for (i = cpus = 0; i < nr_pda; i++) {
+		if (_cpu_pda) {
+			if (_boot_cpu_pda) {
+				if (!_CPU_PDA_READ2(i, cpu_pda_buf))
+					break;
+			} else {
+				if (!_CPU_PDA_READ(i, cpu_pda_buf))
+					break;
+			}
+		} else {
+			if (!CPU_PDA_READ(i, cpu_pda_buf))
+				break;
+		}
+		if (VALID_MEMBER(x8664_pda_level4_pgt)) {
+			level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt));
+			if (!VALID_LEVEL4_PGT_ADDR(level4_pgt))
+				break;
+		}
+		cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber));
+		if (cpunumber != cpus)
+			break;
+                cpus++;
+	}
+
+	FREEBUF(cpu_pda_buf);
+
+	return cpus;
+}
+
+/*
+ *  Machine dependent command.
+ */
+void
+x86_64_cmd_mach(void)
+{
+        int c;
+
+        while ((c = getopt(argcnt, args, "cm")) != EOF) {
+                switch(c)
+                {
+                case 'c':
+                        x86_64_display_cpu_data();
+                        return;
+
+                case 'm':
+                        x86_64_display_memmap();
+                        return;
+
+                default:
+                        argerrs++;
+                        break;
+                }
         }
 
-        if (bt->hp && bt->hp->esp) {
-        	ms = machdep->machspec;
-		bt->stkptr = bt->hp->esp;
-		if ((estack = x86_64_in_exception_stack(bt))) {
-			stacksize = ms->stkinfo.esize;
-			bt->stackbase = estack;
-			bt->stacktop = estack + ms->stkinfo.esize;
-                	bt->stackbuf = ms->irqstack;
-                	alter_stackbuf(bt);
-		} else if ((irqstack = x86_64_in_irqstack(bt))) {
-			stacksize = ms->stkinfo.isize;
-			bt->stackbase = irqstack;
-			bt->stacktop = irqstack + ms->stkinfo.isize;
-                	bt->stackbuf = ms->irqstack;
-                	alter_stackbuf(bt);
-		} else if (!INSTACK(bt->stkptr, bt))
-			error(FATAL, 
-			    "unrecognized stack address for this task: %lx\n",
-				bt->hp->esp);
-	} 
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+
+        x86_64_display_machine_stats();
+}
+
+/*
+ *  "mach" command output.
+ */
+static void
+x86_64_display_machine_stats(void)
+{
+        struct new_utsname *uts;
+        char buf[BUFSIZE];
+        ulong mhz;
+
+        uts = &kt->utsname;
+
+        fprintf(fp, "       MACHINE TYPE: %s\n", uts->machine);
+        fprintf(fp, "        MEMORY SIZE: %s\n", get_memory_size(buf));
+        fprintf(fp, "               CPUS: %d\n", kt->cpus);
+        fprintf(fp, "    PROCESSOR SPEED: ");
+        if ((mhz = machdep->processor_speed()))
+                fprintf(fp, "%ld Mhz\n", mhz);
+        else
+                fprintf(fp, "(unknown)\n");
+        fprintf(fp, "                 HZ: %d\n", machdep->hz);
+        fprintf(fp, "          PAGE SIZE: %d\n", PAGESIZE());
+//      fprintf(fp, "      L1 CACHE SIZE: %d\n", l1_cache_size());
+        fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase);
+        fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start);
+	if (machdep->flags & VMEMMAP)
+        	fprintf(fp, "KERNEL VMEMMAP BASE: %lx\n", machdep->machspec->vmemmap_vaddr);
+	fprintf(fp, "   KERNEL START MAP: %lx\n", __START_KERNEL_map);
+        fprintf(fp, "KERNEL MODULES BASE: %lx\n", MODULES_VADDR);
+        fprintf(fp, "  KERNEL STACK SIZE: %ld\n", STACKSIZE());
+}
+
+/*
+ *  "mach -c" 
+ */
+static void 
+x86_64_display_cpu_data(void)
+{
+        int cpu, cpus, boot_cpu, _cpu_pda;
+        ulong cpu_data;
+	ulong cpu_pda, cpu_pda_addr;
+
+	if (symbol_exists("cpu_data")) {
+        	cpu_data = symbol_value("cpu_data");
+		cpus = kt->cpus;
+		boot_cpu = FALSE;
+	} else if (symbol_exists("boot_cpu_data")) {
+        	cpu_data = symbol_value("boot_cpu_data");
+		boot_cpu = TRUE;
+		cpus = 1;
+	}
+	if (symbol_exists("_cpu_pda")) {
+		cpu_pda = symbol_value("_cpu_pda");
+		_cpu_pda = TRUE;
+	} else if (symbol_exists("cpu_pda")) {
+		cpu_pda = symbol_value("cpu_pda");
+		_cpu_pda = FALSE;
+	}
+
+        for (cpu = 0; cpu < cpus; cpu++) {
+		if (boot_cpu)
+                	fprintf(fp, "BOOT CPU:\n");
+		else
+                	fprintf(fp, "%sCPU %d:\n", cpu ? "\n" : "", cpu);
+
+                dump_struct("cpuinfo_x86", cpu_data, 0);
+		fprintf(fp, "\n");
+
+		if (_cpu_pda) {
+			readmem(cpu_pda, KVADDR, &cpu_pda_addr,
+				sizeof(unsigned long), "_cpu_pda addr", FAULT_ON_ERROR);
+			dump_struct("x8664_pda", cpu_pda_addr, 0);
+			cpu_pda += sizeof(void *);
+		} else {
+			dump_struct("x8664_pda", cpu_pda, 0);
+			cpu_pda += SIZE(x8664_pda);
+		}
+                cpu_data += SIZE(cpuinfo_x86);
+        }
+}
+
+/*
+ *  "mach -m"
+ */
+static char *e820type[] = {
+        "(invalid type)",
+        "E820_RAM",
+        "E820_RESERVED",
+        "E820_ACPI",
+        "E820_NVS",
+};
+
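+/*
+ *  Example of the table printed below (addresses are illustrative only):
+ *
+ *        PHYSICAL ADDRESS RANGE         TYPE
+ *      0000000000000000 - 000000000009fc00  E820_RAM
+ *      00000000000f0000 - 0000000000100000  E820_RESERVED
+ */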
+static void
+x86_64_display_memmap(void)
+{
+        ulong e820;
+        int nr_map, i;
+        char *buf, *e820entry_ptr;
+        ulonglong addr, size;
+        uint type;
 
-	stacksize = bt->stacktop - bt->stackbase - SIZE(pt_regs);
+        e820 = symbol_value("e820");
+	if (CRASHDEBUG(1))
+		dump_struct("e820map", e820, RADIX(16));
+        buf = (char *)GETBUF(SIZE(e820map));
 
-	if (bt->stkptr)
-		i = (bt->stkptr - bt->stackbase)/sizeof(ulong);
-	else
-		i = 0;
+        readmem(e820, KVADDR, &buf[0], SIZE(e820map),
+                "e820map", FAULT_ON_ERROR);
 
-	for (cnt = 0; i <= stacksize/sizeof(ulong); i++) {
-		up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);
+        nr_map = INT(buf + OFFSET(e820map_nr_map));
 
-                if (x86_64_exception_frame(EFRAME_SEARCH|EFRAME_PRINT|
-		    EFRAME_VERIFY, 0, (char *)up, bt, fp)) 
-			cnt++;
-	}
+        fprintf(fp, "      PHYSICAL ADDRESS RANGE         TYPE\n");
 
-	return cnt;
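+        /*
+         *  The e820entry array follows the nr_map count at the start of
+         *  the e820map; display each entry's address range and type.
+         */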
+        for (i = 0; i < nr_map; i++) {
+                e820entry_ptr = buf + sizeof(int) + (SIZE(e820entry) * i);
+                addr = ULONGLONG(e820entry_ptr + OFFSET(e820entry_addr));
+                size = ULONGLONG(e820entry_ptr + OFFSET(e820entry_size));
+                type = UINT(e820entry_ptr + OFFSET(e820entry_type));
+                fprintf(fp, "%016llx - %016llx  %s\n", addr, addr+size,
+			e820type[type]);
+        }
 }
 
-static void
-x86_64_display_full_frame(struct bt_info *bt, ulong rsp, FILE *ofp)
-{
-	int i, u_idx;
-	ulong *up;
-	ulong words, addr;
 
-        words = (rsp - bt->frameptr) / sizeof(ulong) + 1;
+static const char *hook_files[] = {
+        "arch/x86_64/kernel/entry.S",
+        "arch/x86_64/kernel/head.S",
+        "arch/x86_64/kernel/semaphore.c"
+};
 
-	addr = bt->frameptr;
-	u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong);
-	for (i = 0; i < words; i++, u_idx++) {
-		if (!(i & 1)) 
-			fprintf(ofp, "%s    %lx: ", i ? "\n" : "", addr);
-		
-		up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]);
-		fprintf(ofp, "%016lx ", *up);
-		addr += sizeof(ulong);
-	}
-	fprintf(ofp, "\n");
-}
+#define ENTRY_S      ((char **)&hook_files[0])
+#define HEAD_S       ((char **)&hook_files[1])
+#define SEMAPHORE_C  ((char **)&hook_files[2])
+
+static struct line_number_hook x86_64_line_number_hooks[] = {
+	{"ret_from_fork", ENTRY_S},
+	{"system_call", ENTRY_S},
+	{"int_ret_from_sys_call", ENTRY_S},
+	{"ptregscall_common", ENTRY_S},
+	{"stub_execve", ENTRY_S},
+	{"stub_rt_sigreturn", ENTRY_S},
+	{"common_interrupt", ENTRY_S},
+	{"ret_from_intr", ENTRY_S},
+	{"load_gs_index", ENTRY_S},
+	{"arch_kernel_thread", ENTRY_S},
+	{"execve", ENTRY_S},
+	{"page_fault", ENTRY_S},
+	{"coprocessor_error", ENTRY_S},
+	{"simd_coprocessor_error", ENTRY_S},
+	{"device_not_available", ENTRY_S},
+	{"debug", ENTRY_S},
+	{"nmi", ENTRY_S},
+	{"int3", ENTRY_S},
+	{"overflow", ENTRY_S},
+	{"bounds", ENTRY_S},
+	{"invalid_op", ENTRY_S},
+	{"coprocessor_segment_overrun", ENTRY_S},
+	{"reserved", ENTRY_S},
+	{"double_fault", ENTRY_S},
+	{"invalid_TSS", ENTRY_S},
+	{"segment_not_present", ENTRY_S},
+	{"stack_segment", ENTRY_S},
+	{"general_protection", ENTRY_S},
+	{"alignment_check", ENTRY_S},
+	{"divide_error", ENTRY_S},
+	{"spurious_interrupt_bug", ENTRY_S},
+	{"machine_check", ENTRY_S},
+	{"call_debug", ENTRY_S},
+
+	{NULL, NULL}    /* list must be NULL-terminated */
+};
 
-/*
- *  Check a frame for a requested reference.
- */
 static void
-x86_64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name)
+x86_64_dump_line_number(ulong callpc)
 {
-	struct syment *sp;
-	ulong offset;
-
-	if (!name)
-		sp = value_search(text, &offset); 
-	else if (!text)
-		sp = symbol_search(name);
+	error(FATAL, "x86_64_dump_line_number: TBD\n");
+}
 
-        switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL))
-        {
-        case BT_REF_SYMBOL:
-                if (name) {
-			if (STREQ(name, bt->ref->str))
-                        	bt->ref->cmdflags |= BT_REF_FOUND;
-		} else {
-			if (sp && !offset && STREQ(sp->name, bt->ref->str))
-                        	bt->ref->cmdflags |= BT_REF_FOUND;
-		}
-                break;
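+/*
+ *  Referenced only so that the otherwise-unused static functions and
+ *  hook data above do not trigger compiler warnings.
+ */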
+void
+x86_64_compiler_warning_stub(void)
+{
+        struct line_number_hook *lhp;
+        char **p;
 
-        case BT_REF_HEXVAL:
-                if (text) {
-			if (bt->ref->hexval == text) 
-                        	bt->ref->cmdflags |= BT_REF_FOUND;
-		} else if (sp && (bt->ref->hexval == sp->value))
-                       	bt->ref->cmdflags |= BT_REF_FOUND;
-		else if (!name && !text && (bt->ref->hexval == 0))
-			bt->ref->cmdflags |= BT_REF_FOUND;
-                break;
-        }
+        lhp = &x86_64_line_number_hooks[0]; lhp++;
+        p = ENTRY_S;
+	x86_64_back_trace(NULL, NULL);
+	get_x86_64_frame(NULL, NULL, NULL);
+	x86_64_dump_line_number(0);
 }
 
 /*
- *  print one entry of a stack trace
+ *  Force the VM address-range selection via:
+ *
+ *   --machdep vm=orig 
+ *   --machdep vm=2.6.11
+ *  
+ *  Force the phys_base address via:
+ *
+ *   --machdep phys_base=<address>
+ *
+ *  Force the IRQ stack back-link via:
+ *
+ *   --machdep irq_eframe_link=<offset>
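+ *
+ *  vm=xen and vm=xen-rhel4 are also accepted.  A trailing 'm' or 'M'
+ *  on the phys_base value is taken as megabytes, and multiple
+ *  comma-separated options may be combined.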
  */
-#define BACKTRACE_COMPLETE                   (1)
-#define BACKTRACE_ENTRY_IGNORED              (2)
-#define BACKTRACE_ENTRY_DISPLAYED            (3)
-#define BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED (4)
 
-static int
-x86_64_print_stack_entry(struct bt_info *bt, FILE *ofp, int level, 
-	int stkindex, ulong text)
+void
+parse_cmdline_arg(void)
 {
-	ulong rsp, offset;
-	struct syment *sp;
-	char *name;
-	int result; 
-	long eframe_check;
+	int i, c, errflag;
+	char *p;
 	char buf[BUFSIZE];
+	char *arglist[MAXARGS];
+	int megabytes;
+	int lines = 0;
+	int vm_flag;
+	ulong value;
+
+	if (!strstr(machdep->cmdline_arg, "=")) {
+		error(WARNING, "ignoring --machdep option: %s\n\n",
+			machdep->cmdline_arg);
+		return;
+        }
 
-	eframe_check = -1;
-	offset = 0;
-	sp = value_search(text, &offset);
-	if (!sp)
-		return BACKTRACE_ENTRY_IGNORED;
-
-	name = sp->name;
+	strcpy(buf, machdep->cmdline_arg);
 
-	if (bt->flags & BT_TEXT_SYMBOLS) {
-		if (bt->flags & BT_EXCEPTION_FRAME)
-			rsp = bt->stkptr;
-		else
-			rsp = bt->stackbase + (stkindex * sizeof(long));
-                fprintf(ofp, "  [%s] %s at %lx\n",
-                	mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(rsp)),
-                        name, text);
-		if (BT_REFERENCE_CHECK(bt))
-			x86_64_do_bt_reference_check(bt, text, name);
-		return BACKTRACE_ENTRY_DISPLAYED;
+	for (p = buf; *p; p++) {
+		if (*p == ',')
+			 *p = ' ';
 	}
 
-	if (!offset && !(bt->flags & BT_EXCEPTION_FRAME) &&
-	    !(bt->flags & BT_START)) { 
-		if (STREQ(name, "child_rip")) {
-			if (symbol_exists("kernel_thread"))
-				name = "kernel_thread";
-			else if (symbol_exists("arch_kernel_thread"))
-				name = "arch_kernel_thread";
-		}
-		else if (!(bt->flags & BT_SCHEDULE)) {
-			if (STREQ(name, "error_exit")) 
-				eframe_check = 8;
-			else {
-				if (CRASHDEBUG(2))
-					fprintf(ofp, 
-		              "< ignoring text symbol with no offset: %s() >\n",
-						sp->name);
-				return BACKTRACE_ENTRY_IGNORED;
+	c = parse_line(buf, arglist);
+
+	for (i = vm_flag = 0; i < c; i++) {
+		errflag = 0;
+
+		if (STRNEQ(arglist[i], "vm=")) {
+			vm_flag++;
+			p = arglist[i] + strlen("vm=");
+			if (strlen(p)) {
+				if (STREQ(p, "orig")) {
+					machdep->flags |= VM_ORIG;
+					continue;
+				} else if (STREQ(p, "2.6.11")) {
+					machdep->flags |= VM_2_6_11;
+					continue;
+				} else if (STREQ(p, "xen")) {
+					machdep->flags |= VM_XEN;
+					continue;
+				} else if (STREQ(p, "xen-rhel4")) {
+					machdep->flags |= VM_XEN_RHEL4;
+					continue;
+				}
+			}
+		} else if (STRNEQ(arglist[i], "phys_base=")) {
+			megabytes = FALSE;
+			if ((LASTCHAR(arglist[i]) == 'm') || 
+			    (LASTCHAR(arglist[i]) == 'M')) {
+				LASTCHAR(arglist[i]) = NULLCHAR;
+				megabytes = TRUE;
+			}
+                        p = arglist[i] + strlen("phys_base=");
+                        if (strlen(p)) {
+				if (megabytes) {
+                                	value = dtol(p, RETURN_ON_ERROR|QUIET,
+                                        	&errflag);
+				} else
+                                	value = htol(p, RETURN_ON_ERROR|QUIET,
+                                        	&errflag);
+                                if (!errflag) {
+					if (megabytes)
+						value = MEGABYTES(value);
+                                        machdep->machspec->phys_base = value;
+                                        error(NOTE,
+                                            "setting phys_base to: 0x%lx\n\n",
+                                                machdep->machspec->phys_base);
+					machdep->flags |= PHYS_BASE;
+                                        continue;
+                                }
+                        }
+                } else if (STRNEQ(arglist[i], "irq_eframe_link=")) {
+                        p = arglist[i] + strlen("irq_eframe_link=");
+			if (strlen(p)) {
+				value = stol(p, RETURN_ON_ERROR|QUIET, &errflag);
+				if (!errflag) {
+					machdep->machspec->irq_eframe_link = value;
+					continue;
+				}
 			}
 		}
+
+		error(WARNING, "ignoring --machdep option: %s\n", arglist[i]);
+		lines++;
+	} 
+
+	if (vm_flag) {
+		switch (machdep->flags & VM_FLAGS)
+		{
+		case 0:
+			break;
+	
+		case VM_ORIG:
+			error(NOTE, "using original x86_64 VM address ranges\n");
+			lines++;
+			break;
+	
+		case VM_2_6_11:
+			error(NOTE, "using 2.6.11 x86_64 VM address ranges\n");
+			lines++;
+			break;
+	
+		case VM_XEN:
+			error(NOTE, "using xen x86_64 VM address ranges\n");
+			lines++;
+			break;
+
+		case VM_XEN_RHEL4:
+			error(NOTE, "using RHEL4 xen x86_64 VM address ranges\n");
+			lines++;
+			break;
+	
+		default:
+			error(WARNING, "cannot set multiple vm values\n");
+			lines++;
+			machdep->flags &= ~VM_FLAGS;
+			break;
+		} 
 	}
 
-	if (bt->flags & BT_SCHEDULE)
-		name = "schedule";
+	if (lines)
+		fprintf(fp, "\n");
+}
 
-        if (STREQ(name, "child_rip")) {
-                if (symbol_exists("kernel_thread"))
-                        name = "kernel_thread";
-                else if (symbol_exists("arch_kernel_thread"))
-                        name = "arch_kernel_thread";
-		result = BACKTRACE_COMPLETE;
-        } else if (STREQ(name, "cpu_idle"))
-		result = BACKTRACE_COMPLETE;
-	else
-		result = BACKTRACE_ENTRY_DISPLAYED;
+void
+x86_64_clear_machdep_cache(void)
+{
+	machdep->machspec->last_upml_read = 0;
+}
 
-	if (bt->flags & BT_EXCEPTION_FRAME)
-		rsp = bt->stkptr;
-	else if (bt->flags & BT_START)
-		rsp = bt->stkptr;
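+/*
+ *  Calculate the IRQ stack back-link by disassembling common_interrupt:
+ *  if its entry sequence pushes %rbp, set irq_eframe_link to 40.
+ */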
+static void 
+x86_64_irq_eframe_link_init(void)
+{
+	int c;
+	struct syment *sp, *spn;
+	char buf[BUFSIZE];
+	char link_register[BUFSIZE];
+        char *arglist[MAXARGS];
+	ulong max_instructions;
+
+	if (machdep->machspec->irq_eframe_link == UNINITIALIZED)
+		machdep->machspec->irq_eframe_link = 0;
 	else
-		rsp = bt->stackbase + (stkindex * sizeof(long));
+		return; 
+
+	if (THIS_KERNEL_VERSION < LINUX(2,6,9)) 
+		return;
 
-	if ((bt->flags & BT_FULL)) {
-		if (bt->frameptr) 
-			x86_64_display_full_frame(bt, rsp, ofp);
-		bt->frameptr = rsp + sizeof(ulong);
+	if (!(sp = symbol_search("common_interrupt")) ||
+	    !(spn = next_symbol(NULL, sp))) {
+		return;
 	}
 
-        fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level,
-		rsp, name, text);
+	max_instructions = spn->value - sp->value;
 
-        if (bt->flags & BT_LINE_NUMBERS) {
-                get_line_number(text, buf, FALSE);
-                if (strlen(buf))
-                        fprintf(ofp, "    %s\n", buf);
-	}
+	open_tmpfile();
 
-	if (eframe_check >= 0) {
-		if (x86_64_exception_frame(EFRAME_PRINT|EFRAME_VERIFY, 
-		    bt->stackbase + (stkindex*sizeof(long)) + eframe_check,
-		    NULL, bt, ofp))
-			result = BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED;
-	}
+        sprintf(buf, "x/%ldi 0x%lx",
+		max_instructions, sp->value);
 
-	if (BT_REFERENCE_CHECK(bt))
-		x86_64_do_bt_reference_check(bt, text, name);
+        if (!gdb_pass_through(buf, pc->tmpfile, GNU_RETURN_ON_ERROR))
+		return;
 
-	bt->call_target = name;
+	link_register[0] = NULLCHAR;
 
-	if (is_direct_call_target(bt)) {
-		if (CRASHDEBUG(2))
-			fprintf(ofp, "< enable BT_CHECK_CALLER for %s >\n", 
-				bt->call_target);
-		bt->flags |= BT_CHECK_CALLER;
-	} else {
-		if (CRASHDEBUG(2) && (bt->flags & BT_CHECK_CALLER))
-			fprintf(ofp, "< disable BT_CHECK_CALLER for %s >\n", 
-				bt->call_target);
-		if (bt->flags & BT_CHECK_CALLER) {
-			if (CRASHDEBUG(2))
-			    	fprintf(ofp, "< set BT_NO_CHECK_CALLER >\n");
-			bt->flags |= BT_NO_CHECK_CALLER;
-		}
-		bt->flags &= ~(ulonglong)BT_CHECK_CALLER;
+	rewind(pc->tmpfile);
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (!strstr(buf, sp->name))
+			break;
+		if ((c = parse_line(buf, arglist)) < 4)
+			continue;
+		if (strstr(arglist[2], "push"))
+			strcpy(link_register, arglist[3]);
 	}
+	close_tmpfile();
 
-	return result;
-}
+	if (CRASHDEBUG(1)) 
+		fprintf(fp, "IRQ stack link register: %s\n", 
+		    strlen(link_register) ? 
+			link_register : "undetermined");
 
-/*
- *  Unroll a kernel stack.
- */
-static void
-x86_64_back_trace_cmd(struct bt_info *bt)
-{
-	error(FATAL, "x86_64_back_trace_cmd: TBD\n");
+	if (STREQ(link_register, "%rbp"))
+		machdep->machspec->irq_eframe_link = 40;
+	
 }
 
-
+#include "netdump.h"
 
 /*
- *  Determine whether the initial stack pointer is located in one of the
- *  exception stacks.
+ *  From the xen vmcore, create an index of mfns for each page that makes
+ *  up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array.
  */
-static ulong
-x86_64_in_exception_stack(struct bt_info *bt) 
-{
-	int c, i;
-	ulong rsp;
-	ulong estack;
-	struct machine_specific *ms;
-
-	rsp = bt->stkptr;
-	ms = machdep->machspec;
-	estack = 0;
 
-        for (c = 0; !estack && (c < kt->cpus); c++) {
-		for (i = 0; i < 7; i++) {
-			if (ms->stkinfo.ebase[c][i] == 0)
-				break;
-			if ((rsp >= ms->stkinfo.ebase[c][i]) &&
-			    (rsp < (ms->stkinfo.ebase[c][i] + 
-			    ms->stkinfo.esize))) {
-				estack = ms->stkinfo.ebase[c][i]; 
-				if (c != bt->tc->processor) 
-					error(INFO, 
-      		                      "task cpu: %d  exception stack cpu: %d\n",
-						bt->tc->processor, c);
-				break;
-			}
-		}
-        }
-
-	return estack;
-}
+#define MAX_X86_64_FRAMES  (512)
+#define MFNS_PER_FRAME     (PAGESIZE()/sizeof(ulong))
 
-/*
- *  Determine whether the current stack pointer is in a cpu's irqstack.
- */
-static ulong
-x86_64_in_irqstack(struct bt_info *bt) 
+static int
+x86_64_xen_kdump_p2m_create(struct xen_kdump_data *xkd)
 {
-        int c;
-        ulong rsp;
-        ulong irqstack;
-        struct machine_specific *ms;
+        int i, j;
+        ulong kvaddr;
+        ulong *up;
+        ulong frames;
+        ulong frame_mfn[MAX_X86_64_FRAMES] = { 0 };
+        int mfns[MAX_X86_64_FRAMES] = { 0 };
 
-        rsp = bt->stkptr;
-        ms = machdep->machspec;
-        irqstack = 0;
+        /*
+         *  Temporarily read physical (machine) addresses from vmcore by
+         *  going directly to read_netdump() instead of via read_kdump().
+         */
+        pc->readmem = read_netdump;
 
-        for (c = 0; !irqstack && (c < kt->cpus); c++) {
-                if (ms->stkinfo.ibase[c] == 0)
-                 	break;
-                if ((rsp >= ms->stkinfo.ibase[c]) &&
-                    (rsp < (ms->stkinfo.ibase[c] + ms->stkinfo.isize))) {
-                	irqstack = ms->stkinfo.ibase[c];
-                        if (c != bt->tc->processor) 
-                                error(INFO, 
-			          "task cpu: %d  IRQ stack cpu: %d\n",
-                                	bt->tc->processor, c);
-                        break;
-                }
-        }
+        if (xkd->flags & KDUMP_CR3)
+                goto use_cr3;
 
-        return irqstack;
-}
+        if (CRASHDEBUG(1))
+                fprintf(fp, "x86_64_xen_kdump_p2m_create: p2m_mfn: %lx\n", 
+			xkd->p2m_mfn);
 
-#define STACK_TRANSITION_ERRMSG_E_I_P \
-"cannot transition from exception stack to IRQ stack to current process stack:\n    exception stack pointer: %lx\n          IRQ stack pointer: %lx\n      process stack pointer: %lx\n         current stack base: %lx\n" 
-#define STACK_TRANSITION_ERRMSG_E_P \
-"cannot transition from exception stack to current process stack:\n    exception stack pointer: %lx\n      process stack pointer: %lx\n         current_stack_base: %lx\n"
-#define STACK_TRANSITION_ERRMSG_I_P \
-"cannot transition from IRQ stack to current process stack:\n        IRQ stack pointer: %lx\n    process stack pointer: %lx\n       current stack base: %lx"
+	if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->page, PAGESIZE(), 
+	    "xen kdump p2m mfn page", RETURN_ON_ERROR))
+		error(FATAL, "cannot read xen kdump p2m mfn page\n");
 
-/*
- *  Low-budget back tracer -- dump text return addresses, following call chain
- *  when possible, along with any verifiable exception frames.
- */
-static void
-x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in)
-{
-	int i, level, done;
-	ulong rsp, offset, stacktop;
-	ulong *up;
-	long cs;
-	struct syment *sp, *spt;
-	FILE *ofp;
-	ulong estack, irqstack;
-	ulong irq_eframe;
-	struct bt_info bt_local, *bt;
-	struct machine_specific *ms;
-	ulong last_process_stack_eframe;
-	ulong user_mode_eframe;
+	if (CRASHDEBUG(2))
+		x86_64_debug_dump_page(fp, xkd->page, "pfn_to_mfn_frame_list");
 
-	bt = &bt_local;
-	BCOPY(bt_in, bt, sizeof(struct bt_info));
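+	/*
+	 *  The p2m_mfn page holds the mfns of the frames making up the
+	 *  pfn-to-mfn list; copy them, then count the populated entries
+	 *  in each frame to size the index.
+	 */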
+	for (i = 0, up = (ulong *)xkd->page; i < MAX_X86_64_FRAMES; i++, up++)
+		frame_mfn[i] = *up;
 
-	level = 0;
-	done = FALSE;
-	irq_eframe = 0;
-	last_process_stack_eframe = 0;
-	bt->call_target = NULL;
-	rsp = bt->stkptr;
-	if (!rsp) {
-		error(INFO, "cannot determine starting stack pointer\n");
-		return;
-	}
-	ms = machdep->machspec;
-	if (BT_REFERENCE_CHECK(bt))
-		ofp = pc->nullfp;
-	else
-		ofp = fp;
+	for (i = 0; i < MAX_X86_64_FRAMES; i++) {
+		if (!frame_mfn[i])
+			break;
 
-        if (bt->flags & BT_TEXT_SYMBOLS) {
-                fprintf(ofp, "%sSTART: %s%s at %lx\n",
-                	space(VADDR_PRLEN > 8 ? 14 : 6),
-                	closest_symbol(bt->instptr), 
-			STREQ(closest_symbol(bt->instptr), "thread_return") ?
-			" (schedule)" : "",
-			bt->instptr);
-        } else if (bt->flags & BT_START) {
-                x86_64_print_stack_entry(bt, ofp, level,
-                        0, bt->instptr);
-		bt->flags &= ~BT_START;
-		level++;
+        	if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, xkd->page, 
+		    PAGESIZE(), "xen kdump p2m mfn list page", RETURN_ON_ERROR))
+                	error(FATAL, "cannot read xen kdump p2m mfn list page\n");
+
+		for (j = 0, up = (ulong *)xkd->page; j < MFNS_PER_FRAME; j++, up++)
+			if (*up)
+				mfns[i]++;
+
+		xkd->p2m_frames += mfns[i];
+		
+	        if (CRASHDEBUG(7))
+			x86_64_debug_dump_page(fp, xkd->page, "pfn_to_mfn_frame_list page");
 	}
 
+        if (CRASHDEBUG(1))
+		fprintf(fp, "p2m_frames: %d\n", xkd->p2m_frames);
 
-        if ((estack = x86_64_in_exception_stack(bt))) {
-in_exception_stack:
-		bt->flags |= BT_EXCEPTION_STACK;
-		/*
-	 	 *  The stack buffer will have been loaded with the process
-		 *  stack, so switch to the indicated exception stack.
-		 */
-                bt->stackbase = estack;
-                bt->stacktop = estack + ms->stkinfo.esize;
-                bt->stackbuf = ms->irqstack;
+        if ((xkd->p2m_mfn_frame_list = (ulong *)
+	    malloc(xkd->p2m_frames * sizeof(ulong))) == NULL)
+                error(FATAL, "cannot malloc p2m_frame_index_list");
 
-                if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
-                    bt->stacktop - bt->stackbase,
-		    bt->hp && (bt->hp->esp == bt->stkptr) ? 
-	 	    "irqstack contents via hook" : "irqstack contents", 
+	for (i = 0, frames = xkd->p2m_frames; frames; i++) {
+        	if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, 
+		    &xkd->p2m_mfn_frame_list[i * MFNS_PER_FRAME], 
+		    mfns[i] * sizeof(ulong), "xen kdump p2m mfn list page", 
 		    RETURN_ON_ERROR))
-                    	error(FATAL, "read of exception stack at %lx failed\n",
-                        	bt->stackbase);
+                	error(FATAL, "cannot read xen kdump p2m mfn list page\n");
 
-		/*
-	 	 *  If irq_eframe is set, we've jumped back here from the
-		 *  IRQ stack dump below.  Do basically the same thing as if
-		 *  had come from the processor stack, but presume that we
-		 *  must have been in kernel mode, i.e., took an exception
-	 	 *  while operating on an IRQ stack.  (untested)
-		 */
-                if (irq_eframe) {
-                        bt->flags |= BT_EXCEPTION_FRAME;
-                        i = (irq_eframe - bt->stackbase)/sizeof(ulong);
-                        x86_64_print_stack_entry(bt, ofp, level, i, 
-				bt->instptr);
-                        bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME;
-                        cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0,
-                        	bt->stackbuf + (irq_eframe - bt->stackbase), 
-				bt, ofp);
-                        rsp += SIZE(pt_regs);  /* guaranteed kernel mode */
-                        level++;
-                        irq_eframe = 0;
-                }
+		frames -= mfns[i];
+	}
 
-		stacktop = bt->stacktop - SIZE(pt_regs);
+	if (CRASHDEBUG(2)) {
+		for (i = 0; i < xkd->p2m_frames; i++)
+		    	fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]);
+		fprintf(fp, "\n");
+	}
 
-        	for (i = (rsp - bt->stackbase)/sizeof(ulong);
-	     	    !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) {
+        pc->readmem = read_kdump;
+	return TRUE;
 
-			up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);
+use_cr3:
 
-			if (!is_kernel_text(*up))
-		        	continue;
+        if (CRASHDEBUG(1))
+                fprintf(fp, "x86_64_xen_kdump_p2m_create: cr3: %lx\n", xkd->cr3);
 
-	                switch (x86_64_print_stack_entry(bt, ofp, level, i,*up))
-	                {
-	                case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED:
-				rsp += SIZE(pt_regs);
-				i += SIZE(pt_regs)/sizeof(ulong);
-	                case BACKTRACE_ENTRY_DISPLAYED:
-	                        level++;
-	                        break;
-	                case BACKTRACE_ENTRY_IGNORED:
-	                        break;
-	                case BACKTRACE_COMPLETE:
-	                        done = TRUE;
-	                        break;
-	                }
-		}
+        if (!readmem(PTOB(xkd->cr3), PHYSADDR, machdep->machspec->pml4, 
+	    PAGESIZE(), "xen kdump cr3 page", RETURN_ON_ERROR))
+                error(FATAL, "cannot read xen kdump cr3 page\n");
 
-                cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, 
-			bt->stackbuf + (bt->stacktop - bt->stackbase) - 
-			SIZE(pt_regs), bt, ofp);
+        if (CRASHDEBUG(7))
+                x86_64_debug_dump_page(fp, machdep->machspec->pml4,
+                        "contents of PML4 page:");
 
-		if (!BT_REFERENCE_CHECK(bt))
-			fprintf(fp, "--- <exception stack> ---\n");
+	kvaddr = symbol_value("end_pfn");
+        if (!x86_64_xen_kdump_load_page(kvaddr, xkd->page))
+                return FALSE;
+        up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr));
 
-                /* 
-		 *  stack = (unsigned long *) estack_end[-2]; 
-		 */
-		up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]);
-		up -= 2;
-		rsp = bt->stkptr = *up;
-		up -= 3;
-		bt->instptr = *up;  
-		if (cs & 3)
-			done = TRUE;   /* user-mode exception */
-		else
-			done = FALSE;  /* kernel-mode exception */
-		bt->frameptr = 0;
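+        /*
+         *  p2m_frames = end_pfn divided by the number of mfns that
+         *  fit in one page, rounded up.
+         */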
+        xkd->p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) +
+                ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0);
 
-		/*
-		 *  Print the return values from the estack end.
-		 */
-		if (!done) {
-                	bt->flags |= BT_START;
-                	x86_64_print_stack_entry(bt, ofp, level,
-                        	0, bt->instptr);
-                	bt->flags &= ~BT_START;
-			level++;
-		}
-	}
+        if (CRASHDEBUG(1))
+                fprintf(fp, "end_pfn at %lx: %lx (%ld) -> %d p2m_frames\n",
+                        kvaddr, *up, *up, xkd->p2m_frames);
+
+        if ((xkd->p2m_mfn_frame_list = (ulong *)
+            malloc(xkd->p2m_frames * sizeof(ulong))) == NULL)
+                error(FATAL, "cannot malloc p2m_frame_index_list");
+
+        kvaddr = symbol_value("phys_to_machine_mapping");
+        if (!x86_64_xen_kdump_load_page(kvaddr, xkd->page))
+                return FALSE;
+        up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr));
+        kvaddr = *up;
+        if (CRASHDEBUG(1))
+                fprintf(fp, "phys_to_machine_mapping: %lx\n", kvaddr);
+
+        machdep->last_pgd_read = BADADDR;
+        machdep->last_pmd_read = BADADDR;
+        machdep->last_ptbl_read = BADADDR;
+
+        for (i = 0; i < xkd->p2m_frames; i++) {
+                xkd->p2m_mfn_frame_list[i] = x86_64_xen_kdump_page_mfn(kvaddr);
+                kvaddr += PAGESIZE();
+        }
 
-	/*
-	 *  IRQ stack entry always comes in via the process stack, regardless
-	 *  whether it happened while running in user or kernel space.
-	 */
-        if (!done && (irqstack = x86_64_in_irqstack(bt))) {
-		bt->flags |= BT_IRQSTACK;
-		/*
-		 *  Until coded otherwise, the stackbase will be pointing to
-		 *  either the exception stack or, more likely, the process
-		 *  stack base.  Switch it to the IRQ stack.
-		 */
-                bt->stackbase = irqstack;
-                bt->stacktop = irqstack + ms->stkinfo.isize;
-                bt->stackbuf = ms->irqstack;
+        if (CRASHDEBUG(1)) {
+                for (i = 0; i < xkd->p2m_frames; i++)
+                        fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]);
+                fprintf(fp, "\n");
+        }
 
-                if (!readmem(bt->stackbase, KVADDR, 
-	  	    bt->stackbuf, bt->stacktop - bt->stackbase,
-                    bt->hp && (bt->hp->esp == bt_in->stkptr) ?
-		    "irqstack contents via hook" : "irqstack contents", 
-		    RETURN_ON_ERROR))
-                    	error(FATAL, "read of IRQ stack at %lx failed\n",
-				bt->stackbase);
+	machdep->last_pgd_read = 0;
+        machdep->last_ptbl_read = 0;
+        machdep->last_pmd_read = 0;
+        pc->readmem = read_kdump;
 
-		stacktop = bt->stacktop - 64; /* from kernel code */
+        return TRUE;
+}
 
-                for (i = (rsp - bt->stackbase)/sizeof(ulong);
-                    !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) {
+static char *
+x86_64_xen_kdump_load_page(ulong kvaddr, char *pgbuf)
+{
+	ulong mfn;
+	ulong *pml4, *pgd, *pmd, *ptep;
 
-                        up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);
+        pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);
+	mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
 
-                        if (!is_kernel_text(*up))
-                                continue;
+	if (CRASHDEBUG(3))
+		fprintf(fp, 
+		    "[%lx] pml4: %lx  mfn: %lx  pml4_index: %lx\n", 
+			kvaddr, *pml4, mfn, pml4_index(kvaddr));
+
+        if (!readmem(PTOB(mfn), PHYSADDR, machdep->pgd, PAGESIZE(),
+            "xen kdump pud page", RETURN_ON_ERROR))
+		error(FATAL, "cannot read/find pud page\n");
+        
+        if (CRASHDEBUG(7))
+		x86_64_debug_dump_page(fp, machdep->pgd, 
+                	"contents of page upper directory page:");
 
-                        switch (x86_64_print_stack_entry(bt, ofp, level, i,*up))
-                        {
-			case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED:
-				rsp += SIZE(pt_regs);
-				i += SIZE(pt_regs)/sizeof(ulong);
-                        case BACKTRACE_ENTRY_DISPLAYED:
-                                level++;
-                                break;
-                        case BACKTRACE_ENTRY_IGNORED:
-                                break;
-                        case BACKTRACE_COMPLETE:
-                                done = TRUE;
-                                break;
-                        }
-                }
+        pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr);
+	mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
 
-		if (!BT_REFERENCE_CHECK(bt))
-                	fprintf(fp, "--- <IRQ stack> ---\n");
+	if (CRASHDEBUG(3))
+		fprintf(fp, 
+		    "[%lx] pgd: %lx  mfn: %lx  pgd_index: %lx\n", 
+			kvaddr, *pgd, mfn, pgd_index(kvaddr));
 
-                /*
-		 *  stack = (unsigned long *) (irqstack_end[-1]);
-		 *  (where irqstack_end is 64 bytes below page end)
-                 */
-                up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]);
-                up -= 1;
-                irq_eframe = rsp = bt->stkptr = *up;
-		up -= 1;
-                bt->instptr = *up;
-                bt->frameptr = 0;
-                done = FALSE;
-        } else
-		irq_eframe = 0;
+	if (!readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(),
+            "xen kdump pmd page", RETURN_ON_ERROR))
+                error(FATAL, "cannot read/find pmd page\n");
 
-        if (!done && (estack = x86_64_in_exception_stack(bt))) 
-		goto in_exception_stack;
+        if (CRASHDEBUG(7)) 
+		x86_64_debug_dump_page(fp, machdep->pmd, 
+			"contents of page middle directory page:");
 
-	if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) {
-		/*
-		 *  Verify that the rsp pointer taken from either the
-		 *  exception or IRQ stack points into the process stack.
-		 */
-		bt->stackbase = GET_STACKBASE(bt->tc->task);
-		bt->stacktop = GET_STACKTOP(bt->tc->task);
+        pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr);
+	mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
 
-		if (!INSTACK(rsp, bt)) {
-			switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))
-			{
-			case (BT_EXCEPTION_STACK|BT_IRQSTACK):
-				error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P,
-					bt_in->stkptr, bt->stkptr, rsp,
-					bt->stackbase);
+	if (CRASHDEBUG(3))
+		fprintf(fp, 
+		    "[%lx] pmd: %lx  mfn: %lx  pmd_index: %lx\n", 
+			kvaddr, *pmd, mfn, pmd_index(kvaddr));
 
-			case BT_EXCEPTION_STACK:
-				error(FATAL, STACK_TRANSITION_ERRMSG_E_P,
-					bt_in->stkptr, rsp, bt->stackbase);
+        if (!readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(),
+            "xen kdump page table page", RETURN_ON_ERROR))
+                error(FATAL, "cannot read/find page table page\n");
 
-			case BT_IRQSTACK:
-				error(FATAL, STACK_TRANSITION_ERRMSG_I_P,
-					bt_in->stkptr, rsp, bt->stackbase);
-			}
-		}
+        if (CRASHDEBUG(7)) 
+		x86_64_debug_dump_page(fp, machdep->ptbl, 
+			"contents of page table page:");
 
-		/*
-	 	 *  Now fill the local stack buffer from the process stack.
-	  	 */
-               	if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
-                    bt->stacktop - bt->stackbase, 
-		    "irqstack contents", RETURN_ON_ERROR))
-                	error(FATAL, "read of process stack at %lx failed\n",
-				bt->stackbase);
-	}
+        ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr);
+	mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
 
-	/*
-	 *  For a normally blocked task, hand-create the first level.
-	 */
-        if (!done && 
-	    !(bt->flags & (BT_TEXT_SYMBOLS|BT_EXCEPTION_STACK|BT_IRQSTACK)) &&
-	    STREQ(closest_symbol(bt->instptr), "thread_return")) {
-		bt->flags |= BT_SCHEDULE;
-		i = (rsp - bt->stackbase)/sizeof(ulong);
-		x86_64_print_stack_entry(bt, ofp, level, 
-			i, bt->instptr);
-		bt->flags &= ~(ulonglong)BT_SCHEDULE;
-		rsp += sizeof(ulong);
-		level++;
-	}
+	if (CRASHDEBUG(3))
+		fprintf(fp, 
+		    "[%lx] ptep: %lx  mfn: %lx  pte_index: %lx\n", 
+			kvaddr, *ptep, mfn, pte_index(kvaddr));
 
-	/*
-	 *  Dump the IRQ exception frame from the process stack.
-	 *  If the CS register indicates a user exception frame,
-	 *  then set done to TRUE to avoid the process stack walk-through.
-	 *  Otherwise, bump up the rsp past the kernel-mode eframe.
-	 */
-        if (irq_eframe) {
-                bt->flags |= BT_EXCEPTION_FRAME;
-                i = (irq_eframe - bt->stackbase)/sizeof(ulong);
-                x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr);
-                bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME;
-                cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, 
-			bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp);
-		if (cs & 3)
-			done = TRUE;   /* IRQ from user-mode */
-		else
-			rsp += SIZE(pt_regs);
-		level++;
-        }
+        if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
+            "xen kdump page table page", RETURN_ON_ERROR))
+                error(FATAL, "cannot read/find pte page\n");
 
-	/*
-	 *  Walk the process stack.  
-	 */
-        for (i = (rsp - bt->stackbase)/sizeof(ulong);
-	     !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) {
+        if (CRASHDEBUG(7)) 
+		x86_64_debug_dump_page(fp, pgbuf, 
+			"contents of page:");
 
-		up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);
+	return pgbuf;
+}
 
-		if (!is_kernel_text(*up))
-			continue;
+static ulong 
+x86_64_xen_kdump_page_mfn(ulong kvaddr)
+{
+	ulong mfn;
+	ulong *pml4, *pgd, *pmd, *ptep;
 
-		if ((bt->flags & BT_CHECK_CALLER)) {
-			/*
-			 *  A non-zero offset value from the value_search() 
-			 *  lets us know if it's a real text return address.
-			 */
-			spt = value_search(*up, &offset);
-			/*
-		         *  sp gets the syment of the function that the text 
-			 *  routine above called before leaving its return 
-			 *  address on the stack -- if it can be determined.
-			 */
-			sp = x86_64_function_called_by((*up)-5); 
+        pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);
+	mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
 
-			if (sp == NULL) {
-				/* 
-				 *  We were unable to get the called function.
-				 *  If the text address had an offset, then
-				 *  it must have made an indirect call, and
-				 *  can't have called our target function.
-				 */
-				if (offset) {
-					if (CRASHDEBUG(1))
-						fprintf(ofp, 
-                       "< ignoring %s() -- makes indirect call and NOT %s()>\n",
-						    	spt->name, 
-						    	bt->call_target);
-					continue;
-				}
-			} else if (!STREQ(sp->name, bt->call_target)) {
-				/*
-				 *  We got function called by the text routine,
-			 	 *  but it's not our target function.
-				 */
-				if (CRASHDEBUG(2))
-					fprintf(ofp, 
- 		                "< ignoring %s() -- calls %s() and NOT %s()>\n",
-						spt->name, sp->name, 
-						bt->call_target);
-				continue;
-			}
-		}
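+        /*
+         *  Only re-read a page table level when its mfn differs from
+         *  the one cached by the previous lookup.
+         */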
+        if ((mfn != machdep->last_pgd_read) && 
+	    !readmem(PTOB(mfn), PHYSADDR, machdep->pgd, PAGESIZE(),
+            "xen kdump pud entry", RETURN_ON_ERROR))
+		error(FATAL, "cannot read/find pud page\n");
+        machdep->last_pgd_read = mfn;
 
-		switch (x86_64_print_stack_entry(bt, ofp, level, i,*up))
-		{
-		case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED:
-			last_process_stack_eframe = rsp + 8;
-			rsp += SIZE(pt_regs);
-			i += SIZE(pt_regs)/sizeof(ulong);
-		case BACKTRACE_ENTRY_DISPLAYED:
-			level++;
-			break;
-		case BACKTRACE_ENTRY_IGNORED:	
-			break;
-		case BACKTRACE_COMPLETE:
-			done = TRUE;
-			break;
-		}
-        }
+        pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr);
+	mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
 
-        if (!irq_eframe && !is_kernel_thread(bt->tc->task) &&
-            (GET_STACKBASE(bt->tc->task) == bt->stackbase)) {
-		user_mode_eframe = bt->stacktop - SIZE(pt_regs);
-		if (last_process_stack_eframe < user_mode_eframe)
-                	x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf +
-                        	(bt->stacktop - bt->stackbase) - SIZE(pt_regs),
-                        	bt, ofp);
-	}
+        if ((mfn != machdep->last_pmd_read) && 
+            !readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(),
+            "xen kdump pmd entry", RETURN_ON_ERROR))
+                error(FATAL, "cannot read/find pmd page\n");
+        machdep->last_pmd_read = mfn;
 
-        if (bt->flags & BT_TEXT_SYMBOLS) {
-        	if (BT_REFERENCE_FOUND(bt)) {
-                	print_task_header(fp, task_to_context(bt->task), 0);
-			BCOPY(bt_in, bt, sizeof(struct bt_info));
-                	bt->ref = NULL;
-                	machdep->back_trace(bt);
-                	fprintf(fp, "\n");
-        	}
-	}
+        pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr);
+	mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
+
+        if ((mfn != machdep->last_ptbl_read) && 
+            !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(),
+            "xen kdump page table page", RETURN_ON_ERROR))
+                error(FATAL, "cannot read/find page table page\n");
+        machdep->last_ptbl_read = mfn;
+
+        ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr);
+	mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
+
+	return mfn;
 }
 
+#include "xendump.h"
+
 /*
- *  Functions that won't be called indirectly.
- *  Add more to this as they are discovered.
+ *  Determine the physical address base for relocatable kernels.
  */
-static const char *direct_call_targets[] = {
-        "schedule",
-        "schedule_timeout",
-	NULL
-};
-
-static int
-is_direct_call_target(struct bt_info *bt)
+static void
+x86_64_calc_phys_base(void)
 {
 	int i;
+	FILE *iomem;
+	char buf[BUFSIZE];
+	char *p1;
+	ulong phys_base, text_start, kernel_code_start;
+	int errflag;
+	struct vmcore_data *vd;
+	static struct xendump_data *xd;
+	Elf64_Phdr *phdr;
 
-	if (!bt->call_target || (bt->flags & BT_NO_CHECK_CALLER))
-		return FALSE;
+	if (machdep->flags & PHYS_BASE)     /* --machdep override */
+		return;
 
-	for (i = 0; direct_call_targets[i]; i++) {
-		if (STREQ(direct_call_targets[i], bt->call_target)) 
-			return TRUE;
-	}
+	machdep->machspec->phys_base = 0;   /* default/traditional */
 
-	return FALSE;
-}
+	if (!kernel_symbol_exists("phys_base"))
+		return;
 
-static struct syment *
-x86_64_function_called_by(ulong rip)
-{
-	struct syment *sp;
-	char buf[BUFSIZE], *p1;
-	ulong value, offset;
-	unsigned char byte;
+	if (!symbol_exists("_text"))
+		return;
+	else
+		text_start = symbol_value("_text");
 
-	value = 0;
-	sp = NULL;
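+	/*
+	 *  On a live system, derive phys_base from the start of the
+	 *  "Kernel code" resource in /proc/iomem minus the kernel's
+	 *  virtual text offset from __START_KERNEL_map.
+	 */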
+	if (ACTIVE()) {
+	        if ((iomem = fopen("/proc/iomem", "r")) == NULL)
+	                return;
+	
+		errflag = 1;
+	        while (fgets(buf, BUFSIZE, iomem)) {
+			if (strstr(buf, ": Kernel code")) {
+				clean_line(buf);
+				errflag = 0;
+				break;
+			}
+		}
+	        fclose(iomem);
+	
+		if (errflag)
+			return;
+	
+		if (!(p1 = strstr(buf, "-")))
+			return;
+		else
+			*p1 = NULLCHAR;
+	
+		errflag = 0;
+		kernel_code_start = htol(buf, RETURN_ON_ERROR|QUIET, &errflag);
+	        if (errflag)
+			return;
+	
+		machdep->machspec->phys_base = kernel_code_start -
+			(text_start - __START_KERNEL_map);
+	
+		if (CRASHDEBUG(1)) {
+			fprintf(fp, "_text: %lx  ", text_start);
+			fprintf(fp, "Kernel code: %lx -> ", kernel_code_start);
+			fprintf(fp, "phys_base: %lx\n\n", 
+				machdep->machspec->phys_base);
+		}
 
-        if (!readmem(rip, KVADDR, &byte, sizeof(unsigned char), "call byte",
-            RETURN_ON_ERROR)) 
-		return sp;
+		return;
+	}
 
-        if (byte != 0xe8) 
-		return sp;
+	/*
+	 *  Get relocation value from whatever dumpfile format is being used.
+	 */
 
-        sprintf(buf, "x/i 0x%lx", rip);
+	if (DISKDUMP_DUMPFILE()) {
+		if (diskdump_phys_base(&phys_base)) {
+			machdep->machspec->phys_base = phys_base;
+			if (CRASHDEBUG(1))
+				fprintf(fp, "compressed kdump: phys_base: %lx\n",
+					phys_base);
+		}
+		return;
+	}
 
-        open_tmpfile2();
-	if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) {
-	        rewind(pc->tmpfile2);
-	        while (fgets(buf, BUFSIZE, pc->tmpfile2)) {
-			if ((p1 = strstr(buf, "callq")) &&
-			    whitespace(*(p1-1))) { 
-				if (extract_hex(p1, &value, NULLCHAR, TRUE)) 
-					break;
+	if ((vd = get_kdump_vmcore_data())) {
+                for (i = 0; i < vd->num_pt_load_segments; i++) {
+			phdr = vd->load64 + i;
+			if ((phdr->p_vaddr >= __START_KERNEL_map) &&
+			    !(IS_VMALLOC_ADDR(phdr->p_vaddr))) {
+
+				machdep->machspec->phys_base = phdr->p_paddr - 
+				    (phdr->p_vaddr & ~(__START_KERNEL_map));
+
+				if (CRASHDEBUG(1)) {
+					fprintf(fp, "p_vaddr: %lx p_paddr: %lx -> ",
+						phdr->p_vaddr, phdr->p_paddr);
+					fprintf(fp, "phys_base: %lx\n\n", 
+						machdep->machspec->phys_base);
+				}
+				break;
 			}
 		}
+
+		return;
 	}
-        close_tmpfile2();
 
-	if (value)
-		sp = value_search(value, &offset);
+	if ((xd = get_xendump_data())) {
+		if (text_start == __START_KERNEL_map) {
+		       /* 
+			*  Xen kernels are not relocatable (yet) and don't have
+			*  the "phys_base" entry point, so this is most likely 
+			*  a xendump of a fully-virtualized relocatable kernel.
+			*  No clues exist in the xendump header, so hardwire 
+			*  phys_base to 2MB and hope for the best.
+			*/
+			machdep->machspec->phys_base = 0x200000;
+			if (CRASHDEBUG(1))
+				fprintf(fp, 
+			    	    "default relocatable phys_base: %lx\n",
+					machdep->machspec->phys_base);
 
-	return sp;
-}
+		} else if (text_start > __START_KERNEL_map) {
+			switch (xd->flags & (XC_CORE_ELF|XC_CORE_NO_P2M)) 	
+			{
+			/*
+			 *  If this is a new ELF-style xendump with no
+			 *  p2m information, then it also must be a
+			 *  fully-virtualized relocatable kernel.  Again,
+			 *  the xendump header is useless, and we don't
+			 *  have /proc/iomem, so presume that the kernel 
+			 *  code starts at 2MB.
+			 */ 
+			case (XC_CORE_ELF|XC_CORE_NO_P2M):
+				machdep->machspec->phys_base = 0x200000 - 
+					(text_start - __START_KERNEL_map);
+				if (CRASHDEBUG(1))
+					fprintf(fp, "default relocatable " 
+			    	            "phys_base: %lx\n",
+						machdep->machspec->phys_base);
+				break;
 
-/*
- *  Unroll the kernel stack using a minimal amount of gdb services.
- */
-static void
-x86_64_back_trace(struct gnu_request *req, struct bt_info *bt)
-{
-	error(FATAL, "x86_64_back_trace: unused\n");
+			default:
+				break;
+			}
+		}
+	}
 }
 
 
 /*
- *  Print exception frame information for x86_64.
- *
- *    Pid: 0, comm: swapper Not tainted 2.6.5-1.360phro.rootsmp
- *    RIP: 0010:[<ffffffff8010f534>] <ffffffff8010f534>{default_idle+36}
- *    RSP: 0018:ffffffff8048bfd8  EFLAGS: 00000246
- *    RAX: 0000000000000000 RBX: ffffffff8010f510 RCX: 0000000000000018
- *    RDX: 0000010001e37280 RSI: ffffffff803ac0a0 RDI: 000001007f43c400
- *    RBP: 0000000000000000 R08: ffffffff8048a000 R09: 0000000000000000
- *    R10: ffffffff80482188 R11: 0000000000000001 R12: 0000000000000000
- *    R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
- *    FS:  0000002a96e14fc0(0000) GS:ffffffff80481d80(0000) GS:0000000055578aa0
- *    CS:  0010 DS: 0018 ES: 0018 CR0: 000000008005003b
- *    CR2: 0000002a9556b000 CR3: 0000000000101000 CR4: 00000000000006e0
- *
+ *  Create an index of mfns for each page that makes up the
+ *  kernel's complete phys_to_machine_mapping[max_pfn] array.
  */
-
-static long 
-x86_64_exception_frame(ulong flags, ulong kvaddr, char *local, 
-	struct bt_info *bt, FILE *ofp)
+static int 
+x86_64_xendump_p2m_create(struct xendump_data *xd)
 {
-        long rip, rsp, cs, ss, rflags, orig_rax, rbp; 
-	long rax, rbx, rcx, rdx, rsi, rdi;
-        long r8, r9, r10, r11, r12, r13, r14, r15;
-	struct machine_specific *ms;
-	char *pt_regs_buf;
-	long verified;
-	int err;
-
-        ms = machdep->machspec;
-
-	if (!(machdep->flags & PT_REGS_INIT)) {
-		err = 0;
-		err |= ((ms->pto.r15 = MEMBER_OFFSET("pt_regs", "r15")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.r14 = MEMBER_OFFSET("pt_regs", "r14")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.r13 = MEMBER_OFFSET("pt_regs", "r13")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.r12 = MEMBER_OFFSET("pt_regs", "r12")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.r11 = MEMBER_OFFSET("pt_regs", "r11")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.r10 = MEMBER_OFFSET("pt_regs", "r10")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.r9 = MEMBER_OFFSET("pt_regs", "r9")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.r8 = MEMBER_OFFSET("pt_regs", "r8")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.rax = MEMBER_OFFSET("pt_regs", "rax")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "rbx")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "rcx")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "rdx")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "rsi")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "rdi")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.rip = MEMBER_OFFSET("pt_regs", "rip")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "rsp")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.cs = MEMBER_OFFSET("pt_regs", "cs")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.ss = MEMBER_OFFSET("pt_regs", "ss")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "eflags")) ==
-			INVALID_OFFSET);
-		err |= ((ms->pto.orig_rax = 
-			MEMBER_OFFSET("pt_regs", "orig_rax")) == 
-			INVALID_OFFSET);
-		err |= ((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "rbp")) == 
-			INVALID_OFFSET);
+	int i, idx;
+	ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset;
+	ulong *up;
+	off_t offset; 
 
-		if (err)
-			error(WARNING, "pt_regs structure has changed\n");
+        if (!symbol_exists("phys_to_machine_mapping")) {
+                xd->flags |= XC_CORE_NO_P2M;
+                return TRUE;
+        }
 
-		machdep->flags |= PT_REGS_INIT;
-	}
+	if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) ==
+	     INVALID_OFFSET)
+		error(FATAL, 
+		    "cannot determine vcpu_guest_context.ctrlreg offset\n");
+	else if (CRASHDEBUG(1))
+		fprintf(xd->ofp, 
+		    "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n",
+			ctrlreg_offset);
+
+	offset = (off_t)xd->xc_core.header.xch_ctxt_offset + 
+		(off_t)ctrlreg_offset;
+
+	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+		error(FATAL, "cannot lseek to xch_ctxt_offset\n");
+
+	if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) !=
+	    sizeof(ctrlreg))
+		error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n");
+
+	for (i = 0; CRASHDEBUG(1) && (i < 8); i++)
+		fprintf(xd->ofp, "ctrlreg[%d]: %lx\n", i, ctrlreg[i]);
+
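+	/*
+	 *  ctrlreg[3] is the guest cr3; its page frame contains the
+	 *  PML4 used for the page table walks below.
+	 */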
+	mfn = ctrlreg[3] >> PAGESHIFT();
+
+	if (!xc_core_mfn_to_page(mfn, machdep->machspec->pml4))
+		error(FATAL, "cannot read/find cr3 page\n");
+
+	if (CRASHDEBUG(7)) 
+		x86_64_debug_dump_page(xd->ofp, machdep->machspec->pml4, 
+                	"contents of PML4 page:");
 
-	if (kvaddr) {
-		pt_regs_buf = GETBUF(SIZE(pt_regs));
-        	readmem(kvaddr, KVADDR, pt_regs_buf,
-                	SIZE(pt_regs), "pt_regs", FAULT_ON_ERROR);
-	} else
-		pt_regs_buf = local;
+	kvaddr = symbol_value("end_pfn");
+	if (!x86_64_xendump_load_page(kvaddr, xd))
+		return FALSE;
 
-	rip = ULONG(pt_regs_buf + ms->pto.rip);
-	rsp = ULONG(pt_regs_buf + ms->pto.rsp);
-	cs = ULONG(pt_regs_buf + ms->pto.cs);
-	ss = ULONG(pt_regs_buf + ms->pto.ss);
-	rflags = ULONG(pt_regs_buf + ms->pto.eflags);
-	orig_rax = ULONG(pt_regs_buf + ms->pto.orig_rax);
-	rbp = ULONG(pt_regs_buf + ms->pto.rbp);
-	rax = ULONG(pt_regs_buf + ms->pto.rax);
-	rbx = ULONG(pt_regs_buf + ms->pto.rbx);
-	rcx = ULONG(pt_regs_buf + ms->pto.rcx);
-	rdx = ULONG(pt_regs_buf + ms->pto.rdx);
-	rsi = ULONG(pt_regs_buf + ms->pto.rsi);
-	rdi = ULONG(pt_regs_buf + ms->pto.rdi);
-	r8 = ULONG(pt_regs_buf + ms->pto.r8);
-	r9 = ULONG(pt_regs_buf + ms->pto.r9);
-	r10 = ULONG(pt_regs_buf + ms->pto.r10);
-	r11 = ULONG(pt_regs_buf + ms->pto.r11);
-	r12 = ULONG(pt_regs_buf + ms->pto.r12);
-	r13 = ULONG(pt_regs_buf + ms->pto.r13);
-	r14 = ULONG(pt_regs_buf + ms->pto.r14);
-	r15 = ULONG(pt_regs_buf + ms->pto.r15);
+	up = (ulong *)(xd->page + PAGEOFFSET(kvaddr));
+	if (CRASHDEBUG(1))
+		fprintf(xd->ofp, "end_pfn: %lx\n", *up);
 
-        verified = x86_64_eframe_verify(bt, 
-		kvaddr ? kvaddr : (local - bt->stackbuf) + bt->stackbase,
-		cs, ss, rip, rsp, rflags);
+	xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) +
+                ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0);
 
-	/*
-	 *  If it's print-if-verified request, don't print bogus eframes.
-	 */
-        if (!verified && ((flags & (EFRAME_VERIFY|EFRAME_PRINT)) == 
-	    (EFRAME_VERIFY|EFRAME_PRINT))) 
-		flags &= ~EFRAME_PRINT;
+	if ((xd->xc_core.p2m_frame_index_list = (ulong *)
+	    malloc(xd->xc_core.p2m_frames * sizeof(ulong))) == NULL)
+        	error(FATAL, "cannot malloc p2m_frame_list");
 
-	if (CRASHDEBUG(2)) 
-		fprintf(ofp, "< exception frame at: %lx >\n", kvaddr ?  kvaddr :
-			(local - bt->stackbuf) + bt->stackbase);
+	kvaddr = symbol_value("phys_to_machine_mapping");
+	if (!x86_64_xendump_load_page(kvaddr, xd))
+		return FALSE;
 
-	if (flags & EFRAME_PRINT) {
-		if (flags & EFRAME_SEARCH) {
-			fprintf(ofp, "\n  %s-MODE EXCEPTION FRAME AT: %lx\n",
-				cs & 3 ? "USER" : "KERNEL", 
-				kvaddr ?  kvaddr : 
-				(local - bt->stackbuf) + bt->stackbase);
-		}
+	up = (ulong *)(xd->page + PAGEOFFSET(kvaddr));
+	if (CRASHDEBUG(1))
+		fprintf(fp, "phys_to_machine_mapping: %lx\n", *up);
 
-		fprintf(ofp, "    RIP: %016lx  RSP: %016lx  RFLAGS: %08lx\n", 
-			rip, rsp, rflags);
-		fprintf(ofp, "    RAX: %016lx  RBX: %016lx  RCX: %016lx\n", 
-			rax, rbx, rcx);
-		fprintf(ofp, "    RDX: %016lx  RSI: %016lx  RDI: %016lx\n", 
-	 		rdx, rsi, rdi);
-		fprintf(ofp, "    RBP: %016lx   R8: %016lx   R9: %016lx\n", 
-			rbp, r8, r9);
-		fprintf(ofp, "    R10: %016lx  R11: %016lx  R12: %016lx\n", 
-			r10, r11, r12);
-		fprintf(ofp, "    R13: %016lx  R14: %016lx  R15: %016lx\n", 
-			r13, r14, r15);
-		fprintf(ofp, "    ORIG_RAX: %016lx  CS: %04lx  SS: %04lx\n", 
-			orig_rax, cs, ss);
+	kvaddr = *up;
+	machdep->last_ptbl_read = BADADDR;
 
-		if (!verified)
-			error(WARNING, "possibly bogus exception frame\n");
+	for (i = 0; i < xd->xc_core.p2m_frames; i++) {
+		if ((idx = x86_64_xendump_page_index(kvaddr, xd)) == MFN_NOT_FOUND)
+			return FALSE;
+		xd->xc_core.p2m_frame_index_list[i] = idx; 
+		kvaddr += PAGESIZE();
 	}
 
-        if ((flags & EFRAME_PRINT) && BT_REFERENCE_CHECK(bt)) {
-                x86_64_do_bt_reference_check(bt, rip, NULL);
-                x86_64_do_bt_reference_check(bt, rsp, NULL);
-                x86_64_do_bt_reference_check(bt, cs, NULL);
-                x86_64_do_bt_reference_check(bt, ss, NULL);
-                x86_64_do_bt_reference_check(bt, rflags, NULL);
-                x86_64_do_bt_reference_check(bt, orig_rax, NULL);
-                x86_64_do_bt_reference_check(bt, rbp, NULL);
-                x86_64_do_bt_reference_check(bt, rax, NULL);
-                x86_64_do_bt_reference_check(bt, rbx, NULL);
-                x86_64_do_bt_reference_check(bt, rcx, NULL);
-                x86_64_do_bt_reference_check(bt, rdx, NULL);
-                x86_64_do_bt_reference_check(bt, rsi, NULL);
-                x86_64_do_bt_reference_check(bt, rdi, NULL);
-                x86_64_do_bt_reference_check(bt, r8, NULL);
-                x86_64_do_bt_reference_check(bt, r9, NULL);
-                x86_64_do_bt_reference_check(bt, r10, NULL);
-                x86_64_do_bt_reference_check(bt, r11, NULL);
-                x86_64_do_bt_reference_check(bt, r12, NULL);
-                x86_64_do_bt_reference_check(bt, r13, NULL);
-                x86_64_do_bt_reference_check(bt, r14, NULL);
-                x86_64_do_bt_reference_check(bt, r15, NULL);
-        }
+	machdep->last_ptbl_read = 0;
 
-	if (kvaddr)
-		FREEBUF(pt_regs_buf);
+	return TRUE;
+}
 
-	if (flags & EFRAME_CS)
-		return cs;
-	else if (flags & EFRAME_VERIFY)
-		return verified;
+static void
+x86_64_debug_dump_page(FILE *ofp, char *page, char *name)
+{
+	int i;
+	ulong *up;
 
-	return 0;
+        fprintf(ofp, "%s\n", name);
+
+        up = (ulong *)page;
+        for (i = 0; i < 256; i++) {
+        	fprintf(ofp, "%016lx: %016lx %016lx\n",
+                        (ulong)((i * 2) * sizeof(ulong)),
+                        *up, *(up+1));
+                up += 2;
+        }
 }
 
 /*
- *  Check that the verifiable registers contain reasonable data.
+ *  Find the page associated with the kvaddr, and read its contents
+ *  into the passed-in buffer.
  */
-#define RAZ_MASK 0xffffffffffc08028    /* return-as-zero bits */
-
-static int 
-x86_64_eframe_verify(struct bt_info *bt, long kvaddr, long cs, long ss,
-	long rip, long rsp, long rflags)
+static char *
+x86_64_xendump_load_page(ulong kvaddr, struct xendump_data *xd)
 {
-	if ((rflags & RAZ_MASK) || !(rflags & 0x2))
-		return FALSE;
+	ulong mfn;
+	ulong *pml4, *pgd, *pmd, *ptep;
 
-        if ((cs == 0x10) && (ss == 0x18)) {
-                if (is_kernel_text(rip) && IS_KVADDR(rsp))
-                        return TRUE;
-        }
+        pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);
+	mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
 
-        if ((cs == 0x10) && kvaddr) {
-                if (is_kernel_text(rip) && IS_KVADDR(rsp) &&
-		    (rsp == (kvaddr + SIZE(pt_regs) + 8)))
-                        return TRUE;
-	}
+	if (CRASHDEBUG(3))
+		fprintf(xd->ofp, 
+		    "[%lx] pml4: %lx  mfn: %lx  pml4_index: %lx\n", 
+			kvaddr, *pml4, mfn, pml4_index(kvaddr));
 
-        if ((cs == 0x10) && kvaddr) {
-                if (is_kernel_text(rip) && IS_KVADDR(rsp) &&
-		    (rsp == (kvaddr + SIZE(pt_regs))))
-                        return TRUE;
-	}
+	if (!xc_core_mfn_to_page(mfn, machdep->pgd))
+		error(FATAL, "cannot read/find pud page\n");
+
+        if (CRASHDEBUG(7))
+		x86_64_debug_dump_page(xd->ofp, machdep->pgd, 
+                	"contents of page upper directory page:");
+
+        pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr);
+	mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
+
+	if (CRASHDEBUG(3))
+		fprintf(xd->ofp, 
+		    "[%lx] pgd: %lx  mfn: %lx  pgd_index: %lx\n", 
+			kvaddr, *pgd, mfn, pgd_index(kvaddr));
+
+        if (!xc_core_mfn_to_page(mfn, machdep->pmd))
+                error(FATAL, "cannot read/find pmd page\n");
+
+        if (CRASHDEBUG(7)) 
+		x86_64_debug_dump_page(xd->ofp, machdep->pmd, 
+			"contents of page middle directory page:");
+
+        pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr);
+	mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
+
+	if (CRASHDEBUG(3))
+		fprintf(xd->ofp, 
+		    "[%lx] pmd: %lx  mfn: %lx  pmd_index: %lx\n", 
+			kvaddr, *pmd, mfn, pmd_index(kvaddr));
+
+        if (!xc_core_mfn_to_page(mfn, machdep->ptbl))
+                error(FATAL, "cannot read/find page table page\n");
+
+        if (CRASHDEBUG(7)) 
+		x86_64_debug_dump_page(xd->ofp, machdep->ptbl, 
+			"contents of page table page:");
 
-        if ((cs == 0x33) && (ss == 0x2b)) {
-                if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc))
-                        return TRUE;
-        }
+        ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr);
+	mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
 
-	return FALSE;
+	if (CRASHDEBUG(3))
+		fprintf(xd->ofp, 
+		    "[%lx] ptep: %lx  mfn: %lx  pte_index: %lx\n", 
+			kvaddr, *ptep, mfn, pte_index(kvaddr));
+
+        if (!xc_core_mfn_to_page(mfn, xd->page))
+                error(FATAL, "cannot read/find pte page\n");
+
+        if (CRASHDEBUG(7)) 
+		x86_64_debug_dump_page(xd->ofp, xd->page, 
+			"contents of page:");
+
+	return xd->page;
 }
 
 /*
- *  Get a stack frame combination of pc and ra from the most relevent spot.
+ *  Find the dumpfile page index associated with the kvaddr.
  */
-static void
-x86_64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
+static int 
+x86_64_xendump_page_index(ulong kvaddr, struct xendump_data *xd)
 {
-	if (bt->flags & BT_DUMPFILE_SEARCH)
-		return x86_64_get_dumpfile_stack_frame(bt, pcp, spp);
+        int idx;
+	ulong mfn;
+	ulong *pml4, *pgd, *pmd, *ptep;
 
-        if (pcp)
-                *pcp = x86_64_get_pc(bt);
-        if (spp)
-                *spp = x86_64_get_sp(bt);
+        pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);
+	mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
+
+        if ((mfn != machdep->last_pgd_read) && 
+	    !xc_core_mfn_to_page(mfn, machdep->pgd))
+		error(FATAL, "cannot read/find pud page\n");
+        machdep->last_pgd_read = mfn;
+
+        pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr);
+	mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
+
+        if ((mfn != machdep->last_pmd_read) && 
+            !xc_core_mfn_to_page(mfn, machdep->pmd))
+                error(FATAL, "cannot read/find pmd page\n");
+
+        machdep->last_pmd_read = mfn;
+
+        pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr);
+	mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
+
+        if ((mfn != machdep->last_ptbl_read) && 
+	    !xc_core_mfn_to_page(mfn, machdep->ptbl))
+                error(FATAL, "cannot read/find page table page\n");
+        machdep->last_ptbl_read = mfn;
+
+        ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr);
+	mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();
+
+        if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND)
+                error(INFO, "cannot determine page index for %lx\n",
+                        kvaddr);
+
+	return idx;
 }
 
 /*
- *  Get the starting point for the active cpus in a diskdump/netdump.
+ *  Pull the rsp from the cpu_user_regs struct in the header,
+ *  turn it into a task, and match it with the active_set.
+ *  Unfortunately, the registers in the vcpu_guest_context 
+ *  are not necessarily those of the panic task, so for now
+ *  let get_active_set_panic_task() get the right task.
  */
-static void
-x86_64_get_dumpfile_stack_frame(struct bt_info *bt_in, ulong *rip, ulong *rsp) 
+static ulong 
+x86_64_xendump_panic_task(struct xendump_data *xd)
 {
-	int panic_task;
-        int i, panic, stage;
-        char *sym;
-	struct syment *sp;
-        ulong *up;
-	struct bt_info bt_local, *bt;
-        struct machine_specific *ms;
-	char *user_regs;
-	ulong ur_rip;
-	ulong ur_rsp;
+	int i;
+	ulong rsp;
+	off_t offset;
+	ulong task;
 
-        bt = &bt_local;
-        BCOPY(bt_in, bt, sizeof(struct bt_info));
-        ms = machdep->machspec;
-	ur_rip = ur_rsp = 0;
-	stage = 0;
+	if (INVALID_MEMBER(vcpu_guest_context_user_regs) ||
+	    INVALID_MEMBER(cpu_user_regs_esp))
+		return NO_TASK;
+
+        offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+                (off_t)OFFSET(vcpu_guest_context_user_regs) +
+		(off_t)OFFSET(cpu_user_regs_rsp);
+
+        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+		return NO_TASK;
+
+        if (read(xd->xfd, &rsp, sizeof(ulong)) != sizeof(ulong))
+		return NO_TASK;
+
+        if (IS_KVADDR(rsp) && (task = stkptr_to_task(rsp))) {
+
+                for (i = 0; i < NR_CPUS; i++) {
+                	if (task == tt->active_set[i]) {
+                        	if (CRASHDEBUG(0))
+                                	error(INFO,
+                            "x86_64_xendump_panic_task: rsp: %lx -> task: %lx\n",
+                                        	rsp, task);
+                        	return task;
+			}
+		}               
 
-	panic_task = tt->panic_task == bt->task ? TRUE : FALSE;
+               	error(WARNING,
+		    "x86_64_xendump_panic_task: rsp: %lx -> task: %lx (not active)\n",
+			rsp, task);
+        }
 
-	if (panic_task && bt->machdep) {
-		user_regs = bt->machdep;
+	return NO_TASK;
+}
 
-		if (x86_64_eframe_verify(bt, 
-		    0,
-		    ULONG(user_regs + OFFSET(user_regs_struct_cs)),
-		    ULONG(user_regs + OFFSET(user_regs_struct_ss)),
-		    ULONG(user_regs + OFFSET(user_regs_struct_rip)),
-        	    ULONG(user_regs + OFFSET(user_regs_struct_rsp)),
-		    ULONG(user_regs + OFFSET(user_regs_struct_eflags)))) {
-			bt->stkptr = ULONG(user_regs + 
-				OFFSET(user_regs_struct_rsp));
-			if (x86_64_in_irqstack(bt)) {
-				ur_rip = ULONG(user_regs + 
-					OFFSET(user_regs_struct_rip));
-				ur_rsp = ULONG(user_regs + 
-					OFFSET(user_regs_struct_rsp));
-				goto skip_stage;
-			}
-		}
-	}
+/*
+ *  Because of an off-by-one vcpu bug in early xc_domain_dumpcore()
+ *  instantiations, the registers in the vcpu_guest_context are not 
+ *  necessarily those of the panic task.  Furthermore, the rsp is
+ *  seemingly unassociated with the task, presumably due to a hypervisor
+ *  callback, so only accept the contents if they refer to the panic
+ *  task's stack. 
+ */
+static void 
+x86_64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip, ulong *rsp)
+{
+	ulong task, xrip, xrsp;
+	off_t offset;
+	struct syment *sp;
+	int cpu;
 
-	panic = FALSE;
+        if (INVALID_MEMBER(vcpu_guest_context_user_regs) ||
+            INVALID_MEMBER(cpu_user_regs_rip) ||
+            INVALID_MEMBER(cpu_user_regs_rsp))
+                goto generic;
+
+        offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+                (off_t)OFFSET(vcpu_guest_context_user_regs) +
+                (off_t)OFFSET(cpu_user_regs_rsp);
+        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+                goto generic;
+        if (read(xd->xfd, &xrsp, sizeof(ulong)) != sizeof(ulong))
+                goto generic;
+
+        offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+                (off_t)OFFSET(vcpu_guest_context_user_regs) +
+                (off_t)OFFSET(cpu_user_regs_rip);
+        if (lseek(xd->xfd, offset, SEEK_SET) == -1)
+                goto generic;
+        if (read(xd->xfd, &xrip, sizeof(ulong)) != sizeof(ulong))
+                goto generic;
 
 	/*
-	 *  Check the process stack first.
+	 *  This works -- comes from smp_send_stop call in panic.
+	 *  But xendump_panic_hook() will forestall this function 
+	 *  from being called (for now).
 	 */
-next_stack:
-        for (i = 0, up = (ulong *)bt->stackbuf; 
-	     i < (bt->stacktop - bt->stackbase)/sizeof(ulong); i++, up++) {
-                sym = closest_symbol(*up);
-
-                if (STREQ(sym, "netconsole_netdump") || 
-		    STREQ(sym, "netpoll_start_netdump") ||
-		    STREQ(sym, "start_disk_dump") ||
-		    STREQ(sym, "disk_dump") ||
-		    STREQ(sym, "try_crashdump")) {
-                        *rip = *up;
-                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
-                        return;
-                }
+        if (IS_KVADDR(xrsp) && (task = stkptr_to_task(xrsp)) &&
+	    (task == bt->task)) {
+		if (CRASHDEBUG(1))
+			fprintf(xd->ofp, 
+		"hooks from vcpu_guest_context: rip: %lx rsp: %lx\n", xrip, xrsp);
+		*rip = xrip;
+		*rsp = xrsp;
+		return;
+	}
 
-                if ((stage == 2) && 
-                    (STREQ(sym, "nmi_watchdog_tick") ||
-                     STREQ(sym, "default_do_nmi"))) {
-			sp = x86_64_function_called_by((*up)-5);
-			if (!sp || !STREQ(sp->name, "die_nmi")) 
-				continue;
-                        *rip = *up;
-                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
-			bt_in->flags |= BT_START;
-			*rip = symbol_value("die_nmi");
-			*rsp = (*rsp) - (7*sizeof(ulong));
-                        return;
-                }
+generic:
 
-                if (STREQ(sym, "panic")) {
-                        *rip = *up;
-                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
-                        panic = TRUE;
-                        continue;   /* keep looking for die */
-                }
+	machdep->get_stack_frame(bt, rip, rsp);
 
-                if (STREQ(sym, "die")) {
-                        *rip = *up;
-                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
-                        for (i++, up++; i < LONGS_PER_STACK; i++, up++) {
-                                sym = closest_symbol(*up);
-                                if (STREQ(sym, "sysrq_handle_crash"))
-                                        goto next_sysrq;
+	/*
+	 *  If this is an active task showing itself in schedule(), 
+	 *  then the thread_struct rsp is stale.  It has to be coming 
+	 *  from a callback via the interrupt stack.
+	 */
+	if (is_task_active(bt->task) && (symbol_value("thread_return") == *rip)) {
+		cpu = bt->tc->processor;
+		xrsp = machdep->machspec->stkinfo.ibase[cpu] + 
+			machdep->machspec->stkinfo.isize - sizeof(ulong);
+
+                while (readmem(xrsp, KVADDR, &xrip,
+                    sizeof(ulong), "xendump rsp", RETURN_ON_ERROR)) {
+        		if ((sp = value_search(xrip, (ulong *)&offset)) && 
+			    STREQ(sp->name, "smp_really_stop_cpu") && offset) {
+                                *rip = xrip;
+                                *rsp = xrsp;
+                                if (CRASHDEBUG(1))
+                                        error(INFO,
+                                            "switch thread_return to smp_call_function_interrupt\n");
+                                break;
                         }
-                        return;
+                        xrsp -= sizeof(ulong);
+                        if (xrsp <= machdep->machspec->stkinfo.ibase[cpu])
+                                break;
                 }
+	}
+}
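
The two functions above share the same low-level access pattern: compute an offset as the context offset from the xc_core header plus a structure member offset, lseek() to it, and read a single long. A minimal sketch of that pattern follows; the file name and offsets are placeholders, not values from the dump format.

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int read_ulong_at(int fd, off_t base, off_t member_offset,
			 unsigned long *value)
{
	if (lseek(fd, base + member_offset, SEEK_SET) == (off_t)-1)
		return 0;
	return read(fd, value, sizeof(*value)) == sizeof(*value);
}

int main(void)
{
	unsigned long rsp;
	int fd = open("xendump", O_RDONLY);	/* hypothetical dumpfile */

	if (fd < 0)
		return 1;
	if (read_ulong_at(fd, 0x1000 /* ctxt offset */,
			  0x50 /* rsp member offset */, &rsp))
		printf("rsp: %lx\n", rsp);
	close(fd);
	return 0;
}
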
 
-                if (STREQ(sym, "sysrq_handle_crash")) {
-next_sysrq:
-                        *rip = *up;
-                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
-                        machdep->flags |= SYSRQ;
-                        for (i++, up++; i < LONGS_PER_STACK; i++, up++) {
-                                sym = closest_symbol(*up);
-                                if (STREQ(sym, "sysrq_handle_crash"))
-                                        goto next_sysrq;
-                        }
-                        return;
-                }
+/* for XEN Hypervisor analysis */
 
-                if (!panic_task && (stage > 0) && 
-		    STREQ(sym, "smp_call_function_interrupt")) {
-                        *rip = *up;
-                        *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
-			return;
-                }
-	}
+static int 
+x86_64_is_kvaddr_hyper(ulong addr)
+{
+        return (addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END); 
+}
 
-	if (panic) 
-		return;
+static ulong
+x86_64_get_stackbase_hyper(ulong task)
+{
+	struct xen_hyper_vcpu_context *vcc;
+	struct xen_hyper_pcpu_context *pcc;
+	ulong rsp0, base;
+
+	/* task means vcpu here */
+	vcc = xen_hyper_vcpu_to_vcpu_context(task);
+	if (!vcc)
+		error(FATAL, "invalid vcpu\n");
+
+	pcc = xen_hyper_id_to_pcpu_context(vcc->processor);
+	if (!pcc)
+		error(FATAL, "invalid pcpu number\n");
+
+	rsp0 = pcc->sp.rsp0;
+	base = rsp0 & (~(STACKSIZE() - 1));
+	return base;
+}
 
-skip_stage:
-	switch (stage) 
-	{
-	/*
-         *  Now check the processor's interrupt stack.
-         */
-	case 0:
-		bt->stackbase = ms->stkinfo.ibase[bt->tc->processor];
-		bt->stacktop = ms->stkinfo.ibase[bt->tc->processor] + 
-			ms->stkinfo.isize;
-		bt->stackbuf = ms->irqstack;
-		alter_stackbuf(bt);
-		stage = 1;
-		goto next_stack;
+static ulong
+x86_64_get_stacktop_hyper(ulong task)
+{
+	return x86_64_get_stackbase_hyper(task) + STACKSIZE();
+}
 
-        /*
-         *  Check the NMI exception stack.
-         */
-	case 1:
-		bt->stackbase = ms->stkinfo.ebase[bt->tc->processor][NMI_STACK];
-		bt->stacktop = ms->stkinfo.ebase[bt->tc->processor][NMI_STACK] +
-                       ms->stkinfo.esize;
-		bt->stackbuf = ms->irqstack;
-		alter_stackbuf(bt);
-		stage = 2;
-		goto next_stack;
+#define EXCEPTION_STACKSIZE_HYPER (1024UL)
 
-	case 2:
-		break;
-	}
+static ulong
+x86_64_in_exception_stack_hyper(ulong vcpu, ulong rsp)
+{
+	struct xen_hyper_vcpu_context *vcc;
+	struct xen_hyper_pcpu_context *pcc;
+	int i;
+	ulong stackbase;
 
-	/*
-	 *  We didn't find what we were looking for, so just use what was
-	 *  passed in from the ELF header.
-	 */
-	if (ur_rip && ur_rsp) {
-        	*rip = ur_rip;
-		*rsp = ur_rsp;
+	vcc = xen_hyper_vcpu_to_vcpu_context(vcpu);
+	if (!vcc)
+		error(FATAL, "invalid vcpu\n");
+
+	pcc = xen_hyper_id_to_pcpu_context(vcc->processor);
+	if (!pcc)
+		error(FATAL, "invalid pcpu number\n");
+
+	for (i = 0; i < XEN_HYPER_TSS_IST_MAX; i++) {
+		if (pcc->ist[i] == 0) {
+			continue;
+		}
+		stackbase = pcc->ist[i] - EXCEPTION_STACKSIZE_HYPER;
+		if ((rsp & ~(EXCEPTION_STACKSIZE_HYPER - 1)) == stackbase) {
+			return stackbase;
+		}
 	}
 
-        console("x86_64_get_dumpfile_stack_frame: cannot find anything useful\n");
+	return 0;
+}
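
The containment test above depends on the per-IST exception stacks being EXCEPTION_STACKSIZE_HYPER bytes long and aligned to that power-of-two size, so rounding rsp down to the boundary either lands exactly on the stack base or it does not. A self-contained sketch with made-up addresses:

#include <stdio.h>

#define STACK_SIZE 1024UL	/* must be a power of two */

static int in_stack(unsigned long rsp, unsigned long stackbase)
{
	/* round rsp down to the nearest STACK_SIZE boundary */
	return (rsp & ~(STACK_SIZE - 1)) == stackbase;
}

int main(void)
{
	unsigned long base = 0xffff830000010000UL;	/* hypothetical base */

	printf("%d\n", in_stack(base + 0x3f8, base));	/* 1: inside     */
	printf("%d\n", in_stack(base + 0x400, base));	/* 0: next stack */
	return 0;
}
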
 
-	bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH;
+static void
+x86_64_get_stack_frame_hyper(struct bt_info *bt, ulong *pcp, ulong *spp)
+{
+	struct xen_hyper_vcpu_context *vcc;
+        int pcpu;
+        ulong *regs;
+	ulong rsp, rip;
+
+	/* task means vcpu here */
+	vcc = xen_hyper_vcpu_to_vcpu_context(bt->task);
+	if (!vcc)
+		error(FATAL, "invalid vcpu\n");
+
+	pcpu = vcc->processor;
+	if (!xen_hyper_test_pcpu_id(pcpu)) {
+		error(FATAL, "invalid pcpu number\n");
+	}
+
+	if (bt->flags & BT_TEXT_SYMBOLS_ALL) {
+		if (spp)
+			*spp = x86_64_get_stackbase_hyper(bt->task);
+		if (pcp)
+			*pcp = 0;
+		bt->flags &= ~BT_TEXT_SYMBOLS_ALL;
+		return;
+	}
 
-        machdep->get_stack_frame(bt, rip, rsp);
+	regs = (ulong *)xen_hyper_id_to_dumpinfo_context(pcpu)->pr_reg_ptr;
+	rsp = XEN_HYPER_X86_64_NOTE_RSP(regs);
+	rip = XEN_HYPER_X86_64_NOTE_RIP(regs);
+
+	if (spp) {
+		if (x86_64_in_exception_stack_hyper(bt->task, rsp))
+			*spp = rsp;
+		else if (rsp < x86_64_get_stackbase_hyper(bt->task) ||
+			rsp >= x86_64_get_stacktop_hyper(bt->task))
+			*spp = x86_64_get_stackbase_hyper(bt->task);
+		else
+			*spp = rsp;
+	}
+	if (pcp) {
+		if (is_kernel_text(rip))
+			*pcp = rip;
+		else
+			*pcp = 0;
+	}
 }
 
-/*
- *  Get the saved RSP from the task's thread_struct.
- */
-static ulong
-x86_64_get_sp(struct bt_info *bt)
+static int
+x86_64_print_stack_entry_hyper(struct bt_info *bt, FILE *ofp, int level, 
+	int stkindex, ulong text)
 {
-        ulong offset, rsp;
+	ulong rsp, offset;
+	struct syment *sp;
+	char *name;
+	int result; 
+	char buf[BUFSIZE];
 
-        if (tt->flags & THREAD_INFO) {
-                readmem(bt->task + OFFSET(task_struct_thread) +
-			OFFSET(thread_struct_rsp), KVADDR,
-                        &rsp, sizeof(void *),
-                        "thread_struct rsp", FAULT_ON_ERROR);
-                return rsp;
-        }
+	offset = 0;
+	sp = value_search(text, &offset);
+	if (!sp)
+		return BACKTRACE_ENTRY_IGNORED;
 
-        offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rsp); 
+	name = sp->name;
 
-        return GET_STACK_ULONG(offset);
-}
+	if (STREQ(name, "syscall_enter"))
+		result = BACKTRACE_COMPLETE;
+	else
+		result = BACKTRACE_ENTRY_DISPLAYED;
 
-/*
- *  Get the saved PC from the task's thread_struct if it exists;
- *  otherwise just use the "thread_return" label value.
- */
-static ulong
-x86_64_get_pc(struct bt_info *bt)
-{
-        ulong offset, rip;
+	rsp = bt->stackbase + (stkindex * sizeof(long));
 
-	if (INVALID_MEMBER(thread_struct_rip)) 
-		return symbol_value("thread_return");
+	if ((bt->flags & BT_FULL)) {
+		if (bt->frameptr) 
+			x86_64_display_full_frame(bt, rsp, ofp);
+		bt->frameptr = rsp + sizeof(ulong);
+	}
 
-        if (tt->flags & THREAD_INFO) {
-                readmem(bt->task + OFFSET(task_struct_thread) +
-                        OFFSET(thread_struct_rip), KVADDR,
-                        &rip, sizeof(void *),
-                        "thread_struct rip", FAULT_ON_ERROR);
-                return rip;
-        }
+        fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level,
+		rsp, name, text);
+
+        if (bt->flags & BT_LINE_NUMBERS) {
+                get_line_number(text, buf, FALSE);
+                if (strlen(buf))
+                        fprintf(ofp, "    %s\n", buf);
+	}
 
-        offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rip);
+	if (BT_REFERENCE_CHECK(bt))
+		x86_64_do_bt_reference_check(bt, text, name);
 
-        return GET_STACK_ULONG(offset);
+	return result;
 }
 
-
-/*
- *  Do the work for x86_64_get_sp() and x86_64_get_pc().
- */
 static void
-get_x86_64_frame(struct bt_info *bt, ulong *getpc, ulong *getsp)
-{
-	error(FATAL, "get_x86_64_frame: TBD\n");
-}
-
-/*
- *  Do the work for cmd_irq().
- */
-static void 
-x86_64_dump_irq(int irq)
+x86_64_print_eframe_regs_hyper(struct bt_info *bt)
 {
-        if (symbol_exists("irq_desc")) {
-                machdep->dump_irq = generic_dump_irq;
-                return(generic_dump_irq(irq));
-        }
+	ulong *up;
+	ulong offset;
+	struct syment *sp;
 
-        error(FATAL, "ia64_dump_irq: irq_desc[] does not exist?\n");
-}
 
-/* 
- *  Do the work for irq -d
- */
-void 
-x86_64_display_idt_table(void)
-{
-	int i;
-	char *idt_table_buf;
-	char buf[BUFSIZE];
-	ulong *ip;
+	up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]);
+	up -= 21;
 
-	idt_table_buf = GETBUF(SIZE(gate_struct) * 256);
-        readmem(symbol_value("idt_table"), KVADDR, idt_table_buf, 
-		SIZE(gate_struct) * 256, "idt_table", FAULT_ON_ERROR);
-	ip = (ulong *)idt_table_buf;
+	fprintf(fp, "    [exception RIP: ");
+	if ((sp = value_search(up[16], &offset))) {
+               	fprintf(fp, "%s", sp->name);
+              	if (offset)
+               		fprintf(fp, (output_radix == 16) ? 
+					"+0x%lx" : "+%ld", offset);
+	} else
+       		fprintf(fp, "unknown or invalid address");
+	fprintf(fp, "]\n");
 
-	for (i = 0; i < 256; i++, ip += 2) {
-                if (i < 10)
-                        fprintf(fp, "  ");
-                else if (i < 100)
-                        fprintf(fp, " ");
-                fprintf(fp, "[%d] %s\n",
-                        i, x86_64_extract_idt_function(ip, buf, NULL));
-	}
+	fprintf(fp, "    RIP: %016lx  RSP: %016lx  RFLAGS: %08lx\n", 
+		up[16], up[19], up[18]);
+	fprintf(fp, "    RAX: %016lx  RBX: %016lx  RCX: %016lx\n", 
+		up[10], up[5], up[11]);
+	fprintf(fp, "    RDX: %016lx  RSI: %016lx  RDI: %016lx\n", 
+ 		up[12], up[13], up[14]);
+	fprintf(fp, "    RBP: %016lx   R8: %016lx   R9: %016lx\n", 
+		up[4], up[9], up[8]);
+	fprintf(fp, "    R10: %016lx  R11: %016lx  R12: %016lx\n", 
+		up[7], up[6], up[3]);
+	fprintf(fp, "    R13: %016lx  R14: %016lx  R15: %016lx\n", 
+		up[2], up[1], up[0]);
+	fprintf(fp, "    ORIG_RAX: %016lx  CS: %04lx  SS: %04lx\n", 
+		up[15], up[17], up[20]);
 
-	FREEBUF(idt_table_buf);
+	fprintf(fp, "--- <exception stack> ---\n");
 }
 
 /*
- *  Extract the function name out of the IDT entry.
+ *  Simple back tracer for the xen hypervisor.
+ *  There is no irq stack, so it is relatively easy.
  */
-static char *
-x86_64_extract_idt_function(ulong *ip, char *buf, ulong *retaddr)
+static void
+x86_64_simple_back_trace_cmd_hyper(struct bt_info *bt_in)
 {
-	ulong i1, i2, addr;
-	char locbuf[BUFSIZE];
-	physaddr_t phys;
+	int i, level, done;
+	ulong rsp, estack, stacktop;
+	ulong *up;
+	FILE *ofp;
+	struct bt_info bt_local, *bt;
+	char ebuf[EXCEPTION_STACKSIZE_HYPER];
 
-	if (buf)
-		BZERO(buf, BUFSIZE);
+	bt = &bt_local;
+	BCOPY(bt_in, bt, sizeof(struct bt_info));
 
-	i1 = *ip;
-	i2 = *(ip+1);
+	if (bt->flags & BT_FRAMESIZE_DEBUG) {
+		error(INFO, "-F not supported\n");
+		return;
+	}
 
-	i2 <<= 32;
-	addr = i2 & 0xffffffff00000000;
-	addr |= (i1 & 0xffff);
-	i1 >>= 32;
-	addr |= (i1 & 0xffff0000);
+	level = 0;
+	done = FALSE;
+	bt->call_target = NULL;
+	rsp = bt->stkptr;
+	if (!rsp) {
+		error(INFO, "cannot determine starting stack pointer\n");
+		return;
+	}
+	if (BT_REFERENCE_CHECK(bt))
+		ofp = pc->nullfp;
+	else
+		ofp = fp;
 
-	if (retaddr)
-		*retaddr = addr;
+	while ((estack = x86_64_in_exception_stack_hyper(bt->task, rsp))) {
+		bt->flags |= BT_EXCEPTION_STACK;
+		bt->stackbase = estack;
+		bt->stacktop = estack + EXCEPTION_STACKSIZE_HYPER;
+		bt->stackbuf = ebuf;
 
-	if (!buf)
-		return NULL;
+		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
+		    bt->stacktop - bt->stackbase, "exception stack contents",
+		    RETURN_ON_ERROR))
+			error(FATAL, "read of exception stack at %lx failed\n",
+				bt->stackbase);
 
-	value_to_symstr(addr, locbuf, 0);
-	if (strlen(locbuf))
-		sprintf(buf, locbuf);
-	else {
-		sprintf(buf, "%016lx", addr);
-		if (kvtop(NULL, addr, &phys, 0)) {
-			addr = machdep->kvbase + (ulong)phys;
-			if (value_to_symstr(addr, locbuf, 0)) {
-				strcat(buf, "  <");
-				strcat(buf, locbuf);
-				strcat(buf, ">");
-			}
-		}
-	}
+		stacktop = bt->stacktop - 168;
 
-	return buf;
-}
+        	for (i = (rsp - bt->stackbase)/sizeof(ulong);
+		     !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) {
+	
+			up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);
 
-/*
- *  Filter disassembly output if the output radix is not gdb's default 10
- */
-static int 
-x86_64_dis_filter(ulong vaddr, char *inbuf)
-{
-        char buf1[BUFSIZE];
-        char buf2[BUFSIZE];
-        char *colon, *p1;
-        int argc;
-        char *argv[MAXARGS];
-        ulong value;
+			if (!is_kernel_text(*up))
+				continue;
 
-	if (!inbuf) 
-		return TRUE;
-/*
- *  For some reason gdb can go off into the weeds translating text addresses,
- *  (on alpha -- not necessarily seen on x86_64) so this routine both fixes the 
- *  references as well as imposing the current output radix on the translations.
- */
-	console("IN: %s", inbuf);
+			switch (x86_64_print_stack_entry_hyper(bt, ofp, level, i,*up))
+			{
+			case BACKTRACE_ENTRY_DISPLAYED:
+				level++;
+				break;
+			case BACKTRACE_ENTRY_IGNORED:	
+				break;
+			case BACKTRACE_COMPLETE:
+				done = TRUE;
+				break;
+			}
+        	}
 
-	colon = strstr(inbuf, ":");
+		if (!BT_REFERENCE_CHECK(bt))
+			x86_64_print_eframe_regs_hyper(bt);
 
-	if (colon) {
-		sprintf(buf1, "0x%lx <%s>", vaddr,
-			value_to_symstr(vaddr, buf2, pc->output_radix));
-		sprintf(buf2, "%s%s", buf1, colon);
-		strcpy(inbuf, buf2);
+		up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]);
+		up -= 2;
+		rsp = bt->stkptr = *up;
+		up -= 3;
+		bt->instptr = *up;
+		done = FALSE;
+		bt->frameptr = 0;
 	}
 
-	strcpy(buf1, inbuf);
-	argc = parse_line(buf1, argv);
+	if (bt->flags & BT_EXCEPTION_STACK) {
+		bt->flags &= ~BT_EXCEPTION_STACK;
+		bt->stackbase = bt_in->stackbase;
+		bt->stacktop = bt_in->stacktop;
+		bt->stackbuf = bt_in->stackbuf;
+	}
 
-	if ((FIRSTCHAR(argv[argc-1]) == '<') && 
-	    (LASTCHAR(argv[argc-1]) == '>')) {
-		p1 = rindex(inbuf, '<');
-		while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) 
-			p1--;
+        for (i = (rsp - bt->stackbase)/sizeof(ulong);
+	     !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) {
 
-		if (!STRNEQ(p1, " 0x"))
-			return FALSE;
-		p1++;
+		up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);
 
-		if (!extract_hex(p1, &value, NULLCHAR, TRUE))
-			return FALSE;
+		if (!is_kernel_text(*up))
+			continue;
 
-		sprintf(buf1, "0x%lx <%s>\n", value,	
-			value_to_symstr(value, buf2, pc->output_radix));
+		switch (x86_64_print_stack_entry_hyper(bt, ofp, level, i,*up))
+		{
+		case BACKTRACE_ENTRY_DISPLAYED:
+			level++;
+			break;
+		case BACKTRACE_ENTRY_IGNORED:	
+			break;
+		case BACKTRACE_COMPLETE:
+			done = TRUE;
+			break;
+		}
+        }
+}
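
The back tracer above works by scanning the saved stack one long at a time and treating any value that falls in kernel text as a candidate return address, which x86_64_print_stack_entry_hyper() then resolves to a symbol. A stripped-down sketch of that scan, where is_text() and the address range stand in for crash's is_kernel_text():

#include <stdio.h>

/* stand-in for is_kernel_text(); the range is invented */
static int is_text(unsigned long addr)
{
	return addr >= 0xffff828c80100000UL && addr < 0xffff828c80400000UL;
}

static void scan_stack(const unsigned long *stack, int nwords,
		       unsigned long stackbase)
{
	int i;

	for (i = 0; i < nwords; i++)
		if (is_text(stack[i]))
			printf("  [%lx] possible return address: %lx\n",
				stackbase + i * sizeof(unsigned long),
				stack[i]);
}

int main(void)
{
	unsigned long stack[4] = {
		0, 0xffff828c80123456UL, 0xdeadbeefUL, 0xffff828c80234567UL
	};

	scan_stack(stack, 4, 0xffff830000010000UL);
	return 0;
}
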
 
-		sprintf(p1, buf1);
-	
-        } else if (STREQ(argv[argc-2], "callq") &&
-            hexadecimal(argv[argc-1], 0)) {
-            	/*
-             	 *  Update module code of the form:
-             	 *
-             	 *    callq  0xffffffffa0017aa0
-	      	 *
-             	 *  to show a bracketed direct call target.
-             	 */
-                p1 = &LASTCHAR(inbuf);
+static void
+x86_64_init_hyper(int when)
+{
+	switch (when)
+	{
+	case PRE_SYMTAB:
+		machdep->verify_symbol = x86_64_verify_symbol;
+                machdep->machspec = &x86_64_machine_specific;
+                if (pc->flags & KERNEL_DEBUG_QUERY)
+                        return;
+                machdep->pagesize = memory_page_size();
+                machdep->pageshift = ffs(machdep->pagesize) - 1;
+                machdep->pageoffset = machdep->pagesize - 1;
+                machdep->pagemask = ~((ulonglong)machdep->pageoffset);
+		machdep->stacksize = machdep->pagesize * 2;
+                if ((machdep->machspec->upml = (char *)malloc(PAGESIZE())) == NULL)
+                        error(FATAL, "cannot malloc upml space.");
+                if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
+                        error(FATAL, "cannot malloc pgd space.");
+                if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
+                        error(FATAL, "cannot malloc pmd space.");
+                if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
+                        error(FATAL, "cannot malloc ptbl space.");
+		if ((machdep->machspec->pml4 = 
+			(char *)malloc(PAGESIZE()*2)) == NULL)
+                        error(FATAL, "cannot malloc pml4 space.");
+                machdep->machspec->last_upml_read = 0;
+                machdep->machspec->last_pml4_read = 0;
+                machdep->last_pgd_read = 0;
+                machdep->last_pmd_read = 0;
+                machdep->last_ptbl_read = 0;
+		machdep->verify_paddr = generic_verify_paddr;
+		machdep->ptrs_per_pgd = PTRS_PER_PGD;
+                if (machdep->cmdline_arg)
+                        parse_cmdline_arg();
+		break;
 
-                if (extract_hex(argv[argc-1], &value, NULLCHAR, TRUE)) {
-                        sprintf(buf1, " <%s>\n",
-                                value_to_symstr(value, buf2,
-                                pc->output_radix));
-                        if (IS_MODULE_VADDR(value) &&
-                            !strstr(buf2, "+"))
-                                sprintf(p1, buf1);
-                }
-        }
+	case PRE_GDB:
+                machdep->machspec->page_offset = PAGE_OFFSET_XEN_HYPER;
+	        machdep->kvbase = (ulong)HYPERVISOR_VIRT_START;
+		machdep->identity_map_base = (ulong)PAGE_OFFSET_XEN_HYPER;
+                machdep->is_kvaddr = x86_64_is_kvaddr_hyper;
+                machdep->is_uvaddr = x86_64_is_uvaddr;
+	        machdep->eframe_search = x86_64_eframe_search;
+	        machdep->back_trace = x86_64_simple_back_trace_cmd_hyper;
+	        machdep->processor_speed = x86_64_processor_speed;
+	        machdep->kvtop = x86_64_kvtop;
+	        machdep->get_task_pgd = x86_64_get_task_pgd;
+		machdep->get_stack_frame = x86_64_get_stack_frame_hyper;
+		machdep->get_stackbase = x86_64_get_stackbase_hyper;
+		machdep->get_stacktop = x86_64_get_stacktop_hyper;
+		machdep->translate_pte = x86_64_translate_pte;
+		machdep->memory_size = xen_hyper_x86_memory_size;	/* KAK add */
+		machdep->is_task_addr = x86_64_is_task_addr;
+		machdep->dis_filter = x86_64_dis_filter;
+		machdep->cmd_mach = x86_64_cmd_mach;
+		machdep->get_smp_cpus = xen_hyper_x86_get_smp_cpus;	/* KAK add */
+		machdep->line_number_hooks = x86_64_line_number_hooks;
+		machdep->value_to_symbol = generic_machdep_value_to_symbol;
+		machdep->init_kernel_pgd = x86_64_init_kernel_pgd;
+		machdep->clear_machdep_cache = x86_64_clear_machdep_cache;
 
-	console("    %s", inbuf);
+		/* machdep table for Xen Hypervisor */
+		xhmachdep->pcpu_init = xen_hyper_x86_pcpu_init;
+		break;
+
+	case POST_GDB:
+		XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86");
+		XEN_HYPER_STRUCT_SIZE_INIT(tss_struct, "tss_struct");
+		XEN_HYPER_ASSIGN_OFFSET(tss_struct_rsp0) = MEMBER_OFFSET("tss_struct", "__blh") + sizeof(short unsigned int);
+		XEN_HYPER_MEMBER_OFFSET_INIT(tss_struct_ist, "tss_struct", "ist");
+		if (symbol_exists("cpu_data")) {
+			xht->cpu_data_address = symbol_value("cpu_data");
+		}
+/* KAK Can this be calculated? */
+		if (!machdep->hz) {
+			machdep->hz = XEN_HYPER_HZ;
+		}
+		break;
 
-	return TRUE;
+	case POST_INIT:
+		break;
+	}
 }
 
 
-/*
- *   Override smp_num_cpus if possible and necessary.
- */
-int
-x86_64_get_smp_cpus(void)
-{
-	int i, cpus, nr_pda, cpunumber;
-	char *cpu_pda_buf;
-	ulong level4_pgt;
+struct framesize_cache {
+        ulong textaddr;
+        int framesize;
+};
 
-	if (!VALID_STRUCT(x8664_pda))
-		return 1;
+static struct framesize_cache *x86_64_framesize_cache = NULL;
+static int framesize_cache_entries = 0;
 
-	cpu_pda_buf = GETBUF(SIZE(x8664_pda));
+#define FRAMESIZE_QUERY  (1)
+#define FRAMESIZE_ENTER  (2)
+#define FRAMESIZE_DUMP   (3)
 
-	if (!(nr_pda = get_array_length("cpu_pda", NULL, 0)))
-               nr_pda = NR_CPUS;
+#define FRAMESIZE_CACHE_INCR (50)
 
-	for (i = cpus = 0; i < nr_pda; i++) {
-		if (!CPU_PDA_READ(i, cpu_pda_buf))
-			break;
-		level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt));
-		cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber));
-                if (!VALID_LEVEL4_PGT_ADDR(level4_pgt) || (cpunumber != cpus))
-                        break;
-                cpus++;
-	}
+static int
+x86_64_framesize_cache_resize(void)
+{
+	int i;
+	struct framesize_cache *new_fc, *fc;
 
-	FREEBUF(cpu_pda_buf);
+	if ((new_fc = realloc(x86_64_framesize_cache, 
+		    (framesize_cache_entries+FRAMESIZE_CACHE_INCR) * 
+		    sizeof(struct framesize_cache))) == NULL) {
+			error(INFO, "cannot realloc x86_64_framesize_cache space!\n");
+			return FALSE;
+	} 
 
-	return cpus;
+	fc = new_fc + framesize_cache_entries;
+	for (i = framesize_cache_entries; 
+	     i < (framesize_cache_entries+FRAMESIZE_CACHE_INCR); 
+	     fc++, i++) {
+		fc->textaddr = 0;
+		fc->framesize = 0;
+	} 	
+
+	x86_64_framesize_cache = new_fc;
+	framesize_cache_entries += FRAMESIZE_CACHE_INCR;
+
+	return TRUE;
 }
 
-/*
- *  Machine dependent command.
- */
-void
-x86_64_cmd_mach(void)
+static int
+x86_64_framesize_cache_func(int cmd, ulong textaddr, int *framesize)
 {
-        int c;
+	int i;
+	struct framesize_cache *fc;
+	char buf[BUFSIZE];
 
-        while ((c = getopt(argcnt, args, "cm")) != EOF) {
-                switch(c)
-                {
-                case 'c':
-                        x86_64_display_cpu_data();
-                        return;
+	if (!x86_64_framesize_cache) {
+		framesize_cache_entries = FRAMESIZE_CACHE_INCR;
+		if ((x86_64_framesize_cache = calloc(framesize_cache_entries,
+		    sizeof(struct framesize_cache))) == NULL)
+			error(FATAL, 
+			    "cannot calloc x86_64_framesize_cache space!\n");
+	}
 
-                case 'm':
-                        x86_64_display_memmap();
-                        return;
+	switch (cmd) 
+	{
+	case FRAMESIZE_QUERY:
+		fc = &x86_64_framesize_cache[0];
+		for (i = 0; i < framesize_cache_entries; i++, fc++) {
+			if (fc->textaddr == textaddr) {
+				*framesize = fc->framesize;
+				return TRUE;
+			}
+		}
+		return FALSE;
 
-                default:
-                        argerrs++;
-                        break;
-                }
-        }
+	case FRAMESIZE_ENTER:
+retry:
+		fc = &x86_64_framesize_cache[0];
+		for (i = 0; i < framesize_cache_entries; i++, fc++) {
+			if ((fc->textaddr == 0) ||
+			    (fc->textaddr == textaddr)) {
+				fc->textaddr = textaddr;
+				fc->framesize = *framesize;
+				return fc->framesize;
+			}
+		}
 
-        if (argerrs)
-                cmd_usage(pc->curcmd, SYNOPSIS);
+		if (x86_64_framesize_cache_resize())
+			goto retry;
 
-        x86_64_display_machine_stats();
-}
+		return *framesize;
 
-/*
- *  "mach" command output.
- */
-static void
-x86_64_display_machine_stats(void)
-{
-        struct new_utsname *uts;
-        char buf[BUFSIZE];
-        ulong mhz;
+	case FRAMESIZE_DUMP:
+		fc = &x86_64_framesize_cache[0];
+		for (i = 0; i < framesize_cache_entries; i++, fc++) {
+			if (fc->textaddr == 0) {
+				if (i < (framesize_cache_entries-1)) {
+					fprintf(fp, "[%d-%d]: (unused)\n",
+						i, framesize_cache_entries-1);
+				}
+				break;
+			}
 
-        uts = &kt->utsname;
+			fprintf(fp, "[%3d]: %lx %3d (%s)\n", i,
+				fc->textaddr, fc->framesize,
+				value_to_symstr(fc->textaddr, buf, 0));
+		}
+		break;
+	}
 
-        fprintf(fp, "       MACHINE TYPE: %s\n", uts->machine);
-        fprintf(fp, "        MEMORY SIZE: %s\n", get_memory_size(buf));
-        fprintf(fp, "               CPUS: %d\n", kt->cpus);
-        fprintf(fp, "    PROCESSOR SPEED: ");
-        if ((mhz = machdep->processor_speed()))
-                fprintf(fp, "%ld Mhz\n", mhz);
-        else
-                fprintf(fp, "(unknown)\n");
-        fprintf(fp, "                 HZ: %d\n", machdep->hz);
-        fprintf(fp, "          PAGE SIZE: %d\n", PAGESIZE());
-        fprintf(fp, "      L1 CACHE SIZE: %d\n", l1_cache_size());
-        fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase);
-        fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start);
-	fprintf(fp, "   KERNEL START MAP: %lx\n", __START_KERNEL_map);
-        fprintf(fp, "KERNEL MODULES BASE: %lx\n", MODULES_VADDR);
-        fprintf(fp, "  KERNEL STACK SIZE: %ld\n", STACKSIZE());
+	return TRUE;
 }
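
The framesize cache above is nothing more than a realloc-grown flat array searched linearly and keyed on text address, with entry into the first free or matching slot. The sketch below reduces that pattern to its essentials; the names, sizes and growth increment are illustrative only.

#include <stdio.h>
#include <stdlib.h>

#define CACHE_INCR 50

struct entry { unsigned long key; int value; };

static struct entry *cache;
static int nentries;

static int cache_query(unsigned long key, int *value)
{
	int i;

	for (i = 0; i < nentries; i++)
		if (cache[i].key == key) {
			*value = cache[i].value;
			return 1;
		}
	return 0;
}

static void cache_enter(unsigned long key, int value)
{
	int i;
	struct entry *new_cache;

	for (i = 0; i < nentries; i++)
		if (cache[i].key == 0 || cache[i].key == key) {
			cache[i].key = key;
			cache[i].value = value;
			return;
		}

	/* cache full: grow by CACHE_INCR zeroed slots, then retry */
	new_cache = realloc(cache, (nentries + CACHE_INCR) * sizeof(*cache));
	if (!new_cache)
		return;
	cache = new_cache;
	for (i = nentries; i < nentries + CACHE_INCR; i++) {
		cache[i].key = 0;
		cache[i].value = 0;
	}
	nentries += CACHE_INCR;
	cache_enter(key, value);
}

int main(void)
{
	int v;

	cache = calloc(nentries = CACHE_INCR, sizeof(*cache));
	cache_enter(0xffffffff80123456UL, 24);
	if (cache_query(0xffffffff80123456UL, &v))
		printf("cached framesize: %d\n", v);
	free(cache);
	return 0;
}
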
 
-/*
- *  "mach -c" 
- */
-static void 
-x86_64_display_cpu_data(void)
+#define BT_FRAMESIZE_IGNORE_MASK \
+	(BT_OLD_BACK_TRACE|BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_ALL|BT_FRAMESIZE_DISABLE)
+ 
+static int
+x86_64_get_framesize(struct bt_info *bt, ulong textaddr)
 {
-        int cpu, cpus, boot_cpu;
-        ulong cpu_data;
-	ulong cpu_pda;
-
-	if (symbol_exists("cpu_data")) {
-        	cpu_data = symbol_value("cpu_data");
-		cpus = kt->cpus;
-		boot_cpu = FALSE;
-	} else if (symbol_exists("boot_cpu_data")) {
-        	cpu_data = symbol_value("boot_cpu_data");
-		boot_cpu = TRUE;
-		cpus = 1;
+	int c, framesize, instr, arg;
+	struct syment *sp;
+	long max_instructions;
+	ulong offset;
+	char buf[BUFSIZE];
+	char buf2[BUFSIZE];
+	char *arglist[MAXARGS];
+	ulong locking_func, textaddr_save, current;
+	char *p1, *p2;
+	int reterror;
+
+	if (!(bt->flags & BT_FRAMESIZE_DEBUG)) {
+		if ((bt->flags & BT_FRAMESIZE_IGNORE_MASK) ||
+		    (kt->flags & USE_OLD_BT))
+			return 0;
+	}
+
+        if (!(sp = value_search(textaddr, &offset))) {
+		if (!(bt->flags & BT_FRAMESIZE_DEBUG))
+			bt->flags |= BT_FRAMESIZE_DISABLE;
+                return 0;
+	}
+
+	if (!(bt->flags & BT_FRAMESIZE_DEBUG) &&
+	    x86_64_framesize_cache_func(FRAMESIZE_QUERY, textaddr, &framesize)) {
+		if (framesize == -1)
+			bt->flags |= BT_FRAMESIZE_DISABLE;
+		return framesize; 
 	}
-	cpu_pda = symbol_value("cpu_pda");
-
-        for (cpu = 0; cpu < cpus; cpu++) {
-		if (boot_cpu)
-                	fprintf(fp, "BOOT CPU:\n");
-		else
-                	fprintf(fp, "%sCPU %d:\n", cpu ? "\n" : "", cpu);
 
-                dump_struct("cpuinfo_x86", cpu_data, 0);
-		fprintf(fp, "\n");
-		dump_struct("x8664_pda", cpu_pda, 0);
-
-                cpu_data += SIZE(cpuinfo_x86);
-		cpu_pda += SIZE(x8664_pda);
-        }
-}
+	/*
+	 *  Bait and switch an incoming .text.lock address
+	 *  with the containing function's address.
+	 */
+	if (STRNEQ(sp->name, ".text.lock.") &&
+	    (locking_func = text_lock_function(sp->name, bt, textaddr))) {
+        	if (!(sp = value_search(locking_func, &offset))) {
+			bt->flags |= BT_FRAMESIZE_DISABLE;
+			return 0;
+		}
+		textaddr_save = textaddr;
+		textaddr = locking_func;
+	} else
+		textaddr_save = 0;
 
-/*
- *  "mach -m"
- */
-static char *e820type[] = {
-        "(invalid type)",
-        "E820_RAM",
-        "E820_RESERVED",
-        "E820_ACPI",
-        "E820_NVS",
-};
+	framesize = 0;
+        max_instructions = textaddr - sp->value; 
+	instr = arg = -1;
 
-static void
-x86_64_display_memmap(void)
-{
-        ulong e820;
-        int nr_map, i;
-        char *buf, *e820entry_ptr;
-        ulonglong addr, size;
-        uint type;
+        open_tmpfile2();
 
-        e820 = symbol_value("e820");
-	if (CRASHDEBUG(1))
-		dump_struct("e820map", e820, RADIX(16));
-        buf = (char *)GETBUF(SIZE(e820map));
+        sprintf(buf, "x/%ldi 0x%lx",
+                max_instructions, sp->value);
 
-        readmem(e820, KVADDR, &buf[0], SIZE(e820map),
-                "e820map", FAULT_ON_ERROR);
+        if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) {
+        	close_tmpfile2();
+		bt->flags |= BT_FRAMESIZE_DISABLE;
+                return 0;
+	}
 
-        nr_map = INT(buf + OFFSET(e820map_nr_map));
+        rewind(pc->tmpfile2);
+        while (fgets(buf, BUFSIZE, pc->tmpfile2)) {
+		strcpy(buf2, buf);
 
-        fprintf(fp, "      PHYSICAL ADDRESS RANGE         TYPE\n");
+		if (CRASHDEBUG(3))
+			fprintf(pc->saved_fp, "%s", buf2);
 
-        for (i = 0; i < nr_map; i++) {
-                e820entry_ptr = buf + sizeof(int) + (SIZE(e820entry) * i);
-                addr = ULONGLONG(e820entry_ptr + OFFSET(e820entry_addr));
-                size = ULONGLONG(e820entry_ptr + OFFSET(e820entry_size));
-                type = UINT(e820entry_ptr + OFFSET(e820entry_type));
-                fprintf(fp, "%016llx - %016llx  %s\n", addr, addr+size,
-			e820type[type]);
-        }
-}
+		c = parse_line(buf, arglist);
 
+		if (instr == -1) {
+			/*
+			 *  Check whether <function+offset> are 
+			 *  in the output string.
+			 */
+			if (LASTCHAR(arglist[0]) == ':') {
+				instr = 1;
+				arg = 2;
+			} else { 
+				instr = 2;
+				arg = 3;
+			}
+		}
 
-static const char *hook_files[] = {
-        "arch/x86_64/kernel/entry.S",
-        "arch/x86_64/kernel/head.S",
-        "arch/x86_64/kernel/semaphore.c"
-};
+		if (c < (arg+1))
+			continue;
 
-#define ENTRY_S      ((char **)&hook_files[0])
-#define HEAD_S       ((char **)&hook_files[1])
-#define SEMAPHORE_C  ((char **)&hook_files[2])
+		reterror = 0;
+		current =  htol(strip_ending_char(arglist[0], ':'), 
+			RETURN_ON_ERROR, &reterror);
+		if (reterror)
+			continue;
+		if (current >= textaddr)
+			break;
 
-static struct line_number_hook x86_64_line_number_hooks[] = {
-	{"ret_from_fork", ENTRY_S},
-	{"system_call", ENTRY_S},
-	{"int_ret_from_sys_call", ENTRY_S},
-	{"ptregscall_common", ENTRY_S},
-	{"stub_execve", ENTRY_S},
-	{"stub_rt_sigreturn", ENTRY_S},
-	{"common_interrupt", ENTRY_S},
-	{"ret_from_intr", ENTRY_S},
-	{"load_gs_index", ENTRY_S},
-	{"arch_kernel_thread", ENTRY_S},
-	{"execve", ENTRY_S},
-	{"page_fault", ENTRY_S},
-	{"coprocessor_error", ENTRY_S},
-	{"simd_coprocessor_error", ENTRY_S},
-	{"device_not_available", ENTRY_S},
-	{"debug", ENTRY_S},
-	{"nmi", ENTRY_S},
-	{"int3", ENTRY_S},
-	{"overflow", ENTRY_S},
-	{"bounds", ENTRY_S},
-	{"invalid_op", ENTRY_S},
-	{"coprocessor_segment_overrun", ENTRY_S},
-	{"reserved", ENTRY_S},
-	{"double_fault", ENTRY_S},
-	{"invalid_TSS", ENTRY_S},
-	{"segment_not_present", ENTRY_S},
-	{"stack_segment", ENTRY_S},
-	{"general_protection", ENTRY_S},
-	{"alignment_check", ENTRY_S},
-	{"divide_error", ENTRY_S},
-	{"spurious_interrupt_bug", ENTRY_S},
-	{"machine_check", ENTRY_S},
-	{"call_debug", ENTRY_S},
+		if (STRNEQ(arglist[instr], "push")) {
+			framesize += 8;
+			if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG))
+				fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", 
+					strip_linefeeds(buf2), framesize);
+	 	} else if (STRNEQ(arglist[instr], "pop")) {
+			framesize -= 8;
+			if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG))
+				fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", 
+					strip_linefeeds(buf2), framesize);
+		} else if (STRNEQ(arglist[instr], "add") && 
+			(p1 = strstr(arglist[arg], ",%rsp"))) {
+			*p1 = NULLCHAR;
+			p2 = arglist[arg];
+			reterror = 0;
+			offset =  htol(p2+1, RETURN_ON_ERROR, &reterror);
+			if (reterror)
+				continue;
+			framesize -= offset;
+			if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG))
+				fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", 
+					strip_linefeeds(buf2), framesize);
+		} else if (STRNEQ(arglist[instr], "sub") && 
+			(p1 = strstr(arglist[arg], ",%rsp"))) {
+			*p1 = NULLCHAR;
+			p2 = arglist[arg];
+			reterror = 0;
+			offset =  htol(p2+1, RETURN_ON_ERROR, &reterror);
+			if (reterror)
+				continue;
+			framesize += offset;
+			if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG))
+				fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", 
+					strip_linefeeds(buf2), framesize);
+		} else if (STRNEQ(arglist[instr], "retq")) {
+			bt->flags |= BT_FRAMESIZE_DISABLE;
+			framesize = -1;
+			if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG))
+				fprintf(pc->saved_fp, "%s\t[framesize: DISABLED]\n", 
+					strip_linefeeds(buf2));
+			break;
+		}
+        }
+        close_tmpfile2();
 
-	{NULL, NULL}    /* list must be NULL-terminated */
-};
+	if (textaddr_save)
+		textaddr = textaddr_save;
 
-static void
-x86_64_dump_line_number(ulong callpc)
-{
-	error(FATAL, "x86_64_dump_line_number: TBD\n");
+	return (x86_64_framesize_cache_func(FRAMESIZE_ENTER, textaddr, &framesize));
 }
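
x86_64_get_framesize() above derives a frame size by disassembling from the start of the containing function up to the return address and accumulating the net stack adjustment: +8 per push, -8 per pop, plus or minus the immediate of sub/add instructions that target %rsp. The toy below shows only that accounting over a simplified instruction stream; real disassembly parsing is considerably messier.

#include <stdio.h>
#include <string.h>

struct insn { const char *mnemonic; long imm; };

static long framesize_of(const struct insn *code, int n)
{
	long framesize = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (strncmp(code[i].mnemonic, "push", 4) == 0)
			framesize += 8;
		else if (strncmp(code[i].mnemonic, "pop", 3) == 0)
			framesize -= 8;
		else if (strcmp(code[i].mnemonic, "sub_rsp") == 0)
			framesize += code[i].imm;	/* sub $imm,%rsp */
		else if (strcmp(code[i].mnemonic, "add_rsp") == 0)
			framesize -= code[i].imm;	/* add $imm,%rsp */
	}
	return framesize;
}

int main(void)
{
	struct insn prologue[] = {
		{ "push", 0 },		/* push %rbp      -> +8  */
		{ "push", 0 },		/* push %rbx      -> +8  */
		{ "sub_rsp", 0x28 },	/* sub $0x28,%rsp -> +40 */
	};

	printf("framesize: %ld\n", framesize_of(prologue, 3));	/* 56 */
	return 0;
}
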
 
-void
-x86_64_compiler_warning_stub(void)
+static void 
+x86_64_framesize_debug(struct bt_info *bt)
 {
-        struct line_number_hook *lhp;
-        char **p;
+	int framesize;
 
-        lhp = &x86_64_line_number_hooks[0]; lhp++;
-        p = ENTRY_S;
-	x86_64_back_trace(NULL, NULL);
-	get_x86_64_frame(NULL, NULL, NULL);
-	x86_64_dump_line_number(0);
-}
+	switch (bt->hp->esp) 
+	{
+	case 1: /* "dump" */
+		if (bt->hp->eip) {
+			framesize = 1;
+			x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, 
+				&framesize);
+		} else
+			x86_64_framesize_cache_func(FRAMESIZE_DUMP, 0, NULL);
+		break;
+
+	case 0:
+		if (bt->hp->eip) {
+			framesize = 0;
+			x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, 
+				&framesize);
+		} else  /* "clear" */
+			BZERO(&x86_64_framesize_cache[0], 
+			    sizeof(struct framesize_cache)*framesize_cache_entries);
+		break;
+
+	case -1:
+		if (!bt->hp->eip)
+			error(INFO, "x86_64_framesize_debug: ignoring command\n");
+		else
+			x86_64_get_framesize(bt, bt->hp->eip);
+		break;
 
+	default:
+		if (bt->hp->esp > 1) {
+			framesize = bt->hp->esp;
+			if (bt->hp->eip)
+				x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, 
+					&framesize);
+		} else
+			error(INFO, "x86_64_framesize_debug: ignoring command\n");
+		break;
+	}
+}
 #endif  /* X86_64 */ 
--- crash/diskdump.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/diskdump.c	2009-01-15 16:17:17.000000000 -0500
@@ -1,16 +1,16 @@
 /* 
  * diskdump.c 
  * 
- * NOTE: The Red Hat diskdump module currently creates
- *       vmcore dumpfiles that are identical to those made
- *       by the Red Hat netdump module, and therefore the 
- *       dumpfile is recognized as such.  But just in case 
- *       there's ever a divergence, this file is being kept
- *       in place, along with the DISKDUMP-related #define's
- *       and their usage throughout the crash sources.
+ * The diskdump module optionally creates either ELF vmcore 
+ * dumpfiles, or compressed dumpfiles derived from the LKCD format.
+ * In the case of ELF vmcore files, since they are identical to 
+ * netdump dumpfiles, the facilities in netdump.c are used.  For
+ * compressed dumpfiles, the facilities in this file are used.
  *
- * Copyright (C) 2004, 2005 David Anderson
- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004, 2005, 2006, 2007, 2008 David Anderson
+ * Copyright (C) 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2005  FUJITSU LIMITED
+ * Copyright (C) 2005  NEC Corporation
  *
  * This software may be freely redistributed under the terms of the
  * GNU General Public License.
@@ -18,22 +18,295 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Author: David Anderson
  */
 
 #include "defs.h"
 #include "diskdump.h"
 
+#define BITMAP_SECT_LEN	4096
+
 struct diskdump_data {
 	ulong flags;       /* DISKDUMP_LOCAL, plus anything else... */
         int dfd;           /* dumpfile file descriptor */
         FILE *ofp;         /* fprintf(dd->ofp, "xxx"); */
 	int machine_type;  /* machine type identifier */
+
+	/* header */
+	struct disk_dump_header		*header;
+	struct disk_dump_sub_header	*sub_header;
+	struct kdump_sub_header		*sub_header_kdump;
+
+	size_t	data_offset;
+	int	block_size;
+	int	block_shift;
+	char	*bitmap;
+	int	bitmap_len;
+	char	*dumpable_bitmap;
+	int	byte, bit;
+	char	*compressed_page;	/* copy of compressed page data */
+	char	*curbufptr;		/* ptr to uncompressed page buffer */
+
+	/* page cache */
+	struct page_cache_hdr {		/* header for each cached page */
+		uint32_t pg_flags;
+		uint64_t pg_addr;
+		char *pg_bufptr;
+		ulong pg_hit_count;
+	} page_cache_hdr[DISKDUMP_CACHED_PAGES];
+	char	*page_cache_buf;	/* base of cached buffer pages */
+	int	evict_index;		/* next page to evict */
+	ulong	evictions;		/* total evictions done */
+	ulong	cached_reads;
+	ulong  *valid_pages;
+	ulong   accesses;
 };
 
 static struct diskdump_data diskdump_data = { 0 };
 static struct diskdump_data *dd = &diskdump_data;
+static int get_dump_level(void);
+
+ulong *diskdump_flags = &diskdump_data.flags;
+
+static inline int get_bit(char *map, int byte, int bit)
+{
+	return map[byte] & (1<<bit);
+}
+
+static inline int page_is_ram(unsigned int nr)
+{
+	return get_bit(dd->bitmap, nr >> 3, nr & 7);
+}
+
+static inline int page_is_dumpable(unsigned int nr)
+{
+	return dd->dumpable_bitmap[nr>>3] & (1 << (nr & 7));
+}
+
+static inline int dump_is_partial(const struct disk_dump_header *header)
+{
+	return header->bitmap_blocks >=
+	    divideup(divideup(header->max_mapnr, 8), dd->block_size) * 2;
+}
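
The bitmap helpers above index the dump's RAM and dumpable bitmaps by page frame number: byte nr >> 3, bit nr & 7. A tiny self-contained example of that indexing, with a made-up bitmap:

#include <stdio.h>

static int test_bit(const unsigned char *map, unsigned int nr)
{
	return (map[nr >> 3] >> (nr & 7)) & 1;
}

int main(void)
{
	unsigned char bitmap[2] = { 0x05, 0x80 };	/* bits 0, 2 and 15 set */

	printf("pfn 2:  %d\n", test_bit(bitmap, 2));	/* 1 */
	printf("pfn 3:  %d\n", test_bit(bitmap, 3));	/* 0 */
	printf("pfn 15: %d\n", test_bit(bitmap, 15));	/* 1 */
	return 0;
}
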
+
+static int open_dump_file(char *file)
+{
+	int fd;
+
+	fd = open(file, O_RDONLY);
+	if (fd < 0) {
+		error(INFO, "diskdump / compressed kdump: unable to open dump file %s", file);
+		return FALSE;
+	}
+	dd->dfd = fd;
+	return TRUE;
+}
+
+static int read_dump_header(char *file)
+{
+	struct disk_dump_header *header = NULL;
+	struct disk_dump_sub_header *sub_header = NULL;
+	struct kdump_sub_header *sub_header_kdump = NULL;
+	int bitmap_len;
+	int block_size = (int)sysconf(_SC_PAGESIZE);
+	off_t offset;
+	const off_t failed = (off_t)-1;
+	ulong pfn;
+	int i, j, max_sect_len;
+
+	if (block_size < 0)
+		return FALSE;
+
+restart:
+	if ((header = realloc(header, block_size)) == NULL)
+		error(FATAL, "diskdump / compressed kdump: cannot malloc block_size buffer\n");
+
+	if (lseek(dd->dfd, 0, SEEK_SET) == failed) {
+		if (CRASHDEBUG(1))
+			error(INFO, "diskdump / compressed kdump: cannot lseek dump header\n");
+		goto err;
+	}
+
+	if (read(dd->dfd, header, block_size) < block_size) {
+		if (CRASHDEBUG(1))
+			error(INFO, "diskdump / compressed kdump: cannot read dump header\n");
+		goto err;
+	}
+
+	/* validate dump header */
+	if (!memcmp(header->signature, DISK_DUMP_SIGNATURE,
+				sizeof(header->signature))) {
+		dd->flags |= DISKDUMP_LOCAL;
+	} else if (!memcmp(header->signature, KDUMP_SIGNATURE,
+				sizeof(header->signature))) {
+		dd->flags |= KDUMP_CMPRS_LOCAL;
+		if (header->header_version >= 1)
+			dd->flags |= ERROR_EXCLUDED;
+	} else {
+		if (CRASHDEBUG(1))
+			error(INFO, 
+			    "diskdump / compressed kdump: dump does not have panic dump header\n");
+		goto err;
+	}
+
+	if (CRASHDEBUG(1))
+		fprintf(fp, "%s: header->utsname.machine: %s\n", 
+			DISKDUMP_VALID() ? "diskdump" : "compressed kdump",
+			header->utsname.machine);
+
+	if (STRNEQ(header->utsname.machine, "i686") &&
+	    machine_type_mismatch(file, "X86", NULL, 0))
+		goto err;
+	else if (STRNEQ(header->utsname.machine, "x86_64") &&
+	    machine_type_mismatch(file, "X86_64", NULL, 0))
+		goto err;
+	else if (STRNEQ(header->utsname.machine, "ia64") &&
+	    machine_type_mismatch(file, "IA64", NULL, 0))
+		goto err;
+	else if (STRNEQ(header->utsname.machine, "ppc64") &&
+	    machine_type_mismatch(file, "PPC64", NULL, 0))
+		goto err;
+
+	if (header->block_size != block_size) {
+		block_size = header->block_size;
+		if (CRASHDEBUG(1))
+			fprintf(fp, 
+			    "retrying with different block/page size: %d\n", 
+				header->block_size);
+		goto restart;
+	}
+	dd->block_size  = header->block_size;
+	dd->block_shift = ffs(header->block_size) - 1;
+
+	if (sizeof(*header) + sizeof(void *) * header->nr_cpus > block_size ||
+	    header->nr_cpus <= 0) {
+		error(INFO, "%s: invalid nr_cpus value: %d\n", 
+			DISKDUMP_VALID() ? "diskdump" : "compressed kdump",
+			header->nr_cpus);
+		goto err;
+	}
+
+	/* read sub header */
+	offset = (off_t)block_size;
+	if (lseek(dd->dfd, offset, SEEK_SET) == failed) {
+		error(INFO, "%s: cannot lseek dump sub header\n",
+			DISKDUMP_VALID() ? "diskdump" : "compressed kdump");
+
+		goto err;
+	}
+
+	if (DISKDUMP_VALID()) {
+		if ((sub_header = malloc(block_size)) == NULL)
+			error(FATAL, "diskdump: cannot malloc sub_header buffer\n");
+
+		if (read(dd->dfd, sub_header, block_size)
+		  < block_size) {
+			error(INFO, "diskdump: cannot read dump sub header\n");
+			goto err;
+		}
+		dd->sub_header = sub_header;
+	} else if (KDUMP_CMPRS_VALID()) {
+		if ((sub_header_kdump = malloc(block_size)) == NULL)
+			error(FATAL, "compressed kdump: cannot malloc sub_header_kdump buffer\n");
+
+		if (read(dd->dfd, sub_header_kdump, block_size)
+		  < block_size) {
+			error(INFO, "compressed kdump: cannot read dump sub header\n");
+			goto err;
+		}
+		dd->sub_header_kdump = sub_header_kdump;
+	}
+
+	/* read memory bitmap */
+	bitmap_len = block_size * header->bitmap_blocks;
+	dd->bitmap_len = bitmap_len;
+
+	offset = (off_t)block_size * (1 + header->sub_hdr_size);
+	if (lseek(dd->dfd, offset, SEEK_SET) == failed) {
+		error(INFO, "%s: cannot lseek memory bitmap\n",
+			DISKDUMP_VALID() ? "diskdump" : "compressed kdump");
+
+		goto err;
+	}
+
+	if ((dd->bitmap = malloc(bitmap_len)) == NULL)
+		error(FATAL, "%s: cannot malloc bitmap buffer\n",
+			DISKDUMP_VALID() ? "diskdump" : "compressed kdump");
+
+	dd->dumpable_bitmap = calloc(bitmap_len, 1);
+	if (read(dd->dfd, dd->bitmap, bitmap_len) < bitmap_len) {
+		error(INFO, "%s: cannot read memory bitmap\n",
+			DISKDUMP_VALID() ? "diskdump" : "compressed kdump");
+		goto err;
+	}
+
+	if (dump_is_partial(header))
+		memcpy(dd->dumpable_bitmap, dd->bitmap + bitmap_len/2,
+		       bitmap_len/2);
+	else
+		memcpy(dd->dumpable_bitmap, dd->bitmap, bitmap_len);
+
+	dd->data_offset
+		= (1 + header->sub_hdr_size + header->bitmap_blocks)
+		* header->block_size;
+
+	dd->header = header;
+
+	if (machine_type("X86"))
+		dd->machine_type = EM_386;
+	else if (machine_type("X86_64"))
+		dd->machine_type = EM_X86_64;
+	else if (machine_type("IA64"))
+		dd->machine_type = EM_IA_64;
+	else if (machine_type("PPC64"))
+		dd->machine_type = EM_PPC64;
+	else {
+		error(INFO, "%s: unsupported machine type: %s\n", 
+			DISKDUMP_VALID() ? "diskdump" : "compressed kdump",
+			MACHINE_TYPE);
+		goto err;
+	}
+
+	max_sect_len = divideup(header->max_mapnr, BITMAP_SECT_LEN);
+
+	dd->valid_pages = calloc(sizeof(ulong), max_sect_len + 1);
+	pfn = 0;
+	for (i = 1; i < max_sect_len + 1; i++) {
+		dd->valid_pages[i] = dd->valid_pages[i - 1];
+		for (j = 0; j < BITMAP_SECT_LEN; j++, pfn++)
+			if (page_is_dumpable(pfn))
+				dd->valid_pages[i]++;
+	}
+
+        return TRUE;
+
+err:
+	free(header);
+	if (sub_header)
+		free(sub_header);
+	if (sub_header_kdump)
+		free(sub_header_kdump);
+	if (dd->bitmap)
+		free(dd->bitmap);
+	if (dd->dumpable_bitmap)
+		free(dd->dumpable_bitmap);
+	dd->flags &= ~(DISKDUMP_LOCAL|KDUMP_CMPRS_LOCAL);
+	return FALSE;
+}
+
+static int
+pfn_to_pos(ulong pfn)
+{
+	int desc_pos, j, valid;
+
+	valid = dd->valid_pages[pfn / BITMAP_SECT_LEN];
+
+	for (j = round(pfn, BITMAP_SECT_LEN), desc_pos = valid; j <= pfn; j++)
+		if (page_is_dumpable(j))
+			desc_pos++;
+
+	return desc_pos;
+}
+
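
pfn_to_pos() above avoids rescanning the whole dumpable bitmap by combining a per-section running total (valid_pages[], built once by read_dump_header() in BITMAP_SECT_LEN-sized sections) with a short linear scan inside the pfn's own section. The sketch below reproduces that two-level count over a toy bitmap with a much smaller section length.

#include <stdio.h>

#define SECT_LEN 8			/* pfns per section (toy value) */
#define NPFNS    32

static unsigned char dumpable[NPFNS];	/* 1 if the pfn is in the dump */
static unsigned int valid[NPFNS / SECT_LEN + 1];

static void build_valid(void)
{
	unsigned int i, j, pfn = 0;

	for (i = 1; i <= NPFNS / SECT_LEN; i++) {
		valid[i] = valid[i - 1];
		for (j = 0; j < SECT_LEN; j++, pfn++)
			valid[i] += dumpable[pfn];
	}
}

static unsigned int pfn_to_pos(unsigned int pfn)
{
	unsigned int j, pos = valid[pfn / SECT_LEN];

	for (j = (pfn / SECT_LEN) * SECT_LEN; j <= pfn; j++)
		if (dumpable[j])
			pos++;
	return pos;
}

int main(void)
{
	dumpable[3] = dumpable[9] = dumpable[10] = 1;
	build_valid();
	printf("position of pfn 10: %u\n", pfn_to_pos(10));	/* 3 */
	return 0;
}
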
 
 /*
  *  Determine whether a file is a diskdump creation, and if TRUE,
@@ -43,7 +316,28 @@
 int
 is_diskdump(char *file)
 {
-	return FALSE;
+	int sz, i;
+
+	if (!open_dump_file(file) || !read_dump_header(file))
+		return FALSE;
+
+	sz = dd->block_size * (DISKDUMP_CACHED_PAGES);
+	if ((dd->page_cache_buf = malloc(sz)) == NULL)
+		error(FATAL, "%s: cannot malloc compressed page_cache_buf\n",
+			DISKDUMP_VALID() ? "diskdump" : "compressed kdump");
+
+	for (i = 0; i < DISKDUMP_CACHED_PAGES; i++)
+		dd->page_cache_hdr[i].pg_bufptr =
+			&dd->page_cache_buf[i * dd->block_size];
+
+	if ((dd->compressed_page = (char *)malloc(dd->block_size)) == NULL)
+		error(FATAL, "%s: cannot malloc compressed page space\n",
+			DISKDUMP_VALID() ? "diskdump" : "compressed kdump");
+
+	if (CRASHDEBUG(1))
+		diskdump_memory_dump(fp);
+
+	return TRUE;
 }
 
 /*
@@ -53,11 +347,141 @@
 int
 diskdump_init(char *unused, FILE *fptr)
 {
-        if (!DISKDUMP_VALID())
-                return FALSE;
+	if (!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID())
+		return FALSE;
 
-        dd->ofp = fptr;
-        return TRUE;
+	dd->ofp = fptr;
+	return TRUE;
+}
+
+/*
+ *  Get the relocational offset from the sub header of kdump.
+ */
+int
+diskdump_phys_base(unsigned long *phys_base)
+{
+	if (KDUMP_CMPRS_VALID()) {
+		*phys_base = dd->sub_header_kdump->phys_base;
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/*
+ *  Check whether paddr is already cached.
+ */
+static int
+page_is_cached(physaddr_t paddr)
+{
+	int i;
+	struct page_cache_hdr *pgc;
+
+	dd->accesses++;
+
+	for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) {
+
+		pgc = &dd->page_cache_hdr[i];
+
+		if (!DISKDUMP_VALID_PAGE(pgc->pg_flags))
+			continue;
+
+		if (pgc->pg_addr == paddr) {
+			pgc->pg_hit_count++;
+			dd->curbufptr = pgc->pg_bufptr;
+			dd->cached_reads++;
+			return TRUE;
+		}
+	}
+	return FALSE;
+}
+
+/*
+ *  Cache the page's data.
+ *
+ *  If an empty page cache location is available, take it.  Otherwise, evict
+ *  the entry indexed by evict_index, and then bump evict index.  The hit_count
+ *  is only gathered for dump_diskdump_environment().
+ *
+ *  If the page is compressed, uncompress it into the selected page cache entry.
+ *  If the page is raw, just copy it into the selected page cache entry.
+ *  If all works OK, update diskdump->curbufptr to point to the page's
+ *  uncompressed data.
+ */
+static int
+cache_page(physaddr_t paddr)
+{
+	int i, ret;
+	int found;
+	ulong pfn;
+	int desc_pos;
+	off_t seek_offset;
+	page_desc_t pd;
+	const int block_size = dd->block_size;
+	const off_t failed = (off_t)-1;
+	ulong retlen;
+
+	for (i = found = 0; i < DISKDUMP_CACHED_PAGES; i++) {
+		if (DISKDUMP_VALID_PAGE(dd->page_cache_hdr[i].pg_flags))
+			continue;
+		found = TRUE;
+		break;
+	}
+
+	if (!found) {
+		i = dd->evict_index;
+		dd->page_cache_hdr[i].pg_hit_count = 0;
+		dd->evict_index =
+			(dd->evict_index+1) % DISKDUMP_CACHED_PAGES;
+		dd->evictions++;
+	}
+
+	dd->page_cache_hdr[i].pg_flags = 0;
+	dd->page_cache_hdr[i].pg_addr = paddr;
+	dd->page_cache_hdr[i].pg_hit_count++;
+
+	/* find page descriptor */
+	pfn = paddr >> dd->block_shift;
+	desc_pos = pfn_to_pos(pfn);
+	seek_offset = dd->data_offset
+			+ (off_t)(desc_pos - 1)*sizeof(page_desc_t);
+	lseek(dd->dfd, seek_offset, SEEK_SET);
+
+	/* read page descriptor */
+	if (read(dd->dfd, &pd, sizeof(pd)) != sizeof(pd))
+		return READ_ERROR;
+
+	/* sanity check */
+	if (pd.size > block_size)
+		return READ_ERROR;
+
+	if (lseek(dd->dfd, pd.offset, SEEK_SET) == failed)
+		return SEEK_ERROR;
+
+	/* read page data */
+	if (read(dd->dfd, dd->compressed_page, pd.size) != pd.size)
+		return READ_ERROR;
+
+	if (pd.flags & DUMP_DH_COMPRESSED) {
+		retlen = block_size;
+		ret = uncompress((unsigned char *)dd->page_cache_hdr[i].pg_bufptr,
+		                 &retlen,
+		                 (unsigned char *)dd->compressed_page,
+		                 pd.size);
+		if ((ret != Z_OK) || (retlen != block_size)) {
+			error(INFO, "%s: uncompress failed: %d\n", 
+				DISKDUMP_VALID() ? "diskdump" : "compressed kdump",
+				ret);
+			return READ_ERROR;
+		}
+	} else
+		memcpy(dd->page_cache_hdr[i].pg_bufptr,
+		       dd->compressed_page, block_size);
+
+	dd->page_cache_hdr[i].pg_flags |= PAGE_VALID;
+	dd->curbufptr = dd->page_cache_hdr[i].pg_bufptr;
+
+	return TRUE;
 }
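
The page cache that cache_page() fills is a small fixed array with a simple replacement policy: use a free (invalid) slot if one exists, otherwise evict the slot at evict_index and advance it round-robin. That policy in isolation, with an illustrative slot count:

#include <stdio.h>

#define NSLOTS 16

struct slot {
	int valid;
	unsigned long long addr;
};

static struct slot slots[NSLOTS];
static int evict_index;

static int pick_slot(void)
{
	int i;

	for (i = 0; i < NSLOTS; i++)
		if (!slots[i].valid)
			return i;

	i = evict_index;			/* all valid: evict one */
	evict_index = (evict_index + 1) % NSLOTS;
	return i;
}

int main(void)
{
	int i;

	for (i = 0; i < NSLOTS + 2; i++) {	/* force two evictions */
		int s = pick_slot();

		slots[s].valid = 1;
		slots[s].addr = 0x1000ULL * i;
		printf("page %2d -> slot %2d\n", i, s);
	}
	return 0;
}
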
 
 /*
@@ -66,7 +490,31 @@
 int
 read_diskdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
 {
-        return 0;
+	int ret;
+	physaddr_t curpaddr;
+	ulong pfn, page_offset;
+
+	pfn = paddr >> dd->block_shift;
+	curpaddr = paddr & ~((physaddr_t)(dd->block_size-1));
+	page_offset = paddr & ((physaddr_t)(dd->block_size-1));
+
+	if ((pfn >= dd->header->max_mapnr) || !page_is_ram(pfn))
+		return SEEK_ERROR;
+	if (!page_is_dumpable(pfn)) {
+		if ((dd->flags & (ZERO_EXCLUDED|ERROR_EXCLUDED)) ==
+		    ERROR_EXCLUDED)
+			return PAGE_EXCLUDED;
+		memset(bufptr, 0, cnt);
+		return cnt;
+	}
+
+	if (!page_is_cached(curpaddr))
+		if ((ret = cache_page(curpaddr)) < 0)
+			return ret;
+	
+	memcpy(bufptr, dd->curbufptr + page_offset, cnt);
+
+	return cnt;
 }
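
read_diskdump() above splits the incoming physical address into a page frame number, the page-aligned address used as the cache key, and the offset within the page, all driven by the dump's block size. A short illustration of that split, assuming a 4096-byte block:

#include <stdio.h>

#define BLOCK_SIZE  4096ULL
#define BLOCK_SHIFT 12

int main(void)
{
	unsigned long long paddr = 0x12345678ULL;
	unsigned long long pfn = paddr >> BLOCK_SHIFT;
	unsigned long long page_start = paddr & ~(BLOCK_SIZE - 1);
	unsigned long long page_off = paddr & (BLOCK_SIZE - 1);

	/* prints: pfn 12345  page 12345000  offset 678 */
	printf("pfn %llx  page %llx  offset %llx\n", pfn, page_start, page_off);
	return 0;
}
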
 
 /*
@@ -81,7 +529,23 @@
 ulong
 get_diskdump_panic_task(void)
 {
-	return NO_TASK;
+	if ((!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID())
+	    || !get_active_set())
+		return NO_TASK;
+
+	return (ulong)dd->header->tasks[dd->header->current_cpu];
+}
+
+extern  void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *);
+extern void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *);
+
+static void
+get_diskdump_regs_ppc64(struct bt_info *bt, ulong *eip, ulong *esp)
+{
+	if ((bt->task == tt->panic_task) && DISKDUMP_VALID())
+		bt->machdep = &dd->sub_header->elf_regs;
+
+	machdep->get_stack_frame(bt, eip, esp);
 }
 
 /*
@@ -91,12 +555,35 @@
 void
 get_diskdump_regs(struct bt_info *bt, ulong *eip, ulong *esp)
 {
-        switch (dd->machine_type)
-        {
-        default:
-                error(FATAL,
-                   "diskdump support for this machine type is not available\n");
-        }
+	switch (dd->machine_type) 
+	{
+	case EM_386:
+		return get_netdump_regs_x86(bt, eip, esp);
+		break;
+
+	case EM_IA_64:
+	       /* For normal backtraces, this information will be obtained
+		* from the switch_stack structure, which is pointed to by
+		* the thread.ksp field of the task_struct. But it's still
+		* needed by the "bt -t" option.
+		*/
+		machdep->get_stack_frame(bt, eip, esp);
+		break;
+
+	case EM_PPC64:
+		return get_diskdump_regs_ppc64(bt, eip, esp);
+		break;
+
+	case EM_X86_64:
+		return get_netdump_regs_x86_64(bt, eip, esp);
+		break;
+
+	default:
+		error(FATAL, "%s: unsupported machine type: %s\n",
+			DISKDUMP_VALID() ? "diskdump" : "compressed kdump",
+			MACHINE_TYPE);
+
+	}
 }
 
 /*
@@ -105,7 +592,10 @@
 uint
 diskdump_page_size(void)
 {
-	return 0;
+	if (!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID())
+		return 0;
+
+	return dd->header->block_size;
 }
 
 /*
@@ -131,6 +621,197 @@
 int
 diskdump_memory_dump(FILE *fp)
 {
+	int i, others, dump_level;
+	struct disk_dump_header *dh;
+	struct disk_dump_sub_header *dsh;
+	struct kdump_sub_header *kdsh;
+	ulong *tasks;
+
+        fprintf(fp, "diskdump_data: \n");
+        fprintf(fp, "             flags: %lx (", dd->flags);
+        others = 0;
+        if (dd->flags & DISKDUMP_LOCAL)
+                fprintf(fp, "%sDISKDUMP_LOCAL", others++ ? "|" : "");
+        if (dd->flags & KDUMP_CMPRS_LOCAL)
+                fprintf(fp, "%sKDUMP_CMPRS_LOCAL", others++ ? "|" : "");
+        if (dd->flags & ERROR_EXCLUDED)
+                fprintf(fp, "%sERROR_EXCLUDED", others++ ? "|" : "");
+        if (dd->flags & ZERO_EXCLUDED)
+                fprintf(fp, "%sZERO_EXCLUDED", others++ ? "|" : "");
+        fprintf(fp, ")\n");
+        fprintf(fp, "               dfd: %d\n", dd->dfd);
+        fprintf(fp, "               ofp: %lx\n", (ulong)dd->ofp);
+        fprintf(fp, "      machine_type: %d ", dd->machine_type);
+	switch (dd->machine_type)
+	{
+	case EM_386:
+		fprintf(fp, "(EM_386)\n"); break;
+	case EM_X86_64:
+		fprintf(fp, "(EM_X86_64)\n"); break;
+	case EM_IA_64:
+		fprintf(fp, "(EM_IA_64)\n"); break;
+	case EM_PPC64:
+		fprintf(fp, "(EM_PPC64)\n"); break;
+	default:
+		fprintf(fp, "(unknown)\n"); break;
+	}
+
+        fprintf(fp, "\n            header: %lx\n", (ulong)dd->header);
+	dh = dd->header;
+	fprintf(fp, "           signature: \"");
+	for (i = 0; i < SIG_LEN; i++)
+		if (dh->signature[i])
+			fprintf(fp, "%c", dh->signature[i]);
+	fprintf(fp, "\"\n");
+	fprintf(fp, "      header_version: %d\n", dh->header_version);
+	fprintf(fp, "             utsname:\n");
+	fprintf(fp, "               sysname: %s\n", dh->utsname.sysname);
+	fprintf(fp, "              nodename: %s\n", dh->utsname.nodename);
+	fprintf(fp, "               release: %s\n", dh->utsname.release);
+	fprintf(fp, "               version: %s\n", dh->utsname.version);
+	fprintf(fp, "               machine: %s\n", dh->utsname.machine);
+	fprintf(fp, "            domainname: %s\n", dh->utsname.domainname);
+	fprintf(fp, "           timestamp:\n");
+	fprintf(fp, "                tv_sec: %lx\n", dh->timestamp.tv_sec);
+	fprintf(fp, "               tv_usec: %lx\n", dh->timestamp.tv_usec);
+	fprintf(fp, "              status: %x (", dh->status);
+        others = 0;
+        if (dh->status & DUMP_HEADER_COMPLETED)
+                fprintf(fp, "%sDUMP_HEADER_COMPLETED", others++ ? "|" : "");
+        if (dh->status & DUMP_HEADER_INCOMPLETED)
+                fprintf(fp, "%sDUMP_HEADER_INCOMPLETED", others++ ? "|" : "");
+        if (dh->status & DUMP_HEADER_COMPRESSED)
+                fprintf(fp, "%sDUMP_HEADER_COMPRESSED", others++ ? "|" : "");
+	fprintf(fp, ")\n");
+	fprintf(fp, "          block_size: %d\n", dh->block_size);
+	fprintf(fp, "        sub_hdr_size: %d\n", dh->sub_hdr_size);
+	fprintf(fp, "       bitmap_blocks: %u\n", dh->bitmap_blocks);
+	fprintf(fp, "           max_mapnr: %u\n", dh->max_mapnr);
+	fprintf(fp, "    total_ram_blocks: %u\n", dh->total_ram_blocks);
+	fprintf(fp, "       device_blocks: %u\n", dh->device_blocks);
+	fprintf(fp, "      written_blocks: %u\n", dh->written_blocks);
+	fprintf(fp, "         current_cpu: %u\n", dh->current_cpu);
+	fprintf(fp, "             nr_cpus: %d\n", dh->nr_cpus);
+	tasks = (ulong *)&dh->tasks[0];
+	fprintf(fp, "      tasks[nr_cpus]: %lx\n", *tasks);
+	for (tasks++, i = 1; i < dh->nr_cpus; i++) {
+		fprintf(fp, "                      %lx\n", *tasks);
+		tasks++;
+	}
+        fprintf(fp, "\n");
+	fprintf(fp, "        sub_header: %lx ", (ulong)dd->sub_header);
+	if ((dsh = dd->sub_header)) {
+		fprintf(fp, "\n            elf_regs: %lx\n", 
+			(ulong)&dsh->elf_regs);
+		fprintf(fp, "          dump_level: ");
+		if ((pc->flags & RUNTIME) && 
+		    ((dump_level = get_dump_level()) >= 0)) {
+			fprintf(fp, "%d (0x%x) %s", dump_level, dump_level, 
+				dump_level ? "(" : "");
+
+#define DUMP_EXCLUDE_CACHE 0x00000001   /* Exclude LRU & SwapCache pages*/
+#define DUMP_EXCLUDE_CLEAN 0x00000002   /* Exclude all-zero pages */
+#define DUMP_EXCLUDE_FREE  0x00000004   /* Exclude free pages */
+#define DUMP_EXCLUDE_ANON  0x00000008   /* Exclude Anon pages */
+#define DUMP_SAVE_PRIVATE  0x00000010   /* Save private pages */
+
+		        others = 0;
+        		if (dump_level & DUMP_EXCLUDE_CACHE)
+                		fprintf(fp, "%sDUMP_EXCLUDE_CACHE", 
+					others++ ? "|" : "");
+        		if (dump_level & DUMP_EXCLUDE_CLEAN)
+                		fprintf(fp, "%sDUMP_EXCLUDE_CLEAN", 
+					others++ ? "|" : "");
+        		if (dump_level & DUMP_EXCLUDE_FREE)
+                		fprintf(fp, "%sDUMP_EXCLUDE_FREE", 
+					others++ ? "|" : "");
+        		if (dump_level & DUMP_EXCLUDE_ANON)
+                		fprintf(fp, "%sDUMP_EXCLUDE_ANON", 
+					others++ ? "|" : "");
+        		if (dump_level & DUMP_SAVE_PRIVATE)
+                		fprintf(fp, "%sDUMP_SAVE_PRIVATE", 
+					others++ ? "|" : "");
+			fprintf(fp, "%s\n\n", dump_level ? ")" : "");
+		} else
+			fprintf(fp, "%s\n\n", pc->flags & RUNTIME ? 
+				"(unknown)" : "(undetermined)");
+
+	} else
+        	fprintf(fp, "(n/a)\n\n");
+
+	fprintf(fp, "  sub_header_kdump: %lx ", (ulong)dd->sub_header_kdump);
+	if ((kdsh = dd->sub_header_kdump)) {
+		fprintf(fp, "\n           phys_base: %lx\n", 
+			(ulong)kdsh->phys_base);
+		fprintf(fp, "          dump_level: ");
+		if ((dump_level = get_dump_level()) >= 0) {
+			fprintf(fp, "%d (0x%x) %s", dump_level, dump_level, 
+				dump_level ? "(" : "");
+
+#define DL_EXCLUDE_ZERO         (0x001) /* Exclude Pages filled with Zeros */
+#define DL_EXCLUDE_CACHE        (0x002) /* Exclude Cache Pages without Private Pages */
+#define DL_EXCLUDE_CACHE_PRI    (0x004) /* Exclude Cache Pages with Private Pages */
+#define DL_EXCLUDE_USER_DATA    (0x008) /* Exclude UserProcessData Pages */
+#define DL_EXCLUDE_FREE         (0x010) /* Exclude Free Pages */
+
+		        others = 0;
+        		if (dump_level & DL_EXCLUDE_ZERO)
+                		fprintf(fp, "%sDUMP_EXCLUDE_ZERO", 
+					others++ ? "|" : "");
+        		if (dump_level & DL_EXCLUDE_CACHE)
+                		fprintf(fp, "%sDUMP_EXCLUDE_CACHE", 
+					others++ ? "|" : "");
+        		if (dump_level & DL_EXCLUDE_CACHE_PRI)
+                		fprintf(fp, "%sDUMP_EXCLUDE_CACHE_PRI", 
+					others++ ? "|" : "");
+        		if (dump_level & DL_EXCLUDE_USER_DATA)
+                		fprintf(fp, "%sDUMP_EXCLUDE_USER_DATA", 
+					others++ ? "|" : "");
+        		if (dump_level & DL_EXCLUDE_FREE)
+                		fprintf(fp, "%sDUMP_EXCLUDE_FREE", 
+					others++ ? "|" : "");
+
+			fprintf(fp, "%s\n\n", dump_level ? ")" : "");
+		} else
+			fprintf(fp, "(unknown)\n\n");
+	} else
+        	fprintf(fp, "(n/a)\n\n");
+
+	fprintf(fp, "       data_offset: %lx\n", (ulong)dd->data_offset);
+	fprintf(fp, "        block_size: %d\n", dd->block_size);
+	fprintf(fp, "       block_shift: %d\n", dd->block_shift);
+	fprintf(fp, "            bitmap: %lx\n", (ulong)dd->bitmap);
+	fprintf(fp, "        bitmap_len: %d\n", dd->bitmap_len);
+	fprintf(fp, "   dumpable_bitmap: %lx\n", (ulong)dd->dumpable_bitmap);
+	fprintf(fp, "              byte: %d\n", dd->byte);
+	fprintf(fp, "               bit: %d\n", dd->bit);
+	fprintf(fp, "   compressed_page: %lx\n", (ulong)dd->compressed_page);
+	fprintf(fp, "         curbufptr: %lx\n\n", (ulong)dd->curbufptr);
+
+	for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) {
+		fprintf(fp, "%spage_cache_hdr[%d]:\n", i < 10 ? " " : "", i);
+		fprintf(fp, "            pg_flags: %x (", dd->page_cache_hdr[i].pg_flags);
+		others = 0;
+		if (dd->page_cache_hdr[i].pg_flags & PAGE_VALID)
+                	fprintf(fp, "%sPAGE_VALID", others++ ? "|" : "");
+		fprintf(fp, ")\n");
+		fprintf(fp, "             pg_addr: %llx\n", (ulonglong)dd->page_cache_hdr[i].pg_addr);
+		fprintf(fp, "           pg_bufptr: %lx\n", (ulong)dd->page_cache_hdr[i].pg_bufptr);
+		fprintf(fp, "        pg_hit_count: %ld\n", dd->page_cache_hdr[i].pg_hit_count);
+	}
+
+	fprintf(fp, "\n    page_cache_buf: %lx\n", (ulong)dd->page_cache_buf);
+	fprintf(fp, "       evict_index: %d\n", dd->evict_index);
+	fprintf(fp, "         evictions: %ld\n", dd->evictions);
+	fprintf(fp, "          accesses: %ld\n", dd->accesses);
+	fprintf(fp, "      cached_reads: %ld ", dd->cached_reads);
+	if (dd->accesses)
+		fprintf(fp, "(%ld%%)\n",
+			dd->cached_reads * 100 / dd->accesses);
+	else
+		fprintf(fp, "\n");
+	fprintf(fp, "       valid_pages: %lx\n", (ulong)dd->valid_pages);
+
 	return 0;
 }
 
@@ -142,3 +823,36 @@
 {
 	return 0;
 }
+
+/*
+ *  Versions of disk_dump that support dump levels contain the "dump_level"
+ *  symbol.  Version 1 and later compressed kdump dumpfiles store the dump
+ *  level in an additional field of the sub_header_kdump structure.
+ */
+static int 
+get_dump_level(void)
+{
+	int dump_level;
+
+	if (DISKDUMP_VALID()) {
+		if (symbol_exists("dump_level") &&
+		    readmem(symbol_value("dump_level"), KVADDR, &dump_level,
+		    sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR))
+                 	return dump_level;
+	} else if (KDUMP_CMPRS_VALID()) {
+		if (dd->header->header_version >= 1)
+			return dd->sub_header_kdump->dump_level;
+	}
+
+	return -1;
+}
+
+/*
+ *  Used by the "sys" command to display [PARTIAL DUMP] 
+ *  after the dumpfile name.
+ */
+int 
+is_partial_diskdump(void) 
+{
+	return (get_dump_level() > 0 ? TRUE : FALSE);
+}
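
The read_diskdump() path added above reduces to a few lines of address arithmetic: split the physical address into a page frame number, a page-aligned block address, and an offset, and consult the per-pfn dumpable bitmap before going to the page cache. The stand-alone sketch below is illustrative only and is not part of the patch; BLOCK_SHIFT, the toy bitmap, and the sample address are assumptions, not the real crash diskdump structures.

#include <stdio.h>
#include <string.h>

#define BLOCK_SHIFT 12                      /* assumed: 4K dump pages */
#define BLOCK_SIZE  (1UL << BLOCK_SHIFT)

static unsigned char dumpable_bitmap[1024]; /* toy bitmap: 1 bit per pfn */

/* Test the bit for this pfn in the dumpable bitmap. */
static int page_is_dumpable(unsigned long pfn)
{
	return dumpable_bitmap[pfn >> 3] & (1 << (pfn & 7));
}

int main(void)
{
	unsigned long long paddr = 0x1234abcULL;                       /* sample physical address */
	unsigned long pfn = paddr >> BLOCK_SHIFT;                      /* page frame number */
	unsigned long long block = paddr & ~(unsigned long long)(BLOCK_SIZE - 1);
	unsigned long page_offset = paddr & (BLOCK_SIZE - 1);          /* offset within the page */

	memset(dumpable_bitmap, 0xff, sizeof(dumpable_bitmap));        /* pretend every page was dumped */

	printf("pfn=%#lx block=%#llx offset=%#lx dumpable=%d\n",
	       pfn, block, page_offset, page_is_dumpable(pfn) ? 1 : 0);
	return 0;
}
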
--- crash/net.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/net.c	2008-03-17 17:28:54.000000000 -0400
@@ -1,8 +1,8 @@
 /* net.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -50,6 +50,7 @@
 #define STRUCT_NET_DEVICE (0x4)
 #define SOCK_V1           (0x8)
 #define SOCK_V2           (0x10)
+#define NO_INET_SOCK      (0x20)
 
 #define	DEV_NAME_MAX	100
 struct devinfo {
@@ -64,6 +65,8 @@
 #define BYTES_IP_TUPLE	(BYTES_IP_ADDR + BYTES_PORT_NUM + 1)
 
 static void show_net_devices(void);
+static void show_net_devices_v2(void);
+static void show_net_devices_v3(void);
 static void print_neighbour_q(ulong, int);
 static void get_netdev_info(ulong, struct devinfo *);
 static void get_device_name(ulong, char *);
@@ -75,6 +78,7 @@
 static void dump_sockets(ulong, struct reference *);
 static int  sym_socket_dump(ulong, int, int, ulong, struct reference *);
 static void dump_hw_addr(unsigned char *, int);
+static char *dump_in6_addr_port(uint16_t *, uint16_t, char *, int *);
 
 
 #define MK_TYPE_T(f,s,m)						\
@@ -109,6 +113,8 @@
 			"net_device", "addr_len");
 		net->dev_ip_ptr = MEMBER_OFFSET_INIT(net_device_ip_ptr,
 			"net_device", "ip_ptr");
+		MEMBER_OFFSET_INIT(net_device_dev_list, "net_device", "dev_list");
+		MEMBER_OFFSET_INIT(net_dev_base_head, "net", "dev_base_head");
 		ARRAY_LENGTH_INIT(net->net_device_name_index,
 			net_device_name, "net_device.name", NULL, sizeof(char));
 		net->flags |= (NETDEV_INIT|STRUCT_NET_DEVICE);
@@ -158,13 +164,6 @@
 			"in_ifaddr", "ifa_address");
 
 		STRUCT_SIZE_INIT(sock, "sock");
-		MEMBER_OFFSET_INIT(sock_daddr, "sock", "daddr");
-		MEMBER_OFFSET_INIT(sock_rcv_saddr, "sock", "rcv_saddr");
-		MEMBER_OFFSET_INIT(sock_dport, "sock", "dport");
-		MEMBER_OFFSET_INIT(sock_sport, "sock", "sport");
-		MEMBER_OFFSET_INIT(sock_num, "sock", "num");
-		MEMBER_OFFSET_INIT(sock_family, "sock", "family");
-		MEMBER_OFFSET_INIT(sock_type, "sock", "type");
 
                 MEMBER_OFFSET_INIT(sock_family, "sock", "family");
 		if (VALID_MEMBER(sock_family)) {
@@ -195,7 +194,23 @@
 			 */
 			STRUCT_SIZE_INIT(inet_sock, "inet_sock");
 			STRUCT_SIZE_INIT(socket, "socket");
-			MEMBER_OFFSET_INIT(inet_sock_inet, "inet_sock", "inet");
+
+			if (STRUCT_EXISTS("inet_opt")) {
+				MEMBER_OFFSET_INIT(inet_sock_inet, "inet_sock", "inet");
+				MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_opt", "daddr");
+				MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_opt", "rcv_saddr");
+				MEMBER_OFFSET_INIT(inet_opt_dport, "inet_opt", "dport");
+				MEMBER_OFFSET_INIT(inet_opt_sport, "inet_opt", "sport");
+				MEMBER_OFFSET_INIT(inet_opt_num, "inet_opt", "num");
+			} else {	/* inet_opt moved to inet_sock */
+				ASSIGN_OFFSET(inet_sock_inet) = 0;
+				MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_sock", "daddr");
+				MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_sock", "rcv_saddr");
+				MEMBER_OFFSET_INIT(inet_opt_dport, "inet_sock", "dport");
+				MEMBER_OFFSET_INIT(inet_opt_sport, "inet_sock", "sport");
+				MEMBER_OFFSET_INIT(inet_opt_num, "inet_sock", "num");
+			}	
+
 			if (VALID_STRUCT(inet_sock) && 
 			    INVALID_MEMBER(inet_sock_inet)) {
 				/*
@@ -210,15 +225,36 @@
 			         *  to subtract the size of the inet_opt struct
 				 *  from the size of the containing inet_sock.
 				 */
+				net->flags |= NO_INET_SOCK;
 				ASSIGN_OFFSET(inet_sock_inet) = 
 				    SIZE(inet_sock) - STRUCT_SIZE("inet_opt");
 			}
-			MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_opt", "daddr");
-			MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_opt", 
-				"rcv_saddr");
-			MEMBER_OFFSET_INIT(inet_opt_dport, "inet_opt", "dport");
-			MEMBER_OFFSET_INIT(inet_opt_sport, "inet_opt", "sport");
-			MEMBER_OFFSET_INIT(inet_opt_num, "inet_opt", "num");
+
+			/* 
+			 *  If necessary, set inet_sock size and inet_sock_inet offset,
+			 *  accounting for the configuration-dependent, intervening,
+			 *  struct ipv6_pinfo pointer located in between the sock and 
+			 *  inet_opt members of the inet_sock.
+			 */
+			if (!VALID_STRUCT(inet_sock)) 
+			{
+				if (symbol_exists("tcpv6_protocol") && 
+				    symbol_exists("udpv6_protocol")) {
+					ASSIGN_SIZE(inet_sock) = SIZE(sock) + 
+						sizeof(void *) + STRUCT_SIZE("inet_opt");
+					ASSIGN_OFFSET(inet_sock_inet) = SIZE(sock) + 
+						sizeof(void *);
+				} else {
+					ASSIGN_SIZE(inet_sock) = SIZE(sock) + 
+						STRUCT_SIZE("inet_opt");
+					ASSIGN_OFFSET(inet_sock_inet) = SIZE(sock);
+				}
+			}
+
+			MEMBER_OFFSET_INIT(ipv6_pinfo_rcv_saddr, "ipv6_pinfo", "rcv_saddr");
+			MEMBER_OFFSET_INIT(ipv6_pinfo_daddr, "ipv6_pinfo", "daddr");
+			STRUCT_SIZE_INIT(in6_addr, "in6_addr");
+
 			net->flags |= SOCK_V2;
 		}
 	}	
@@ -323,8 +359,16 @@
 	long flen;
 	char buf[BUFSIZE];
 
+	if (symbol_exists("dev_base_head")) {
+		show_net_devices_v2();
+		return;
+	} else if (symbol_exists("init_net")) {
+		show_net_devices_v3();
+		return;
+	}
+
 	if (!symbol_exists("dev_base"))
-		error(FATAL, "dev_base does not exist!\n");
+		error(FATAL, "dev_base, dev_base_head or init_net do not exist!\n");
 
 	get_symbol_data("dev_base", sizeof(void *), &next);
 
@@ -352,6 +396,114 @@
 	} while (next);
 }
 
+static void
+show_net_devices_v2(void)
+{
+	struct list_data list_data, *ld;
+	char *net_device_buf;
+	char buf[BUFSIZE];
+	ulong *ndevlist;
+	int ndevcnt, i;
+	long flen;
+
+	if (!net->netdevice) /* initialized in net_init() */
+		return;
+
+	flen = MAX(VADDR_PRLEN, strlen(net->netdevice));
+
+	fprintf(fp, "%s  NAME   IP ADDRESS(ES)\n",
+		mkstring(upper_case(net->netdevice, buf), 
+			flen, CENTER|LJUST, NULL));
+
+	net_device_buf = GETBUF(SIZE(net_device));
+
+	ld =  &list_data;
+	BZERO(ld, sizeof(struct list_data));
+	get_symbol_data("dev_base_head", sizeof(void *), &ld->start);
+	ld->end = symbol_value("dev_base_head");
+	ld->list_head_offset = OFFSET(net_device_dev_list);
+
+	hq_open();
+	ndevcnt = do_list(ld);
+	ndevlist = (ulong *)GETBUF(ndevcnt * sizeof(ulong));
+	ndevcnt = retrieve_list(ndevlist, ndevcnt);
+	hq_close();
+
+	for (i = 0; i < ndevcnt; ++i) {
+		readmem(ndevlist[i], KVADDR, net_device_buf,
+			SIZE(net_device), "net_device buffer",
+			FAULT_ON_ERROR);
+
+                fprintf(fp, "%s  ",
+			mkstring(buf, flen, CENTER|RJUST|LONG_HEX,
+			MKSTR(ndevlist[i])));
+
+		get_device_name(ndevlist[i], buf);
+		fprintf(fp, "%-6s ", buf);
+
+		get_device_address(ndevlist[i], buf);
+		fprintf(fp, "%s\n", buf);
+	}
+	
+	FREEBUF(ndevlist);
+	FREEBUF(net_device_buf);
+}
+
+static void
+show_net_devices_v3(void)
+{
+	struct list_data list_data, *ld;
+	char *net_device_buf;
+	char buf[BUFSIZE];
+	ulong *ndevlist;
+	int ndevcnt, i;
+	long flen;
+
+	if (!net->netdevice) /* initialized in net_init() */
+		return;
+
+	flen = MAX(VADDR_PRLEN, strlen(net->netdevice));
+
+	fprintf(fp, "%s  NAME   IP ADDRESS(ES)\n",
+		mkstring(upper_case(net->netdevice, buf), 
+			flen, CENTER|LJUST, NULL));
+
+	net_device_buf = GETBUF(SIZE(net_device));
+
+	ld =  &list_data;
+	BZERO(ld, sizeof(struct list_data));
+	ld->start = ld->end =
+		 symbol_value("init_net") + OFFSET(net_dev_base_head);
+	ld->list_head_offset = OFFSET(net_device_dev_list);
+
+	hq_open();
+	ndevcnt = do_list(ld);
+	ndevlist = (ulong *)GETBUF(ndevcnt * sizeof(ulong));
+	ndevcnt = retrieve_list(ndevlist, ndevcnt);
+	hq_close();
+
+	/*
+	 *  Skip the first entry (init_net).
+	 */
+	for (i = 1; i < ndevcnt; ++i) {
+		readmem(ndevlist[i], KVADDR, net_device_buf,
+			SIZE(net_device), "net_device buffer",
+			FAULT_ON_ERROR);
+
+                fprintf(fp, "%s  ",
+			mkstring(buf, flen, CENTER|RJUST|LONG_HEX,
+			MKSTR(ndevlist[i])));
+
+		get_device_name(ndevlist[i], buf);
+		fprintf(fp, "%-6s ", buf);
+
+		get_device_address(ndevlist[i], buf);
+		fprintf(fp, "%s\n", buf);
+	}
+	
+	FREEBUF(ndevlist);
+	FREEBUF(net_device_buf);
+}
 
 /*
  * Perform the actual work of dumping the ARP table...
@@ -378,6 +530,24 @@
 	nhash_buckets = (i = ARRAY_LENGTH(neigh_table_hash_buckets)) ?
 		i : get_array_length("neigh_table.hash_buckets", 
 			NULL, sizeof(void *));
+
+	/*
+	 *  NOTE: 2.6.8 -> 2.6.9 neigh_table struct changed from:
+	 *
+	 *    struct neighbour *hash_buckets[32];
+	 *  to
+	 *    struct neighbour **hash_buckets;
+	 *
+	 *  Even after hardwiring and testing with the correct
+	 *  array size, other changes cause this command to break
+	 *  down, so it needs to be looked at by someone who cares...
+	 */
+
+	if (nhash_buckets == 0) {
+		option_not_supported('a');
+		return;
+	}
+
 	hash_bytes = nhash_buckets * sizeof(*hash_buckets);
 
 	hash_buckets = (ulong *)GETBUF(hash_bytes);
@@ -609,8 +779,14 @@
 	uint16_t dport, sport;
 	ushort num, family, type;
 	char *sockbuf, *inet_sockbuf;
+	ulong ipv6_pinfo, ipv6_rcv_saddr, ipv6_daddr;
+	uint16_t u6_addr16_src[8];
+	uint16_t u6_addr16_dest[8];
+	char buf2[BUFSIZE];
+	int len;
 
 	BZERO(buf, BUFSIZE);
+	BZERO(buf2, BUFSIZE);
 	sockbuf = inet_sockbuf = NULL;
 
 	switch (net->flags & (SOCK_V1|SOCK_V2))
@@ -646,6 +822,7 @@
 			OFFSET(inet_opt_num));
 		family = USHORT(inet_sockbuf + OFFSET(sock_common_skc_family));
 		type = USHORT(inet_sockbuf + OFFSET(sock_sk_type));
+		ipv6_pinfo = ULONG(inet_sockbuf + SIZE(sock));
 		break;
 	}
 
@@ -723,27 +900,28 @@
 	}
 
 	/* make sure we have room at the end... */
-	sprintf(&buf[strlen(buf)], "%s", space(MINSPACE-1));
+//	sprintf(&buf[strlen(buf)], "%s", space(MINSPACE-1));
+	sprintf(&buf[strlen(buf)], " ");
            
 	if (family == AF_INET) {
 		if (BITS32()) {
-			sprintf(&buf[strlen(buf)], "%*s:%-*d%s",
+			sprintf(&buf[strlen(buf)], "%*s-%-*d%s",
 				BYTES_IP_ADDR,
 				inet_ntoa(*((struct in_addr *)&rcv_saddr)),
 				BYTES_PORT_NUM,
 				ntohs(sport),
 				space(1));
-			sprintf(&buf[strlen(buf)], "%*s:%-*d%s",
+			sprintf(&buf[strlen(buf)], "%*s-%-*d%s",
 				BYTES_IP_ADDR,
 				inet_ntoa(*((struct in_addr *)&daddr)), 
 				BYTES_PORT_NUM,
 				ntohs(dport),
 				space(1));
 		} else {
-	                sprintf(&buf[strlen(buf)], " %s:%d ",
+	                sprintf(&buf[strlen(buf)], " %s-%d ",
 	                        inet_ntoa(*((struct in_addr *)&rcv_saddr)),
 	                        ntohs(sport));
-	                sprintf(&buf[strlen(buf)], "%s:%d",
+	                sprintf(&buf[strlen(buf)], "%s-%d",
 	                        inet_ntoa(*((struct in_addr *)&daddr)),
 	                        ntohs(dport));
 		}
@@ -753,6 +931,60 @@
 		FREEBUF(sockbuf);
 	if (inet_sockbuf)
 		FREEBUF(inet_sockbuf);
+
+	if (family != AF_INET6)
+		return;
+
+	switch (net->flags & (SOCK_V1|SOCK_V2))
+	{
+	case SOCK_V1:
+		break;
+
+	case SOCK_V2:
+		if (INVALID_MEMBER(ipv6_pinfo_rcv_saddr) ||
+		    INVALID_MEMBER(ipv6_pinfo_daddr))
+			break;
+
+        	ipv6_rcv_saddr = ipv6_pinfo + OFFSET(ipv6_pinfo_rcv_saddr);
+		ipv6_daddr = ipv6_pinfo + OFFSET(ipv6_pinfo_daddr);
+
+		if (!readmem(ipv6_rcv_saddr, KVADDR, u6_addr16_src, SIZE(in6_addr),
+                    "ipv6_rcv_saddr buffer", QUIET|RETURN_ON_ERROR))
+			break;
+                if (!readmem(ipv6_daddr, KVADDR, u6_addr16_dest, SIZE(in6_addr),
+                    "ipv6_daddr buffer", QUIET|RETURN_ON_ERROR))
+			break;
+
+		sprintf(&buf[strlen(buf)], "%*s ", BITS32() ? 22 : 12,
+			dump_in6_addr_port(u6_addr16_src, sport, buf2, &len));
+		if (BITS32() && (len > 22))
+			len = 1;
+		mkstring(dump_in6_addr_port(u6_addr16_dest, dport, buf2, NULL),
+			len, CENTER, NULL);
+		sprintf(&buf[strlen(buf)], "%s", buf2);
+
+		break;
+	}
+}
+
+static char *
+dump_in6_addr_port(uint16_t *addr, uint16_t port, char *buf, int *len)
+{
+	sprintf(buf, "%x:%x:%x:%x:%x:%x:%x:%x-%d",
+                ntohs(addr[0]),
+                ntohs(addr[1]),
+                ntohs(addr[2]),
+                ntohs(addr[3]),
+                ntohs(addr[4]),
+                ntohs(addr[5]),
+                ntohs(addr[6]),
+                ntohs(addr[7]),
+                ntohs(port));
+
+	if (len)
+		*len = strlen(buf);
+
+	return buf;
 }
 
 
@@ -899,6 +1131,8 @@
 		fprintf(fp, "%sSTRUCT_DEVICE", others++ ? "|" : "");
 	if (net->flags & STRUCT_NET_DEVICE)
 		fprintf(fp, "%sSTRUCT_NET_DEVICE", others++ ? "|" : "");
+	if (net->flags & NO_INET_SOCK)
+		fprintf(fp, "%sNO_INET_SOCK", others++ ? "|" : "");
 	if (net->flags & SOCK_V1)
 		fprintf(fp, "%sSOCK_V1", others++ ? "|" : "");
 	if (net->flags & SOCK_V2)
@@ -972,7 +1206,7 @@
 void
 dump_sockets_workhorse(ulong task, ulong flag, struct reference *ref)
 {
-	ulong files_struct_addr = 0;
+	ulong files_struct_addr = 0, fdtable_addr = 0;
 	int max_fdset = 0;
 	int max_fds = 0;
 	ulong open_fds_addr = 0;
@@ -1004,32 +1238,54 @@
             sizeof(void *), "task files contents", FAULT_ON_ERROR);
 
         if (files_struct_addr) {
-        	readmem(files_struct_addr + OFFSET(files_struct_max_fdset), 
-                    	KVADDR, &max_fdset, sizeof(int), 
-			"files_struct max_fdset", FAULT_ON_ERROR);
-
-        	readmem(files_struct_addr + OFFSET(files_struct_max_fds), 
-                    	KVADDR, &max_fds, sizeof(int), "files_struct max_fds",
-                    	FAULT_ON_ERROR);
-    	}
+                if (VALID_MEMBER(files_struct_max_fdset)) {
+		 	readmem(files_struct_addr + OFFSET(files_struct_max_fdset),
+		          	KVADDR, &max_fdset, sizeof(int),
+				"files_struct max_fdset", FAULT_ON_ERROR);
+		      	readmem(files_struct_addr + OFFSET(files_struct_max_fds),
+        	        	KVADDR, &max_fds, sizeof(int), "files_struct max_fds",
+                	   	FAULT_ON_ERROR);
+                }
+		else if (VALID_MEMBER(files_struct_fdt)) {
+			readmem(files_struct_addr + OFFSET(files_struct_fdt), KVADDR,
+				&fdtable_addr, sizeof(void *), "fdtable buffer",
+				FAULT_ON_ERROR);
+			if (VALID_MEMBER(fdtable_max_fdset))
+		      		readmem(fdtable_addr + OFFSET(fdtable_max_fdset),
+        	         		KVADDR, &max_fdset, sizeof(int),
+					"fdtable_struct max_fdset", FAULT_ON_ERROR);
+			else
+				max_fdset = -1;
+		      	readmem(fdtable_addr + OFFSET(fdtable_max_fds),
+	      	            	KVADDR, &max_fds, sizeof(int), "fdtable_struct max_fds",
+	               	    	FAULT_ON_ERROR);
+    		}
+	}
 
-	if (!files_struct_addr || (max_fdset == 0) || (max_fds == 0)) {
+	if ((VALID_MEMBER(files_struct_fdt) && !fdtable_addr) ||
+	    !files_struct_addr || (max_fdset == 0) || (max_fds == 0)) {
 		if (!NET_REFERENCE_CHECK(ref))
 			fprintf(fp, "No open sockets.\n");
 		return;
 	}
 
-	readmem(files_struct_addr + OFFSET(files_struct_open_fds), KVADDR, 
-            	&open_fds_addr, sizeof(void *), "files_struct open_fds addr", 
-            	FAULT_ON_ERROR);
+	if (VALID_MEMBER(fdtable_open_fds)){
+		readmem(fdtable_addr + OFFSET(fdtable_open_fds), KVADDR,
+     	  		&open_fds_addr, sizeof(void *), "files_struct open_fds addr",
+	            	FAULT_ON_ERROR);
+		readmem(fdtable_addr + OFFSET(fdtable_fd), KVADDR, &fd,
+           		sizeof(void *), "files_struct fd addr", FAULT_ON_ERROR);
+	} else {
+		readmem(files_struct_addr + OFFSET(files_struct_open_fds), KVADDR,
+            		&open_fds_addr, sizeof(void *), "files_struct open_fds addr",
+	          	FAULT_ON_ERROR);
+		readmem(files_struct_addr + OFFSET(files_struct_fd), KVADDR, &fd,
+            		sizeof(void *), "files_struct fd addr", FAULT_ON_ERROR);
+	}
 
 	if (open_fds_addr) 
-        	readmem(open_fds_addr, KVADDR, &open_fds, sizeof(fd_set), 
-                    	"files_struct open_fds", FAULT_ON_ERROR);
-
-	readmem(files_struct_addr + OFFSET(files_struct_fd), KVADDR, &fd,
-            	sizeof(void *), "files_struct fd addr", FAULT_ON_ERROR);
-
+		readmem(open_fds_addr, KVADDR, &open_fds, sizeof(fd_set),
+	               	"files_struct open_fds", FAULT_ON_ERROR);
     	if (!open_fds_addr || !fd) { 
 		if (!NET_REFERENCE_CHECK(ref))
 			fprintf(fp, "No open sockets.\n");
@@ -1061,7 +1317,7 @@
     	for (;;) {
 	        unsigned long set;
 	        i = j * __NFDBITS;
-	        if ((i >= max_fdset) || (i >= max_fds))
+	        if (((max_fdset >= 0) && (i >= max_fdset)) || (i >= max_fds))
 	            	break;
 	        set = open_fds.__fds_bits[j++];
 	        while (set) {
@@ -1096,9 +1352,9 @@
  */
 
 static char *socket_hdr_32 = 
-"FD   SOCKET     SOCK    FAMILY:TYPE          SOURCE:PORT      DESTINATION:PORT";
+"FD   SOCKET     SOCK    FAMILY:TYPE          SOURCE-PORT      DESTINATION-PORT";
 static char *socket_hdr_64 = 
-"FD      SOCKET            SOCK       FAMILY:TYPE SOURCE:PORT DESTINATION:PORT";
+"FD      SOCKET            SOCK       FAMILY:TYPE SOURCE-PORT DESTINATION-PORT";
 
 static int
 sym_socket_dump(ulong file, 
@@ -1223,7 +1479,12 @@
     			dump_struct("sock", sock, 0);
 			break;
 		case SOCK_V2:
-			dump_struct("inet_sock", sock, 0);
+			if (STRUCT_EXISTS("inet_sock") && !(net->flags & NO_INET_SOCK))
+				dump_struct("inet_sock", sock, 0);
+			else if (STRUCT_EXISTS("sock"))
+				dump_struct("sock", sock, 0);
+			else
+				fprintf(fp, "\nunable to display inet_sock structure\n");
 			break;
 		}
 		break;
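
The IPv6 support added to net.c prints each 16-bit group of an in6_addr through ntohs() and appends the port, as dump_in6_addr_port() does above. A minimal stand-alone version of that formatting is sketched below; it is not part of the patch, and the helper name, sample address, and buffer size are assumptions for illustration.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Format an IPv6 address (eight 16-bit groups in network byte order)
 * plus a port in the "a:b:c:d:e:f:g:h-port" style used above. */
static char *format_in6_port(const uint16_t *addr, uint16_t port, char *buf)
{
	sprintf(buf, "%x:%x:%x:%x:%x:%x:%x:%x-%d",
		ntohs(addr[0]), ntohs(addr[1]), ntohs(addr[2]), ntohs(addr[3]),
		ntohs(addr[4]), ntohs(addr[5]), ntohs(addr[6]), ntohs(addr[7]),
		ntohs(port));
	return buf;
}

int main(void)
{
	/* fe80::1, port 22, both in network byte order */
	uint16_t addr[8] = { htons(0xfe80), 0, 0, 0, 0, 0, 0, htons(1) };
	char buf[128];

	printf("%s\n", format_in6_port(addr, htons(22), buf));
	return 0;
}
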
--- crash/ppc64.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/ppc64.c	2009-02-04 15:34:33.000000000 -0500
@@ -1,8 +1,8 @@
 /* ppc64.c -- core analysis suite
  *
- * Copyright (C) 2004, 2005 David Anderson
- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2004 Haren Myneni, IBM Corporation
+ * Copyright (C) 2004, 2005, 2006, 2007, 2008 David Anderson
+ * Copyright (C) 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004, 2006 Haren Myneni, IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -47,6 +47,9 @@
 static char * ppc64_check_eframe(struct ppc64_pt_regs *);
 static void ppc64_print_eframe(char *, struct ppc64_pt_regs *, 
 		struct bt_info *);
+static void parse_cmdline_arg(void);
+static void ppc64_paca_init(void);
+static void ppc64_clear_machdep_cache(void);
 
 struct machine_specific ppc64_machine_specific = { { 0 }, 0, 0 };
 
@@ -64,26 +67,53 @@
 		machdep->verify_symbol = ppc64_verify_symbol;
 		if (pc->flags & KERNEL_DEBUG_QUERY)
 			return;
-		machdep->pagesize = memory_page_size();
+		machdep->stacksize = PPC64_STACK_SIZE;
+		machdep->last_pgd_read = 0;
+                machdep->last_pmd_read = 0;
+                machdep->last_ptbl_read = 0;
+                machdep->machspec->last_level4_read = 0;
+		machdep->verify_paddr = generic_verify_paddr;
+		machdep->ptrs_per_pgd = PTRS_PER_PGD;
+		machdep->flags |= MACHDEP_BT_TEXT;
+                if (machdep->cmdline_arg)
+                        parse_cmdline_arg();
+		machdep->clear_machdep_cache = ppc64_clear_machdep_cache;
+		break;
+
+	case PRE_GDB:
+		/*
+                * Recent changes to the kexec tools added support for
+                * a 64K page size.  With those changes, a vmcore taken
+                * from a kernel using a 64K page size could not be
+                * analyzed by crash running on a machine whose kernel
+                * uses a 4K page size.
+                *
+                * The following modification keeps the crash tool in
+                * sync with the kexec tools.
+                *
+                * If the symbol below exists, the dump was taken from
+                * a kernel using a 64K page size, so set the page size
+                * accordingly.
+                *
+                * This code block was also moved here from the
+                * PRE_SYMTAB case.
+                */
+                if (symbol_exists("__hash_page_64K"))
+                        machdep->pagesize = PPC64_64K_PAGE_SIZE;
+                else
+			machdep->pagesize = memory_page_size();
 		machdep->pageshift = ffs(machdep->pagesize) - 1;
 		machdep->pageoffset = machdep->pagesize - 1;
 		machdep->pagemask = ~((ulonglong)machdep->pageoffset);
-		machdep->stacksize = 4 * machdep->pagesize;
 		if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
 			error(FATAL, "cannot malloc pgd space.");
 		if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
 			error(FATAL, "cannot malloc pmd space.");
 		if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
 			error(FATAL, "cannot malloc ptbl space.");
-		machdep->last_pgd_read = 0;
-                machdep->last_pmd_read = 0;
-                machdep->last_ptbl_read = 0;
-		machdep->verify_paddr = generic_verify_paddr;
-		machdep->ptrs_per_pgd = PTRS_PER_PGD;
-		machdep->flags |= MACHDEP_BT_TEXT;
-		break;
+		if ((machdep->machspec->level4 = (char *)malloc(PAGESIZE())) == NULL)
+			error(FATAL, "cannot malloc level4 space.");
 
-	case PRE_GDB:
 	        machdep->kvbase = symbol_value("_stext");
 		machdep->identity_map_base = machdep->kvbase;
                 machdep->is_kvaddr = generic_is_kvaddr;
@@ -109,6 +139,57 @@
 		break;
 
 	case POST_GDB:
+		if (!(machdep->flags & (VM_ORIG|VM_4_LEVEL))) {
+			if (THIS_KERNEL_VERSION >= LINUX(2,6,14)) {
+				machdep->flags |= VM_4_LEVEL;
+			} else {
+				machdep->flags |= VM_ORIG;
+			}
+		}
+		if (machdep->flags & VM_ORIG) {
+			/* pre-2.6.14 layout */
+			free(machdep->machspec->level4);
+			machdep->machspec->level4 = NULL;
+			machdep->ptrs_per_pgd = PTRS_PER_PGD;
+		} else {
+			/* 2.6.14 layout */
+			struct machine_specific *m = machdep->machspec;
+			if (machdep->pagesize == 65536) {
+				/* 64K pagesize */
+				m->l1_index_size = PTE_INDEX_SIZE_L4_64K;
+				m->l2_index_size = PMD_INDEX_SIZE_L4_64K;
+				m->l3_index_size = PUD_INDEX_SIZE_L4_64K;
+				m->l4_index_size = PGD_INDEX_SIZE_L4_64K;
+				m->pte_shift = symbol_exists("demote_segment_4k") ?
+					PTE_SHIFT_L4_64K_V2 : PTE_SHIFT_L4_64K_V1; 
+				m->l2_masked_bits = PMD_MASKED_BITS_64K;
+			} else {
+				/* 4K pagesize */
+				m->l1_index_size = PTE_INDEX_SIZE_L4_4K;
+				m->l2_index_size = PMD_INDEX_SIZE_L4_4K;
+				m->l3_index_size = PUD_INDEX_SIZE_L4_4K;
+				m->l4_index_size = PGD_INDEX_SIZE_L4_4K;
+				m->pte_shift = PTE_SHIFT_L4_4K; 
+				m->l2_masked_bits = PMD_MASKED_BITS_4K;
+			}
+
+			/* Compute ptrs per each level */
+			m->l1_shift = machdep->pageshift;
+			m->ptrs_per_l1 = (1 << m->l1_index_size);
+			m->ptrs_per_l2 = (1 << m->l2_index_size);
+			m->ptrs_per_l3 = (1 << m->l3_index_size);
+
+			machdep->ptrs_per_pgd = m->ptrs_per_l3;
+
+			/* Compute shifts */
+			m->l2_shift = m->l1_shift + m->l1_index_size;
+			m->l3_shift = m->l2_shift + m->l2_index_size;
+			m->l4_shift = m->l3_shift + m->l3_index_size;
+		}
+
+		machdep->section_size_bits = _SECTION_SIZE_BITS;
+		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
+		ppc64_paca_init();
 		machdep->vmalloc_start = ppc64_vmalloc_start;
 		MEMBER_OFFSET_INIT(thread_struct_pg_tables,
 			"thread_struct", "pg_tables");
@@ -178,9 +259,11 @@
 			 */
 			BZERO(&machdep->machspec->hwintrstack,
 				NR_CPUS*sizeof(ulong));
-		machdep->hz = HZ;
-		if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
-			machdep->hz = 1000;
+		if (!machdep->hz) {
+			machdep->hz = HZ;
+			if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
+				machdep->hz = 1000;
+		}
 		/*
 		 * IRQ stacks are introduced in 2.6 and also configurable.
 		 */
@@ -223,16 +306,18 @@
 void
 ppc64_dump_machdep_table(ulong arg)
 {
-        int others; 
+        int i, c, others; 
  
         others = 0;
         fprintf(fp, "              flags: %lx (", machdep->flags);
 	if (machdep->flags & KSYMS_START)
 		fprintf(fp, "%sKSYMS_START", others++ ? "|" : "");
-	if (machdep->flags & SYSRQ)
-		fprintf(fp, "%sSYSRQ", others++ ? "|" : "");
 	if (machdep->flags & MACHDEP_BT_TEXT)
 		fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : "");
+	if (machdep->flags & VM_ORIG)
+		fprintf(fp, "%sVM_ORIG", others++ ? "|" : "");
+	if (machdep->flags & VM_4_LEVEL)
+		fprintf(fp, "%sVM_4_LEVEL", others++ ? "|" : "");
         fprintf(fp, ")\n");
 
 	fprintf(fp, "             kvbase: %lx\n", machdep->kvbase);
@@ -269,15 +354,56 @@
         fprintf(fp, "          is_kvaddr: generic_is_kvaddr()\n");
         fprintf(fp, "          is_uvaddr: generic_is_uvaddr()\n");
         fprintf(fp, "       verify_paddr: generic_verify_paddr()\n");
+	fprintf(fp, " xendump_p2m_create: NULL\n");
+	fprintf(fp, "xen_kdump_p2m_create: NULL\n");
         fprintf(fp, "  line_number_hooks: ppc64_line_number_hooks\n");
         fprintf(fp, "      last_pgd_read: %lx\n", machdep->last_pgd_read);
         fprintf(fp, "      last_pmd_read: %lx\n", machdep->last_pmd_read);
         fprintf(fp, "     last_ptbl_read: %lx\n", machdep->last_ptbl_read);
+        fprintf(fp, "clear_machdep_cache: ppc64_clear_machdep_cache()\n");
         fprintf(fp, "                pgd: %lx\n", (ulong)machdep->pgd);
         fprintf(fp, "                pmd: %lx\n", (ulong)machdep->pmd);
         fprintf(fp, "               ptbl: %lx\n", (ulong)machdep->ptbl);
 	fprintf(fp, "       ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd);
+	fprintf(fp, "  section_size_bits: %ld\n", machdep->section_size_bits);
+        fprintf(fp, "   max_physmem_bits: %ld\n", machdep->max_physmem_bits);
+        fprintf(fp, "  sections_per_root: %ld\n", machdep->sections_per_root);
 	fprintf(fp, "           machspec: %lx\n", (ulong)machdep->machspec);
+	fprintf(fp, "     hwintrstack[%d]: ", NR_CPUS);
+       	for (c = 0; c < NR_CPUS; c++) {
+		for (others = 0, i = c; i < NR_CPUS; i++) {
+			if (machdep->machspec->hwintrstack[i])
+				others++;
+		}
+		if (!others) {
+			fprintf(fp, "%s%s", 
+			        c && ((c % 4) == 0) ? "\n  " : "",
+				c ? "(remainder unused)" : "(unused)");
+			break;
+		}
+
+		fprintf(fp, "%s%016lx ", 
+			((c % 4) == 0) ? "\n  " : "",
+			machdep->machspec->hwintrstack[c]);
+	}
+	fprintf(fp, "\n");
+	fprintf(fp, "           hwstackbuf: %lx\n", (ulong)machdep->machspec->hwstackbuf);
+	fprintf(fp, "          hwstacksize: %d\n", machdep->machspec->hwstacksize);
+	fprintf(fp, "               level4: %lx\n", (ulong)machdep->machspec->level4);
+	fprintf(fp, "     last_level4_read: %lx\n", (ulong)machdep->machspec->last_level4_read);
+	fprintf(fp, "        l4_index_size: %d\n", machdep->machspec->l4_index_size);
+	fprintf(fp, "        l3_index_size: %d\n", machdep->machspec->l3_index_size);
+	fprintf(fp, "        l2_index_size: %d\n", machdep->machspec->l2_index_size);
+	fprintf(fp, "        l1_index_size: %d\n", machdep->machspec->l1_index_size);
+	fprintf(fp, "          ptrs_per_l3: %d\n", machdep->machspec->ptrs_per_l3);
+	fprintf(fp, "          ptrs_per_l2: %d\n", machdep->machspec->ptrs_per_l2);
+	fprintf(fp, "          ptrs_per_l1: %d\n", machdep->machspec->ptrs_per_l1);
+	fprintf(fp, "             l4_shift: %d\n", machdep->machspec->l4_shift);
+	fprintf(fp, "             l3_shift: %d\n", machdep->machspec->l3_shift);
+	fprintf(fp, "             l2_shift: %d\n", machdep->machspec->l2_shift);
+	fprintf(fp, "             l1_shift: %d\n", machdep->machspec->l1_shift);
+	fprintf(fp, "            pte_shift: %d\n", machdep->machspec->pte_shift);
+	fprintf(fp, "       l2_masked_bits: %x\n", machdep->machspec->l2_masked_bits);
 }
 
 /*
@@ -342,7 +468,7 @@
 	if (!(pte & _PAGE_PRESENT)) {
 		if (pte && verbose) {
 			fprintf(fp, "\n");
-			ppc64_translate_pte(pte, 0, 0);
+			ppc64_translate_pte(pte, 0, PTE_SHIFT);
 		}
 		return FALSE;
 	}
@@ -354,7 +480,90 @@
 
 	if (verbose) {
 		fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr));
-		ppc64_translate_pte(pte, 0, 0);
+		ppc64_translate_pte(pte, 0, PTE_SHIFT);
+	}
+
+	return TRUE;
+}
+
+/*
+ * Virtual to physical memory translation. This function will be called
+ * by both ppc64_kvtop and ppc64_uvtop.
+ */
+static int
+ppc64_vtop_level4(ulong vaddr, ulong *level4, physaddr_t *paddr, int verbose)
+{
+	ulong *level4_dir;
+	ulong *page_dir;
+	ulong *page_middle;
+	ulong *page_table;
+	ulong level4_pte, pgd_pte, pmd_pte;
+	ulong pte;
+
+	if (verbose)
+		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)level4);
+
+	level4_dir = (ulong *)((ulong *)level4 + L4_OFFSET(vaddr));
+	FILL_L4(PAGEBASE(level4), KVADDR, PAGESIZE());
+	level4_pte = ULONG(machdep->machspec->level4 + PAGEOFFSET(level4_dir));
+	if (verbose)
+		fprintf(fp, "  L4: %lx => %lx\n", (ulong)level4_dir, level4_pte);
+	if (!level4_pte)
+		return FALSE;
+
+	/* Sometimes we don't have level3 pagetable entries */
+	if (machdep->machspec->l3_index_size != 0) {
+		page_dir = (ulong *)((ulong *)level4_pte + PGD_OFFSET_L4(vaddr));
+		FILL_PGD(PAGEBASE(level4_pte), KVADDR, PAGESIZE());
+		pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));
+
+		if (verbose)
+			fprintf(fp, "  PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte);
+		if (!pgd_pte)
+			return FALSE;
+	} else {
+		pgd_pte = level4_pte;
+	}
+
+	page_middle = (ulong *)((ulong *)pgd_pte + PMD_OFFSET_L4(vaddr));
+	FILL_PMD(PAGEBASE(pgd_pte), KVADDR, PAGESIZE());
+	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));
+
+	if (verbose)
+		fprintf(fp, "  PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte);
+
+	if (!(pmd_pte))
+		return FALSE;
+
+	page_table = (ulong *)(pmd_pte & ~(machdep->machspec->l2_masked_bits))
+			 + (BTOP(vaddr) & (machdep->machspec->ptrs_per_l1 - 1));
+	if (verbose)
+		fprintf(fp, "  PMD: %lx => %lx\n",(ulong)page_middle,
+			(ulong)page_table);
+
+	FILL_PTBL(PAGEBASE(pmd_pte), KVADDR, PAGESIZE());
+	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));
+
+	if (verbose)
+		fprintf(fp, "  PTE: %lx => %lx\n", (ulong)page_table, pte);
+
+	if (!(pte & _PAGE_PRESENT)) {
+		if (pte && verbose) {
+			fprintf(fp, "\n");
+			ppc64_translate_pte(pte, 0, machdep->machspec->pte_shift);
+		}
+		return FALSE;
+	}
+
+	if (!pte)
+		return FALSE;
+
+	*paddr = PAGEBASE(PTOB(pte >> machdep->machspec->pte_shift)) 
+			+ PAGEOFFSET(vaddr);
+
+	if (verbose) {
+		fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr));
+		ppc64_translate_pte(pte, 0, machdep->machspec->pte_shift);
 	}
 
 	return TRUE;
@@ -411,7 +620,10 @@
 				FAULT_ON_ERROR);
 	}
 
-	return ppc64_vtop(vaddr, pgd, paddr, verbose);
+	if (machdep->flags & VM_4_LEVEL)
+		return ppc64_vtop_level4(vaddr, pgd, paddr, verbose);
+	else
+		return ppc64_vtop(vaddr, pgd, paddr, verbose);
 }
 
 /*
@@ -436,7 +648,10 @@
 			return TRUE;
 	}
 
-	return ppc64_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose);
+	if (machdep->flags & VM_4_LEVEL)
+		return ppc64_vtop_level4(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose);
+	else
+		return ppc64_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose);
 }
 
 /*
@@ -478,8 +693,9 @@
         ulong res, value, ppc_md, md_setup_res;
         ulong we_have_of, prep_setup_res;
         ulong node, type, name, properties;
-        char str_buf[16];
-        uint len, mhz = 0;
+	char str_buf[32];
+	uint len;
+	ulong mhz = 0;
 
         if (machdep->mhz)
                 return(machdep->mhz);
@@ -549,6 +765,23 @@
                                         mhz /= 1000000;
                                         break;
                                 }
+				else if(len && (strcasecmp(str_buf,
+				    "ibm,extended-clock-frequency") == 0)){
+					/* found the right cpu property */
+
+					readmem(properties+
+					    OFFSET(property_value),
+					    KVADDR, &value, sizeof(ulong),
+					    "clock frequency pointer",
+					    FAULT_ON_ERROR);
+					readmem(value, KVADDR, &mhz,
+					    sizeof(ulong),
+					    "clock frequency value",
+					    FAULT_ON_ERROR);
+					mhz /= 1000000;
+					break;
+                                }
+
                                 /* keep looking */
 
                                 readmem(properties+
@@ -657,7 +890,7 @@
  *  If a physaddr pointer is passed in, don't print anything.
  */
 static int
-ppc64_translate_pte(ulong pte, void *physaddr, ulonglong unused)
+ppc64_translate_pte(ulong pte, void *physaddr, ulonglong pte_shift)
 {
         int c, len1, len2, len3, others, page_present;
         char buf[BUFSIZE];
@@ -668,7 +901,7 @@
         char *arglist[MAXARGS];
         ulong paddr;
 
-        paddr =  PTOB(pte >> PTE_SHIFT);
+        paddr =  PTOB(pte >> pte_shift);
         page_present = (pte & _PAGE_PRESENT);
 
         if (physaddr) {
@@ -917,6 +1150,9 @@
 {
 	int c;
 	
+	if (!(tt->flags & IRQSTACKS))
+		return 0;
+
 	for (c = 0; c < NR_CPUS; c++) {
                 if (tt->hardirq_ctx[c]) {
 			if ((addr >= tt->hardirq_ctx[c]) &&
@@ -1034,8 +1270,12 @@
 				ms->hwstacksize + STACK_FRAME_OVERHEAD;
 			bt->stackbuf = ms->hwstackbuf;
 			alter_stackbuf(bt);
-		} else 
-			error(FATAL, "cannot find the stack info");
+		} else {
+			if (CRASHDEBUG(1)) {
+				fprintf(fp, "cannot find the stack info.\n");
+			}
+			return;
+		}
 	}
 	
 		
@@ -1270,20 +1510,11 @@
 	return NULL;
 }
 
-/*
- *  Print exception frame information for ppc64
- */
 static void
-ppc64_print_eframe(char *efrm_str, struct ppc64_pt_regs *regs,
-		struct bt_info *bt)
+ppc64_print_regs(struct ppc64_pt_regs *regs)
 {
 	int i;
 
-	if (BT_REFERENCE_CHECK(bt))
-		return;
-
-        fprintf(fp, " %s  [%lx] exception frame:", efrm_str, regs->trap);
-
         /* print out the gprs... */
         for(i=0; i<32; i++) {
                 if(!(i % 3))
@@ -1315,9 +1546,78 @@
         fprintf(fp, "DAR: %016lx\n", regs->dar);
         fprintf(fp, " DSISR: %016lx ", regs->dsisr);
         fprintf(fp, "    Syscall Result: %016lx\n", regs->result);
+}
+
+/*
+ * Print the exception frame information
+ */
+static void
+ppc64_print_eframe(char *efrm_str, struct ppc64_pt_regs *regs,
+			struct bt_info *bt)
+{
+	if (BT_REFERENCE_CHECK(bt))
+		return;
+
+	fprintf(fp, " %s  [%lx] exception frame:", efrm_str, regs->trap);
+	ppc64_print_regs(regs);
 	fprintf(fp, "\n");
 }
 
+/*
+ * get SP and IP from the saved ptregs.
+ */
+static int
+ppc64_kdump_stack_frame(struct bt_info *bt_in, ulong *nip, ulong *ksp)
+{
+	struct ppc64_pt_regs *pt_regs;
+	unsigned long unip;
+
+	pt_regs = (struct ppc64_pt_regs *)bt_in->machdep;
+	if (!pt_regs->gpr[1]) {
+		/*
+		 * The registers were not collected; possibly the
+		 * corresponding CPU did not respond to the IPI.
+		 */
+		fprintf(fp, "%0lx: GPR1 register value (SP) was not saved\n",
+			bt_in->task);
+		return FALSE;
+	}
+	*ksp = pt_regs->gpr[1];
+	if (IS_KVADDR(*ksp)) {
+		readmem(*ksp+16, KVADDR, &unip, sizeof(ulong), "Regs NIP value",
+			FAULT_ON_ERROR);
+		*nip = unip;
+	} else {
+		if (IN_TASK_VMA(bt_in->task, *ksp))
+			fprintf(fp, "%0lx: Task is running in user space\n",
+				bt_in->task);
+		else 
+			fprintf(fp, "%0lx: Invalid Stack Pointer %0lx\n",
+				bt_in->task, *ksp);
+		*nip = pt_regs->nip;
+	}
+
+	if (bt_in->flags &
+	    (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT))
+		return TRUE;
+
+	/*
+	 * Print the collected regs for the active task
+	 */
+	ppc64_print_regs(pt_regs);
+	if (!IS_KVADDR(*ksp)) 
+		return FALSE;
+	
+	fprintf(fp, " NIP [%016lx] %s\n", pt_regs->nip,
+		closest_symbol(pt_regs->nip));
+	if (unip != pt_regs->link)
+		fprintf(fp, " LR  [%016lx] %s\n", pt_regs->link,
+			closest_symbol(pt_regs->link));
+
+	fprintf(fp, "\n");
+
+	return TRUE;
+}
 
 /*
  *  Get the starting point for the active cpus in a diskdump/netdump.
@@ -1335,12 +1635,18 @@
         ulong ur_ksp = 0;
 	int check_hardirq, check_softirq;
 	int check_intrstack = TRUE;
+	struct ppc64_pt_regs *pt_regs;
+
+	/*
+	 * For a kdump vmcore, use the SP and IP values saved in ptregs.
+	 */
+	if (pc->flags & KDUMP)
+		return ppc64_kdump_stack_frame(bt_in, nip, ksp);
 
         bt = &bt_local;
         BCOPY(bt_in, bt, sizeof(struct bt_info));
         ms = machdep->machspec;
         ur_nip = ur_ksp = 0;
-	struct ppc64_pt_regs *pt_regs;
 	
 	panic_task = tt->panic_task == bt->task ? TRUE : FALSE;
 
@@ -1424,6 +1730,7 @@
                 if (STREQ(sym, ".netconsole_netdump") || 
 			STREQ(sym, ".netpoll_start_netdump") ||
 		 	STREQ(sym, ".start_disk_dump") ||
+		 	STREQ(sym, ".crash_kexec") ||
 			STREQ(sym, ".disk_dump")) {
                         *nip = *up;
                         *ksp = bt->stackbase + 
@@ -1853,7 +2160,7 @@
                 fprintf(fp, "(unknown)\n");
         fprintf(fp, "                 HZ: %d\n", machdep->hz);
         fprintf(fp, "          PAGE SIZE: %d\n", PAGESIZE());
-        fprintf(fp, "      L1 CACHE SIZE: %d\n", l1_cache_size());
+//      fprintf(fp, "      L1 CACHE SIZE: %d\n", l1_cache_size());
         fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase);
         fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start);
         fprintf(fp, "  KERNEL STACK SIZE: %ld\n", STACKSIZE());
@@ -2000,4 +2307,145 @@
 	ppc64_dump_line_number(0);
 }
 
+/*
+ *  Force the VM address-range selection via:
+ *
+ *   --machdep vm=orig 
+ *   --machdep vm=2.6.14
+ */
+
+static void
+parse_cmdline_arg(void)
+{
+	int i, c, errflag;
+	char *p;
+	char buf[BUFSIZE];
+	char *arglist[MAXARGS];
+	int lines = 0;
+
+	if (!strstr(machdep->cmdline_arg, "=")) {
+		error(WARNING, "ignoring --machdep option: %s\n\n",
+			machdep->cmdline_arg);
+		return;
+        }
+
+	strcpy(buf, machdep->cmdline_arg);
+
+	for (p = buf; *p; p++) {
+		if (*p == ',')
+			 *p = ' ';
+	}
+
+	c = parse_line(buf, arglist);
+
+	for (i = 0; i < c; i++) {
+		errflag = 0;
+
+		if (STRNEQ(arglist[i], "vm=")) {
+			p = arglist[i] + strlen("vm=");
+			if (strlen(p)) {
+				if (STREQ(p, "orig")) {
+					machdep->flags |= VM_ORIG;
+					continue;
+				} else if (STREQ(p, "2.6.14")) {
+					machdep->flags |= VM_4_LEVEL;
+					continue;
+				}
+			}
+		}
+
+		error(WARNING, "ignoring --machdep option: %s\n", arglist[i]);
+		lines++;
+	} 
+
+	switch (machdep->flags & (VM_ORIG|VM_4_LEVEL))
+	{
+	case VM_ORIG:
+		error(NOTE, "using original PPC64 VM address ranges\n");
+		lines++;
+		break;
+
+	case VM_4_LEVEL:
+		error(NOTE, "using 4-level pagetable PPC64 VM address ranges\n");
+		lines++;
+		break;
+
+	case (VM_ORIG|VM_4_LEVEL):
+		error(WARNING, "cannot set both vm=orig and vm=2.6.14\n");
+		lines++;
+		machdep->flags &= ~(VM_ORIG|VM_4_LEVEL);
+		break;
+	} 
+
+	if (lines)
+		fprintf(fp, "\n");
+}
+
+/*
+ *  Update any SMP-related items that may have been bypassed
+ *  or improperly initialized in kernel_init().
+ */
+static void
+ppc64_paca_init(void)
+{
+	int i, cpus, nr_paca;
+	char *cpu_paca_buf;
+	ulong data_offset;
+	int map;
+
+	if (!symbol_exists("paca"))
+		error(FATAL, "PPC64: Could not find 'paca' symbol\n");
+
+	if (symbol_exists("cpu_present_map"))
+		map = PRESENT;
+	else if (symbol_exists("cpu_online_map"))
+		map = ONLINE;
+	else
+		error(FATAL, 
+		    "PPC64: cannot find 'cpu_present_map' or 'cpu_online_map' symbols\n");
+
+	if (!MEMBER_EXISTS("paca_struct", "data_offset"))
+		return;
+	
+	STRUCT_SIZE_INIT(ppc64_paca, "paca_struct");
+	data_offset = MEMBER_OFFSET("paca_struct", "data_offset");
+
+	cpu_paca_buf = GETBUF(SIZE(ppc64_paca));
+
+	if (!(nr_paca = get_array_length("paca", NULL, 0))) 
+		nr_paca = NR_CPUS;
+
+	if (nr_paca > NR_CPUS) {
+		error(WARNING, 
+			"PPC64: Number of paca entries (%d) greater than NR_CPUS (%d)\n", 
+			nr_paca, NR_CPUS);
+		error(FATAL, "Recompile crash with larger NR_CPUS\n");
+	}
+	
+	for (i = cpus = 0; i < nr_paca; i++) {
+		/*
+		 * CPU present (or online)?
+		 */
+		if (!in_cpu_map(map, i))
+			continue;
+
+        	readmem(symbol_value("paca") + (i * SIZE(ppc64_paca)),
+             		KVADDR, cpu_paca_buf, SIZE(ppc64_paca),
+			"paca entry", FAULT_ON_ERROR);
+
+		kt->__per_cpu_offset[i] = ULONG(cpu_paca_buf + data_offset);
+		kt->flags |= PER_CPU_OFF;
+		cpus++;
+	}
+	kt->cpus = cpus;
+	if (kt->cpus > 1)
+		kt->flags |= SMP;
+}
+
+void
+ppc64_clear_machdep_cache(void)
+{
+	if (machdep->machspec->last_level4_read != vt->kernel_pgd[0])
+        	machdep->machspec->last_level4_read = 0;
+}
 #endif /* PPC64 */ 
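
The 4-level translation support added above builds each level's shift by stacking the per-level index sizes on top of the page shift, and ppc64_vtop_level4() then extracts each level's index from the virtual address with those shifts. The stand-alone sketch below reproduces only that arithmetic; it is not part of the patch, and the 9/7/7/9 index widths are assumed as a plausible 4K-page layout for illustration.

#include <stdio.h>

int main(void)
{
	/* Assumed per-level index widths for a 4K-page, 4-level layout. */
	unsigned int page_shift = 12;
	unsigned int l1_index_size = 9, l2_index_size = 7,
		     l3_index_size = 7, l4_index_size = 9;

	/* Each shift is the previous shift plus that level's index width,
	 * mirroring the POST_GDB computation in the hunk above. */
	unsigned int l1_shift = page_shift;
	unsigned int l2_shift = l1_shift + l1_index_size;
	unsigned int l3_shift = l2_shift + l2_index_size;
	unsigned int l4_shift = l3_shift + l3_index_size;

	unsigned long long vaddr = 0xc000000001234567ULL;   /* sample kernel virtual address */

	printf("L4 index: %llx\n", (vaddr >> l4_shift) & ((1ULL << l4_index_size) - 1));
	printf("L3 index: %llx\n", (vaddr >> l3_shift) & ((1ULL << l3_index_size) - 1));
	printf("L2 index: %llx\n", (vaddr >> l2_shift) & ((1ULL << l2_index_size) - 1));
	printf("L1 index: %llx\n", (vaddr >> l1_shift) & ((1ULL << l1_index_size) - 1));
	return 0;
}
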
--- crash/memory.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/memory.c	2009-02-12 08:36:21.000000000 -0500
@@ -1,8 +1,8 @@
 /* memory.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  * Copyright (C) 2002 Silicon Graphics, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -35,34 +35,47 @@
 	ulong order;
 	ulong slabsize;
         ulong num_slabs;
+	ulong objects;
         ulonglong spec_addr;
         ulong flags;
 	ulong size;
+	ulong objsize;
 	int memtype;
 	int free;
+	int slab_offset;
         char *reqname;
 	char *curname;
 	ulong *addrlist;
 	int *kmem_bufctl;
 	ulong *cpudata[NR_CPUS];
+	ulong *shared_array_cache;
+	int current_cache_index;
 	ulong found;
 	ulong retval;
 	char *ignore;
 	int errors;
 	int calls;
 	int cpu;
+	int cache_count;
 	ulong get_shared;
 	ulong get_totalram;
 	ulong get_buffers;
 	ulong get_slabs;
 	char *slab_buf;
 	char *cache_buf;
+	ulong *cache_list;
+	struct vmlist {
+		ulong addr;
+		ulong size;
+	} *vmlist;
+	ulong container;
 };
 
 static char *memtype_string(int, int);
 static char *error_handle_string(ulong);
 static void dump_mem_map(struct meminfo *);
-static void fill_mem_map_cache(ulong, char *);
+static void dump_mem_map_SPARSEMEM(struct meminfo *);
+static void fill_mem_map_cache(ulong, ulong, char *);
 static void dump_free_pages(struct meminfo *);
 static int dump_zone_page_usage(void);
 static void dump_multidimensional_free_pages(struct meminfo *);
@@ -72,19 +85,29 @@
 static void dump_page_hash_table(struct meminfo *);
 static void kmem_search(struct meminfo *);
 static void kmem_cache_init(void);
+static void kmem_cache_init_slub(void);
 static ulong max_cpudata_limit(ulong, ulong *);
 static int ignore_cache(struct meminfo *, char *);
 static char *is_kmem_cache_addr(ulong, char *);
+static char *is_kmem_cache_addr_slub(ulong, char *);
 static void kmem_cache_list(void);
 static void dump_kmem_cache(struct meminfo *);
 static void dump_kmem_cache_percpu_v1(struct meminfo *);
 static void dump_kmem_cache_percpu_v2(struct meminfo *);
+static void dump_kmem_cache_slub(struct meminfo *);
 static void dump_kmem_cache_info_v2(struct meminfo *);
-static char *vaddr_to_kmem_cache(ulong, char *);
+static void kmem_cache_list_slub(void);
+static ulong get_cpu_slab_ptr(struct meminfo *, int, ulong *);
+static unsigned int oo_order(ulong);
+static unsigned int oo_objects(ulong);
+static char *vaddr_to_kmem_cache(ulong, char *, int);
 static ulong vaddr_to_slab(ulong);
 static void do_slab_chain(int, struct meminfo *);
 static void do_slab_chain_percpu_v1(long, struct meminfo *);
 static void do_slab_chain_percpu_v2(long, struct meminfo *);
+static void do_slab_chain_percpu_v2_nodes(long, struct meminfo *);
+static void do_slab_slub(struct meminfo *, int);
+static void do_kmem_cache_slub(struct meminfo *);
 static void save_slab_data(struct meminfo *);
 static int slab_data_saved(struct meminfo *);
 static void dump_saved_slab_data(void);
@@ -97,7 +120,9 @@
 static void gather_slab_free_list_percpu(struct meminfo *);
 static void gather_cpudata_list_v1(struct meminfo *);
 static void gather_cpudata_list_v2(struct meminfo *);
+static void gather_cpudata_list_v2_nodes(struct meminfo *, int);
 static int check_cpudata_list(struct meminfo *, ulong);
+static int check_shared_list(struct meminfo *, ulong);
 static void gather_slab_cached_count(struct meminfo *);
 static void dump_slab_objects(struct meminfo *);
 static void dump_slab_objects_percpu(struct meminfo *);
@@ -110,6 +135,9 @@
 static void search(ulong, ulong, ulong, int, ulong *, int);
 static int next_upage(struct task_context *, ulong, ulong *);
 static int next_kpage(ulong, ulong *);
+static ulong last_vmalloc_address(void);
+static ulong next_vmlist_vaddr(ulong);
+static int next_identity_mapping(ulong, ulong *);
 static int vm_area_page_dump(ulong, ulong, ulong, ulong, void *, 
 	struct reference *);
 static int dump_swap_info(ulong, ulong *, ulong *);
@@ -118,15 +146,45 @@
 static char *vma_file_offset(ulong, ulong, char *);
 static ssize_t read_dev_kmem(ulong, char *, long);
 static void dump_memory_nodes(int);
+static void dump_zone_stats(void);
 #define MEMORY_NODES_DUMP       (0)
 #define MEMORY_NODES_INITIALIZE (1)
 static void node_table_init(void);
 static int compare_node_data(const void *, const void *);
 static void do_vm_flags(ulong);
 static void PG_reserved_flag_init(void);
+static void PG_slab_flag_init(void);
 static ulong nr_blockdev_pages(void);
-
-
+void sparse_mem_init(void);
+void dump_mem_sections(void);
+void list_mem_sections(void);
+ulong sparse_decode_mem_map(ulong, ulong);
+char *read_mem_section(ulong);
+ulong nr_to_section(ulong);
+int valid_section(ulong);
+int section_has_mem_map(ulong);
+ulong section_mem_map_addr(ulong);
+ulong valid_section_nr(ulong);
+ulong pfn_to_map(ulong);
+static int get_nodes_online(void);
+static int next_online_node(int);
+static ulong next_online_pgdat(int);
+static int vm_stat_init(void);
+static int vm_event_state_init(void);
+static int dump_vm_stat(char *, long *, ulong);
+static int dump_vm_event_state(void);
+static int dump_page_states(void);
+static int generic_read_dumpfile(ulonglong, void *, long, char *, ulong);
+static int generic_write_dumpfile(ulonglong, void *, long, char *, ulong);
+static int page_to_nid(ulong);
+static int get_kmem_cache_list(ulong **);
+static int get_kmem_cache_slub_data(long, struct meminfo *);
+static ulong compound_head(ulong);
+static long count_partial(ulong);
+static ulong get_freepointer(struct meminfo *, void *);
+static int count_free_objects(struct meminfo *, ulong);
+char *is_slab_page(struct meminfo *, char *);
+static void do_node_lists_slub(struct meminfo *, ulong, int);
 
 /*
  *  Memory display modes specific to this file.
@@ -142,6 +200,8 @@
 #define DECIMAL        (0x100)
 #define UDECIMAL       (0x200)
 #define ASCII_ENDLINE  (0x400)
+#define NO_ASCII       (0x800)
+#define SLAB_CACHE    (0x1000)
 
 static ulong DISPLAY_DEFAULT;
 
@@ -182,6 +242,10 @@
         MEMBER_OFFSET_INIT(mm_struct_mmap, "mm_struct", "mmap");
         MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd");
 	MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "rss");
+	if (!VALID_MEMBER(mm_struct_rss))
+		MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "_rss");
+	MEMBER_OFFSET_INIT(mm_struct_anon_rss, "mm_struct", "_anon_rss");
+	MEMBER_OFFSET_INIT(mm_struct_file_rss, "mm_struct", "_file_rss");
 	MEMBER_OFFSET_INIT(mm_struct_total_vm, "mm_struct", "total_vm");
 	MEMBER_OFFSET_INIT(mm_struct_start_code, "mm_struct", "start_code");
         MEMBER_OFFSET_INIT(vm_area_struct_vm_mm, "vm_area_struct", "vm_mm");
@@ -222,7 +286,16 @@
 		MEMBER_OFFSET_INIT(page_count, "page", "_count");
 	MEMBER_OFFSET_INIT(page_flags, "page", "flags");
         MEMBER_OFFSET_INIT(page_mapping, "page", "mapping");
+	if (INVALID_MEMBER(page_mapping))
+		ANON_MEMBER_OFFSET_INIT(page_mapping, "page", "mapping");
+	if (INVALID_MEMBER(page_mapping) && 
+	    (THIS_KERNEL_VERSION < LINUX(2,6,17)) &&
+	    MEMBER_EXISTS("page", "_mapcount"))
+		ASSIGN_OFFSET(page_mapping) = MEMBER_OFFSET("page", "_mapcount") +
+			STRUCT_SIZE("atomic_t") + sizeof(ulong);
         MEMBER_OFFSET_INIT(page_index, "page", "index");
+	if (INVALID_MEMBER(page_index))
+		ANON_MEMBER_OFFSET_INIT(page_index, "page", "index");
         MEMBER_OFFSET_INIT(page_buffers, "page", "buffers");
 	MEMBER_OFFSET_INIT(page_lru, "page", "lru");
 	MEMBER_OFFSET_INIT(page_pte, "page", "pte");
@@ -249,6 +322,9 @@
 	MEMBER_OFFSET_INIT(block_device_bd_disk, "block_device", "bd_disk");
 	MEMBER_OFFSET_INIT(inode_i_mapping, "inode", "i_mapping");
 	MEMBER_OFFSET_INIT(address_space_nrpages, "address_space", "nrpages");
+	if (INVALID_MEMBER(address_space_nrpages))
+		MEMBER_OFFSET_INIT(address_space_nrpages, "address_space", "__nrpages");
+
 	MEMBER_OFFSET_INIT(gendisk_major, "gendisk", "major");
 	MEMBER_OFFSET_INIT(gendisk_fops, "gendisk", "fops");
 	MEMBER_OFFSET_INIT(gendisk_disk_name, "gendisk", "disk_name");
@@ -270,6 +346,7 @@
 	STRUCT_SIZE_INIT(kmem_slab_s, "kmem_slab_s");
 	STRUCT_SIZE_INIT(slab_s, "slab_s");
 	STRUCT_SIZE_INIT(slab, "slab");
+	STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache_s");
 	STRUCT_SIZE_INIT(pgd_t, "pgd_t");
 
         if (!VALID_STRUCT(kmem_slab_s) && VALID_STRUCT(slab_s)) {
@@ -310,17 +387,49 @@
 		   !VALID_STRUCT(slab_s) && VALID_STRUCT(slab)) {
                 vt->flags |= PERCPU_KMALLOC_V2;
 
-		MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num");
-		MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next");
-		MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name");
-		MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", 
-			"colour_off");
-		MEMBER_OFFSET_INIT(kmem_cache_s_objsize,  "kmem_cache_s", 
-			"objsize");
-		MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags");
-		MEMBER_OFFSET_INIT(kmem_cache_s_gfporder,  
-			"kmem_cache_s", "gfporder");
-
+		if (VALID_STRUCT(kmem_cache_s)) {
+			MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num");
+			MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next");
+			MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name");
+			MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", 
+				"colour_off");
+			MEMBER_OFFSET_INIT(kmem_cache_s_objsize,  "kmem_cache_s", 
+				"objsize");
+			MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags");
+			MEMBER_OFFSET_INIT(kmem_cache_s_gfporder,  
+				"kmem_cache_s", "gfporder");
+
+			MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache_s", "lists");
+			MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache_s", "array");
+			ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.array", NULL, 0);
+		} else {
+			STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache");
+			MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache", "num");
+			MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache", "next");
+			MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache", "name");
+			MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache", 
+				"colour_off");
+			if (MEMBER_EXISTS("kmem_cache", "objsize"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", 
+					"objsize");
+			else if (MEMBER_EXISTS("kmem_cache", "buffer_size"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", 
+					"buffer_size");
+			MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache", "flags");
+			MEMBER_OFFSET_INIT(kmem_cache_s_gfporder,  
+				"kmem_cache", "gfporder");
+
+			if (MEMBER_EXISTS("kmem_cache", "lists"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists");
+			else if (MEMBER_EXISTS("kmem_cache", "nodelists")) {
+                		vt->flags |= PERCPU_KMALLOC_V2_NODES;
+				MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "nodelists");
+				ARRAY_LENGTH_INIT(vt->kmem_cache_len_nodes, NULL, 
+					"kmem_cache.nodelists", NULL, 0);
+			}
+			MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache", "array");
+			ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.array", NULL, 0);
+		}
 		MEMBER_OFFSET_INIT(slab_list, "slab", "list");
 		MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem");
 		MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse");
@@ -330,10 +439,6 @@
 		MEMBER_OFFSET_INIT(array_cache_limit, "array_cache", "limit");
 		STRUCT_SIZE_INIT(array_cache, "array_cache");
 
-		MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache_s", "lists");
-		MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache_s", "array");
-                ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.array", NULL, 0);
-
 		MEMBER_OFFSET_INIT(kmem_list3_slabs_partial, 
 			"kmem_list3", "slabs_partial");
 		MEMBER_OFFSET_INIT(kmem_list3_slabs_full, 
@@ -343,6 +448,52 @@
 		MEMBER_OFFSET_INIT(kmem_list3_free_objects, 
 			"kmem_list3", "free_objects");
 		MEMBER_OFFSET_INIT(kmem_list3_shared, "kmem_list3", "shared");
+	} else if (MEMBER_EXISTS("kmem_cache", "cpu_slab") &&
+		STRUCT_EXISTS("kmem_cache_node")) {
+		vt->flags |= KMALLOC_SLUB;
+
+		STRUCT_SIZE_INIT(kmem_cache, "kmem_cache");
+		MEMBER_OFFSET_INIT(kmem_cache_size, "kmem_cache", "size");
+		MEMBER_OFFSET_INIT(kmem_cache_objsize, "kmem_cache", "objsize");
+		MEMBER_OFFSET_INIT(kmem_cache_offset, "kmem_cache", "offset");
+		MEMBER_OFFSET_INIT(kmem_cache_order, "kmem_cache", "order");
+		MEMBER_OFFSET_INIT(kmem_cache_local_node, "kmem_cache", "local_node");
+		MEMBER_OFFSET_INIT(kmem_cache_objects, "kmem_cache", "objects");
+		MEMBER_OFFSET_INIT(kmem_cache_inuse, "kmem_cache", "inuse");
+		MEMBER_OFFSET_INIT(kmem_cache_align, "kmem_cache", "align");
+		MEMBER_OFFSET_INIT(kmem_cache_node, "kmem_cache", "node");
+		MEMBER_OFFSET_INIT(kmem_cache_cpu_slab, "kmem_cache", "cpu_slab");
+		MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list");
+		MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name");
+		MEMBER_OFFSET_INIT(kmem_cache_flags, "kmem_cache", "flags");
+		MEMBER_OFFSET_INIT(kmem_cache_cpu_freelist, "kmem_cache_cpu", "freelist");
+		MEMBER_OFFSET_INIT(kmem_cache_cpu_page, "kmem_cache_cpu", "page");
+		MEMBER_OFFSET_INIT(kmem_cache_cpu_node, "kmem_cache_cpu", "node");
+		ANON_MEMBER_OFFSET_INIT(page_inuse, "page", "inuse");
+		ANON_MEMBER_OFFSET_INIT(page_offset, "page", "offset");
+		ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab");
+		ANON_MEMBER_OFFSET_INIT(page_first_page, "page", "first_page");
+		ANON_MEMBER_OFFSET_INIT(page_freelist, "page", "freelist");
+		if (INVALID_MEMBER(kmem_cache_objects)) {
+			MEMBER_OFFSET_INIT(kmem_cache_oo, "kmem_cache", "oo");
+			ANON_MEMBER_OFFSET_INIT(page_objects, "page", "objects");
+		}
+		if (VALID_MEMBER(kmem_cache_node)) {
+                	ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.node", NULL, 0);
+			vt->flags |= CONFIG_NUMA;
+		}
+                ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.cpu_slab", NULL, 0);
+
+		STRUCT_SIZE_INIT(kmem_cache_node, "kmem_cache_node");
+		STRUCT_SIZE_INIT(kmem_cache_cpu, "kmem_cache_cpu");
+		MEMBER_OFFSET_INIT(kmem_cache_node_nr_partial, 
+			"kmem_cache_node", "nr_partial");
+		MEMBER_OFFSET_INIT(kmem_cache_node_nr_slabs, 
+			"kmem_cache_node", "nr_slabs");
+		MEMBER_OFFSET_INIT(kmem_cache_node_partial, 
+			"kmem_cache_node", "partial");
+		MEMBER_OFFSET_INIT(kmem_cache_node_full, 
+			"kmem_cache_node", "full");
 	} else {
 		MEMBER_OFFSET_INIT(kmem_cache_s_c_nextp,  
 			"kmem_cache_s", "c_nextp");
@@ -381,6 +532,22 @@
 			"kmem_slab_s", "s_magic");
 	}
 
+	if (!kt->kernel_NR_CPUS) {
+		if (ARRAY_LENGTH(kmem_cache_s_cpudata))
+			kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_cpudata);
+		else if (ARRAY_LENGTH(kmem_cache_s_array))
+			kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_array);
+		else if (ARRAY_LENGTH(kmem_cache_cpu_slab))
+			kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_cpu_slab);
+	}
+		
+        if (kt->kernel_NR_CPUS > NR_CPUS) {
+		error(WARNING, 
+		    "kernel-configured NR_CPUS (%d) greater than compiled-in NR_CPUS (%d)\n",
+			kt->kernel_NR_CPUS, NR_CPUS);
+		error(FATAL, "recompile crash with larger NR_CPUS\n");
+	}
+
 	if (machdep->init_kernel_pgd)
 		machdep->init_kernel_pgd();
 	else if (symbol_exists("swapper_pg_dir")) {
@@ -415,10 +582,17 @@
 		error(FATAL, "no swapper_pg_dir or cpu_pgd symbols exist?\n");
 
 	get_symbol_data("high_memory", sizeof(ulong), &vt->high_memory);
-	if (kernel_symbol_exists("mem_map"))
+
+	if (kernel_symbol_exists("mem_section"))
+		vt->flags |= SPARSEMEM;
+	else if (kernel_symbol_exists("mem_map")) {
 		get_symbol_data("mem_map", sizeof(char *), &vt->mem_map);
-	else
+		vt->flags |= FLATMEM;
+	} else
 		vt->flags |= DISCONTIGMEM;
+
+	sparse_mem_init();
+
 	vt->vmalloc_start = machdep->vmalloc_start();
 	if (IS_VMALLOC_ADDR(vt->mem_map))
 		vt->flags |= V_MEM_MAP;
@@ -470,15 +644,15 @@
 
 	if (kernel_symbol_exists("mem_map"))
         	get_symbol_data("max_mapnr", sizeof(ulong), &vt->max_mapnr);
-	get_symbol_data("nr_swapfiles", sizeof(unsigned int), 
-		&vt->nr_swapfiles);
+	if (kernel_symbol_exists("nr_swapfiles"))
+		get_symbol_data("nr_swapfiles", sizeof(unsigned int), 
+			&vt->nr_swapfiles);
 
 	STRUCT_SIZE_INIT(page, "page");
 	STRUCT_SIZE_INIT(free_area, "free_area");
 	STRUCT_SIZE_INIT(free_area_struct, "free_area_struct");
 	STRUCT_SIZE_INIT(zone, "zone");
 	STRUCT_SIZE_INIT(zone_struct, "zone_struct");
-	STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache_s");
 	STRUCT_SIZE_INIT(kmem_bufctl_t, "kmem_bufctl_t");
 	STRUCT_SIZE_INIT(swap_info_struct, "swap_info_struct");
 	STRUCT_SIZE_INIT(mm_struct, "mm_struct");
@@ -488,13 +662,20 @@
 	if (VALID_STRUCT(pglist_data)) {
 		vt->flags |= ZONES;
 
-		if (symbol_exists("pgdat_list")) 
+		if (symbol_exists("pgdat_list") && !IS_SPARSEMEM()) 
 			vt->flags |= NODES;
 
+		/*
+		 *  Determine the number of nodes the best way possible,
+		 *  starting with a default of 1.
+		 */
+		vt->numnodes = 1;
+
 		if (symbol_exists("numnodes"))
 			get_symbol_data("numnodes", sizeof(int), &vt->numnodes);
-		else 
-			vt->numnodes = 1;
+
+		if (get_nodes_online())
+			vt->flags |= NODES_ONLINE;
 
 		MEMBER_OFFSET_INIT(pglist_data_node_zones, 
 			"pglist_data", "node_zones");
@@ -524,6 +705,7 @@
 		ARRAY_LENGTH_INIT(vt->nr_zones, pglist_data_node_zones,
 			"pglist_data.node_zones", NULL, 
 			SIZE_OPTION(zone_struct, zone));
+		vt->ZONE_HIGHMEM = vt->nr_zones - 1;
 
 		if (VALID_STRUCT(zone_struct)) {
 	                MEMBER_OFFSET_INIT(zone_struct_free_pages, 
@@ -539,6 +721,8 @@
 			if (INVALID_MEMBER(zone_struct_size))
 	                	MEMBER_OFFSET_INIT(zone_struct_memsize, 
 					"zone_struct", "memsize");
+			MEMBER_OFFSET_INIT(zone_struct_zone_start_pfn,
+				"zone_struct", "zone_start_pfn");
 	                MEMBER_OFFSET_INIT(zone_struct_zone_start_paddr,  
 	                        "zone_struct", "zone_start_paddr");
 	                MEMBER_OFFSET_INIT(zone_struct_zone_start_mapnr, 
@@ -565,8 +749,17 @@
                 	vt->dump_free_pages = dump_free_pages_zones_v1;
 
 		} else if (VALID_STRUCT(zone)) {
-                        MEMBER_OFFSET_INIT(zone_free_pages, 
-				"zone", "free_pages");
+			MEMBER_OFFSET_INIT(zone_vm_stat, "zone", "vm_stat");
+			MEMBER_OFFSET_INIT(zone_free_pages, "zone", "free_pages");
+			if (INVALID_MEMBER(zone_free_pages) && 
+			    VALID_MEMBER(zone_vm_stat)) {
+				long nr_free_pages = 0;
+				if (!enumerator_value("NR_FREE_PAGES", &nr_free_pages))
+					error(WARNING, 
+					    "cannot determine NR_FREE_PAGES enumerator\n");
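+				/*
+				 *  zone.vm_stat[] is an array of per-zone
+				 *  counters indexed by the zone_stat_item
+				 *  enumerators, so the NR_FREE_PAGES counter
+				 *  lives that many longs past vm_stat.
+				 */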
+				ASSIGN_OFFSET(zone_free_pages) = OFFSET(zone_vm_stat) + 
+					(nr_free_pages * sizeof(long));
+			}
                         MEMBER_OFFSET_INIT(zone_free_area,
                                 "zone", "free_area");
                         MEMBER_OFFSET_INIT(zone_zone_pgdat,
@@ -579,12 +772,23 @@
                                 "zone", "zone_start_pfn");
                         MEMBER_OFFSET_INIT(zone_spanned_pages,
                                 "zone", "spanned_pages");
+                        MEMBER_OFFSET_INIT(zone_present_pages,
+                                "zone", "present_pages");
                         MEMBER_OFFSET_INIT(zone_pages_min,
                                 "zone", "pages_min");
                         MEMBER_OFFSET_INIT(zone_pages_low,
                                 "zone", "pages_low");
                         MEMBER_OFFSET_INIT(zone_pages_high,
                                 "zone", "pages_high");
+                        MEMBER_OFFSET_INIT(zone_nr_active,
+                                "zone", "nr_active");
+                        MEMBER_OFFSET_INIT(zone_nr_inactive,
+                                "zone", "nr_inactive");
+                        MEMBER_OFFSET_INIT(zone_all_unreclaimable,
+                                "zone", "all_unreclaimable");
+                        MEMBER_OFFSET_INIT(zone_flags, "zone", "flags");
+                        MEMBER_OFFSET_INIT(zone_pages_scanned, "zone", 
+				"pages_scanned");
 	        	ARRAY_LENGTH_INIT(vt->nr_free_areas, zone_free_area,
 				"zone.free_area", NULL, SIZE(free_area));
                 	vt->dump_free_pages = dump_free_pages_zones_v2;
@@ -603,6 +807,8 @@
                 vt->dump_kmem_cache = dump_kmem_cache_percpu_v1;
 	else if (vt->flags & PERCPU_KMALLOC_V2) 
                 vt->dump_kmem_cache = dump_kmem_cache_percpu_v2;
+	else if (vt->flags & KMALLOC_SLUB)
+                vt->dump_kmem_cache = dump_kmem_cache_slub;
 	else 
                 vt->dump_kmem_cache = dump_kmem_cache;
 
@@ -640,13 +846,7 @@
 	kmem_cache_init();
 
 	PG_reserved_flag_init();
-
-        if (VALID_MEMBER(page_pte)) {
-		if (THIS_KERNEL_VERSION < LINUX(2,6,0))
-                	vt->PG_slab = 10;
-		else if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
-                	vt->PG_slab = 7;
-	}
+	PG_slab_flag_init();
 }
 
 /*
@@ -685,7 +885,7 @@
 	memtype = KVADDR;
 	count = -1;
 
-        while ((c = getopt(argcnt, args, "e:pudDuso:81:3:6:")) != EOF) {
+        while ((c = getopt(argcnt, args, "xme:pfudDusSo:81:3:6:")) != EOF) {
                 switch(c)
 		{
 		case '8':
@@ -731,12 +931,15 @@
 			break;
 
 		case 's':
-			if (flag & DISPLAY_DEFAULT)
+		case 'S':
+			if (flag & DISPLAY_DEFAULT) {
 				flag |= SYMBOLIC;
-			else {
-				error(INFO, 
-				   "-s only allowed with %d-bit display\n",
-					DISPLAY_DEFAULT == DISPLAY_64 ?
+				if (c == 'S')
+					flag |= SLAB_CACHE;
+			} else {
+				error(INFO, "-%c option"
+				    " is only allowed with %d-bit display\n",
+					c, DISPLAY_DEFAULT == DISPLAY_64 ?
 					64 : 32);
 				argerrs++;
 			}
@@ -748,12 +951,12 @@
 			break;
 
 		case 'p':
-			memtype &= ~(UVADDR|KVADDR);
+			memtype &= ~(UVADDR|KVADDR|XENMACHADDR|FILEADDR);
 			memtype = PHYSADDR;
 			break;
 
 		case 'u':
-			memtype &= ~(KVADDR|PHYSADDR);
+			memtype &= ~(KVADDR|PHYSADDR|XENMACHADDR|FILEADDR);
 			memtype = UVADDR;
 			break;
 
@@ -767,6 +970,25 @@
                         flag |= UDECIMAL;
 			break;
 
+		case 'm':
+                	if (!(kt->flags & ARCH_XEN))
+                        	error(FATAL, "-m option only applies to xen architecture\n");
+			memtype &= ~(UVADDR|KVADDR|FILEADDR);
+			memtype = XENMACHADDR;
+			break;
+
+		case 'f':
+			if (!pc->dumpfile)
+				error(FATAL, 
+					"-f option requires a dumpfile\n");
+			memtype &= ~(KVADDR|UVADDR|PHYSADDR|XENMACHADDR);
+			memtype = FILEADDR;
+			break;
+
+		case 'x':
+                        flag |= NO_ASCII;
+			break;
+
 		default:
 			argerrs++;
 			break;
@@ -830,7 +1052,7 @@
 		error(WARNING, 
 		    "ending address ignored when count is specified\n");
 
-	if ((flag & HEXADECIMAL) && !(flag & SYMBOLIC))
+	if ((flag & HEXADECIMAL) && !(flag & SYMBOLIC) && !(flag & NO_ASCII))
 		flag |= ASCII_ENDLINE;
 
 	if (memtype == KVADDR) {
@@ -839,7 +1061,6 @@
 	}
 
 	display_memory(addr, count, flag, memtype);
-        
 }
 
 /*
@@ -866,6 +1087,7 @@
         uint16_t u16;
         uint32_t u32;
         uint64_t u64;
+        uint64_t limit64;
 };
 
 static void
@@ -884,6 +1106,7 @@
 	char ch;
 	int linelen;
 	char buf[BUFSIZE];
+	char slab[BUFSIZE];
 	int ascii_start;
 	char *hex_64_fmt = BITS32() ? "%.*llx " : "%.*lx ";
 	char *dec_64_fmt = BITS32() ? "%12lld " : "%15ld ";
@@ -903,6 +1126,12 @@
 	case PHYSADDR:
 		addrtype = "PHYSADDR";
 		break;
+	case XENMACHADDR:
+		addrtype = "XENMACHADDR";
+		break;
+	case FILEADDR:
+		addrtype = "FILEADDR";
+		break;
 	}
 
 	if (CRASHDEBUG(4))
@@ -910,6 +1139,7 @@
 			addr, count, flag, addrtype);
 
 	origaddr = addr;
+	BZERO(&mem, sizeof(struct memloc));
 
 	switch (flag & (DISPLAY_TYPES))
 	{
@@ -919,6 +1149,8 @@
 		location = &mem.u64;
 		sprintf(readtype, "64-bit %s", addrtype); 
 		per_line = ENTRIES_64; 
+		if (machine_type("IA64"))
+			mem.limit64 = kt->end;
 		break;
 
 	case DISPLAY_32:
@@ -970,9 +1202,23 @@
 	        case DISPLAY_64:
 			if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) ==
 			    (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) {
-				if (in_ksymbol_range(mem.u64)) {
-					fprintf(fp, "%-16s ",
-                                            value_to_symstr(mem.u64, buf, 0));
+				if ((!mem.limit64 || (mem.u64 <= mem.limit64)) && 
+				    in_ksymbol_range(mem.u64) &&
+				    strlen(value_to_symstr(mem.u64, buf, 0))) {
+					fprintf(fp, "%-16s ", buf);
+					linelen += strlen(buf)+1;
+					break;
+				}
+				if ((flag & SLAB_CACHE) && 
+				    vaddr_to_kmem_cache(mem.u64, slab, 
+				    !VERBOSE)) {
+					if (CRASHDEBUG(1))
+						sprintf(buf, "[%llx:%s]", 
+							(ulonglong)mem.u64,
+							slab);
+					else
+						sprintf(buf, "[%s]", slab);
+					fprintf(fp, "%-16s ", buf);
 					linelen += strlen(buf)+1;
 					break;
 				}
@@ -993,11 +1239,23 @@
 	        case DISPLAY_32:
                         if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) ==
                             (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) {
-				if (in_ksymbol_range(mem.u32)) {
+				if (in_ksymbol_range(mem.u32) &&
+				    strlen(value_to_symstr(mem.u32, buf, 0))) {
+					fprintf(fp, INT_PRLEN == 16 ? 
+					    "%-16s " : "%-8s ", buf);
+					linelen += strlen(buf)+1;
+					break;
+				}
+				if ((flag & SLAB_CACHE) && 
+				    vaddr_to_kmem_cache(mem.u32, slab, 
+				    !VERBOSE)) {
+					if (CRASHDEBUG(1))
+						sprintf(buf, "[%x:%s]", 
+							mem.u32, slab);
+					else
+						sprintf(buf, "[%s]", slab);
 					fprintf(fp, INT_PRLEN == 16 ? 
-					    "%-16s " : "%-8s ",
-                                                value_to_symstr(mem.u32,
-						                buf, 0));
+					    "%-16s " : "%-8s ", buf);
 					linelen += strlen(buf)+1;
 					break;
 				}
@@ -1138,7 +1396,7 @@
 	size = sizeof(void*);
 	addr_entered = value_entered = FALSE;
 
-        while ((c = getopt(argcnt, args, "ukp81:3:6:")) != EOF) {
+        while ((c = getopt(argcnt, args, "fukp81:3:6:")) != EOF) {
                 switch(c)
 		{
 		case '8':
@@ -1173,17 +1431,33 @@
 			break;
 
 		case 'p':
+			memtype &= ~(UVADDR|KVADDR|FILEADDR);
 			memtype = PHYSADDR;
 			break;
 
 		case 'u':
+			memtype &= ~(PHYSADDR|KVADDR|FILEADDR);
 			memtype = UVADDR;
 			break;
 
 		case 'k':
+			memtype &= ~(PHYSADDR|UVADDR|FILEADDR);
 			memtype = KVADDR;
 			break;
 
+		case 'f':   
+			/*  
+			 *  Unsupported, but can be forcibly implemented
+			 *  by removing the DUMPFILE() check above and
+			 *  recompiling.
+			 */
+			if (!pc->dumpfile)
+				error(FATAL, 
+					"-f option requires a dumpfile\n");
+			memtype &= ~(PHYSADDR|UVADDR|KVADDR);
+			memtype = FILEADDR;
+			break;
+
 		default:
 			argerrs++;
 			break;
@@ -1262,6 +1536,9 @@
 	case PHYSADDR:
 		break;
 
+	case FILEADDR:
+		break;
+
 	case AMBIGUOUS:	
 		error(INFO, 
 		    "ambiguous address: %llx  (requires -p, -u or -k)\n",
@@ -1309,6 +1586,8 @@
 raw_data_dump(ulong addr, long count, int symbolic)
 {
 	long wordcnt;
+	ulonglong address;
+	int memtype;
 
 	switch (sizeof(long))
 	{
@@ -1328,9 +1607,20 @@
 		break;
 	}
 
-	display_memory(addr, wordcnt, 
+	if (pc->curcmd_flags & MEMTYPE_FILEADDR) {
+		address = pc->curcmd_private;
+		memtype = FILEADDR;
+	} else if (pc->curcmd_flags & MEMTYPE_UVADDR) {
+		address = (ulonglong)addr;
+		memtype = UVADDR;
+	} else {
+		address = (ulonglong)addr;
+		memtype = KVADDR;
+	}
+
+	display_memory(address, wordcnt, 
  	    HEXADECIMAL|DISPLAY_DEFAULT|(symbolic ? SYMBOLIC : ASCII_ENDLINE),
-		KVADDR);
+		memtype);
 }
 
 /*
@@ -1351,7 +1641,7 @@
  *  is appropriate:
  *
  *         addr  a user, kernel or physical memory address.
- *      memtype  addr type: UVADDR, KVADDR or PHYSADDR. 
+ *      memtype  addr type: UVADDR, KVADDR, PHYSADDR, XENMACHADDR or FILEADDR 
  *       buffer  supplied buffer to read the data into.
  *         size  number of bytes to read.
  *         type  string describing the request -- helpful when the read fails.
@@ -1368,6 +1658,7 @@
 #define SEEK_ERRMSG      "seek error: %s address: %llx  type: \"%s\"\n"
 #define READ_ERRMSG      "read error: %s address: %llx  type: \"%s\"\n"
 #define WRITE_ERRMSG     "write error: %s address: %llx  type: \"%s\"\n"
+#define PAGE_EXCLUDED_ERRMSG  "page excluded: %s address: %llx  type: \"%s\"\n"
 
 int
 readmem(ulonglong addr, int memtype, void *buffer, long size,
@@ -1376,6 +1667,7 @@
 	int fd;
 	long cnt;
 	physaddr_t paddr;
+	ulonglong pseudo;
 	char *bufptr;
 
 	if (CRASHDEBUG(4))
@@ -1424,7 +1716,11 @@
                 break;
 
         case PHYSADDR:
+	case XENMACHADDR:
                 break;
+
+	case FILEADDR:
+		return generic_read_dumpfile(addr, buffer, size, type, error_handle);
         }
 
         while (size > 0) {
@@ -1449,6 +1745,17 @@
 		case PHYSADDR:
 			paddr = addr;
 			break;
+
+		case XENMACHADDR:
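+			/*
+			 *  xen_m2p() maps the machine page to its
+			 *  pseudo-physical page; the offset within the
+			 *  page is OR'd back in below.  If no mapping
+			 *  exists, flag the address so the underlying
+			 *  read rejects it.
+			 */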
+			pseudo = xen_m2p(addr);
+
+                	if (pseudo == XEN_MACHADDR_NOT_FOUND) {
+                        	pc->curcmd_flags |= XEN_MACHINE_ADDR;
+				paddr = addr;  
+                	} else
+                        	paddr = pseudo | PAGEOFFSET(addr);
+
+			break;
 		}
 
 		/* 
@@ -1460,7 +1767,7 @@
                         cnt = size;
 
 		switch (READMEM(fd, bufptr, cnt, 
-		    memtype == PHYSADDR ? 0 : addr, paddr))
+		    (memtype == PHYSADDR) || (memtype == XENMACHADDR) ? 0 : addr, paddr))
 		{
 		case SEEK_ERROR:
                         if (PRINT_ERROR_MESSAGE)
@@ -1472,6 +1779,11 @@
                         	error(INFO, READ_ERRMSG, memtype_string(memtype, 0), addr, type);
                         goto readmem_error;
 
+		case PAGE_EXCLUDED:
+                        if (PRINT_ERROR_MESSAGE)
+                        	error(INFO, PAGE_EXCLUDED_ERRMSG, memtype_string(memtype, 0), addr, type);
+                        goto readmem_error;
+
 		default:
 			break;
 		}
@@ -1610,6 +1922,9 @@
 int
 read_memory_device(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
 {
+	if (pc->curcmd_flags & XEN_MACHINE_ADDR)
+		return READ_ERROR;
+
         if (!machdep->verify_paddr(paddr)) {
                 if (CRASHDEBUG(1))
                         error(INFO, "verify_paddr(%lx) failed\n", paddr);
@@ -1754,6 +2069,12 @@
 	case PHYSADDR:
 		sprintf(membuf, debug ? "PHYSADDR" : "physical");
 		break;
+	case XENMACHADDR:
+		sprintf(membuf, debug ? "XENMACHADDR" : "xen machine");
+		break;
+	case FILEADDR:
+		sprintf(membuf, debug ? "FILEADDR" : "dumpfile");
+		break;
 	default:
 		if (debug)
 			sprintf(membuf, "0x%x (?)", memtype);
@@ -1849,6 +2170,10 @@
 
         case PHYSADDR:
                 break;
+
+	case FILEADDR:
+		return generic_write_dumpfile(addr, buffer, size, type, error_handle);
         }
 
         while (size > 0) {
@@ -1946,6 +2271,77 @@
 }
 
 /*
+ *  Generic dumpfile read/write functions to handle FILEADDR 
+ *  memtype arguments to readmem() and writemem().  These are
+ *  not to be confused with pc->readmem/writemem plug-ins.
+ */
+static int 
+generic_read_dumpfile(ulonglong addr, void *buffer, long size, char *type, 
+	ulong error_handle)
+{
+	int fd;
+	int retval;
+
+	retval = TRUE;
+
+	if (!pc->dumpfile)
+		error(FATAL, "command requires a dumpfile\n");
+
+	if ((fd = open(pc->dumpfile, O_RDONLY)) < 0)
+		error(FATAL, "%s: %s\n", pc->dumpfile,
+			strerror(errno));
+
+	if (lseek(fd, addr, SEEK_SET) == -1) {
+		if (PRINT_ERROR_MESSAGE)
+                	error(INFO, SEEK_ERRMSG, 
+				memtype_string(FILEADDR, 0), addr, type);
+		retval = FALSE;
+	} else if (read(fd, buffer, size) != size) {
+		if (PRINT_ERROR_MESSAGE)
+			error(INFO, READ_ERRMSG, 
+				memtype_string(FILEADDR, 0), addr, type);
+		retval = FALSE;
+	}
+
+	close(fd);
+
+	return retval;
+}
+
+static int 
+generic_write_dumpfile(ulonglong addr, void *buffer, long size, char *type, 
+	ulong error_handle)
+{
+	int fd;
+	int retval;
+
+	retval = TRUE;
+
+	if (!pc->dumpfile)
+		error(FATAL, "command requires a dumpfile\n");
+
+	if ((fd = open(pc->dumpfile, O_WRONLY)) < 0)
+		error(FATAL, "%s: %s\n", pc->dumpfile,
+			strerror(errno));
+
+	if (lseek(fd, addr, SEEK_SET) == -1) {
+		if (PRINT_ERROR_MESSAGE)
+                	error(INFO, SEEK_ERRMSG, 
+				memtype_string(FILEADDR, 0), addr, type);
+		retval = FALSE;
+	} else if (write(fd, buffer, size) != size) {
+		if (PRINT_ERROR_MESSAGE)
+			error(INFO, WRITE_ERRMSG, 
+				memtype_string(FILEADDR, 0), addr, type);
+		retval = FALSE;
+	}
+
+	close(fd);
+
+	return retval;
+}
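+
+/*
+ *  For illustration only: a raw byte offset in the dumpfile can be
+ *  read through the normal readmem() entry point by passing the
+ *  FILEADDR memtype, which dispatches to generic_read_dumpfile()
+ *  above, e.g.:
+ *
+ *      char buf[16];
+ *      readmem((ulonglong)0x1000, FILEADDR, buf, sizeof(buf),
+ *              "dumpfile bytes", RETURN_ON_ERROR|QUIET);
+ *
+ *  (The 0x1000 offset and "dumpfile bytes" label are arbitrary
+ *  example values.)
+ */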
+
+/*
  *  Translates a kernel virtual address to its physical address.  cmd_vtop()
  *  sets the verbose flag so that the pte translation gets displayed; all 
  *  other callers quietly accept the translation.
@@ -2113,6 +2509,8 @@
 		break;
         }
 
+	paddr = 0;
+
 	switch (memtype) {
 	case UVADDR: 
                 fprintf(fp, "%s  %s\n",
@@ -2126,9 +2524,12 @@
 			return;
 		}
 		if (!uvtop(tc, vaddr, &paddr, 0)) {
-			fprintf(fp, "%s  (not mapped)\n\n", 
+			fprintf(fp, "%s  %s\n\n", 
 				mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
-				    MKSTR(vaddr)));
+				    MKSTR(vaddr)),
+				(XEN() && (paddr == PADDR_NOT_AVAILABLE)) ?
+				"(page not available)" : "(not mapped)");
+
 			page_exists = FALSE;
 		} else {
 			fprintf(fp, "%s  %s\n\n",
@@ -2161,9 +2562,13 @@
 		}
 		if (vtop_flags & USE_USER_PGD) {
                 	if (!uvtop(tc, vaddr, &paddr, 0)) {
-                        	fprintf(fp, "%s  (not mapped)\n\n", 
+                        	fprintf(fp, "%s  %s\n\n", 
 					mkstring(buf1, UVADDR_PRLEN,
-                                        LJUST|LONG_HEX, MKSTR(vaddr)));
+                                        LJUST|LONG_HEX, MKSTR(vaddr)),
+					(XEN() && 
+					(paddr == PADDR_NOT_AVAILABLE)) ?
+					"(page not available)" :
+					"(not mapped)");
                         	page_exists = FALSE;
                 	} else {
                          	fprintf(fp, "%s  %s\n\n", 
@@ -2176,9 +2581,13 @@
                 	uvtop(tc, vaddr, &paddr, VERBOSE);
 		} else {
 			if (!kvtop(tc, vaddr, &paddr, 0)) {
-				fprintf(fp, "%s  (not mapped)\n\n", 
+				fprintf(fp, "%s  %s\n\n", 
 					mkstring(buf1, VADDR_PRLEN,
-                                        LJUST|LONG_HEX, MKSTR(vaddr)));
+                                        LJUST|LONG_HEX, MKSTR(vaddr)),
+					(XEN() && 
+					(paddr == PADDR_NOT_AVAILABLE)) ?
+					"(page not available)" :
+					"(not mapped)");
 				page_exists = FALSE;
 			} else {
 				fprintf(fp, "%s  %s\n\n",
@@ -2568,7 +2977,7 @@
 #define VM_REF_CHECK_DECVAL(X,V) \
    (DO_REF_SEARCH(X) && ((X)->cmdflags & VM_REF_NUMBER) && ((X)->decval == (V)))
 #define VM_REF_CHECK_STRING(X,S) \
-   (DO_REF_SEARCH(X) && (S) && FILENAME_COMPONENT((S),(X)->str))
+   (DO_REF_SEARCH(X) && (string_exists(S)) && FILENAME_COMPONENT((S),(X)->str))
 #define VM_REF_FOUND(X)    ((X) && ((X)->cmdflags & VM_REF_HEADER))
 
 ulong
@@ -2839,7 +3248,8 @@
 
 			if (DO_REF_SEARCH(ref)) { 
 				if (VM_REF_CHECK_DECVAL(ref, 
-				    SWP_OFFSET(paddr))) {
+				    THIS_KERNEL_VERSION >= LINUX(2,6,0) ?
+				    __swp_offset(paddr) : SWP_OFFSET(paddr))) {
 					if (DO_REF_DISPLAY(ref))
 						display = TRUE;
 					else {
@@ -2979,7 +3386,20 @@
 	if (!task_mm(task, TRUE))
 		return;
 
-        tm->rss = ULONG(tt->mm_struct + OFFSET(mm_struct_rss));
+	if (VALID_MEMBER(mm_struct_rss))
+		/*  
+		 *  mm_struct.rss or mm_struct._rss exist. 
+		 */
+        	tm->rss = ULONG(tt->mm_struct + OFFSET(mm_struct_rss));
+	else {
+		/*  
+		 *  mm_struct._anon_rss and mm_struct._file_rss should exist. 
+		 */
+		if (VALID_MEMBER(mm_struct_anon_rss))
+			tm->rss +=  ULONG(tt->mm_struct + OFFSET(mm_struct_anon_rss));
+		if (VALID_MEMBER(mm_struct_file_rss))
+			tm->rss +=  ULONG(tt->mm_struct + OFFSET(mm_struct_file_rss));
+	}
         tm->total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm));
         tm->pgd_addr = ULONG(tt->mm_struct + OFFSET(mm_struct_pgd));
 
@@ -3036,6 +3456,12 @@
 #define GET_INACTIVE_DIRTY     (ADDRESS_SPECIFIED << 13)  /* obsolete */
 #define SLAB_GET_COUNTS        (ADDRESS_SPECIFIED << 14)
 #define SLAB_WALKTHROUGH       (ADDRESS_SPECIFIED << 15)
+#define GET_VMLIST_COUNT       (ADDRESS_SPECIFIED << 16)
+#define GET_VMLIST             (ADDRESS_SPECIFIED << 17)
+#define SLAB_DATA_NOSAVE       (ADDRESS_SPECIFIED << 18)
+#define GET_SLUB_SLABS         (ADDRESS_SPECIFIED << 19)
+#define GET_SLUB_OBJECTS       (ADDRESS_SPECIFIED << 20)
+#define VMLIST_VERIFY          (ADDRESS_SPECIFIED << 21)
 
 #define GET_ALL \
 	(GET_SHARED_PAGES|GET_TOTALRAM_PAGES|GET_BUFFERS_PAGES|GET_SLAB_PAGES)
@@ -3045,8 +3471,8 @@
 {
 	int i;
 	int c;
-	int sflag, Sflag, pflag, fflag, Fflag, vflag; 
-	int nflag, cflag, Cflag, iflag, lflag, Lflag, Pflag;
+	int sflag, Sflag, pflag, fflag, Fflag, vflag, zflag; 
+	int nflag, cflag, Cflag, iflag, lflag, Lflag, Pflag, Vflag;
 	struct meminfo meminfo;
 	ulonglong value[MAXARGS];
 	char buf[BUFSIZE];
@@ -3054,18 +3480,26 @@
 	int spec_addr;
 
 	spec_addr = 0;
-        sflag =	Sflag = pflag = fflag = Fflag = Pflag = 0;
-	vflag = Cflag = cflag = iflag = nflag = lflag = Lflag = 0;
+        sflag =	Sflag = pflag = fflag = Fflag = Pflag = zflag = 0;
+	vflag = Cflag = cflag = iflag = nflag = lflag = Lflag = Vflag = 0;
 	BZERO(&meminfo, sizeof(struct meminfo));
 	BZERO(&value[0], sizeof(ulonglong)*MAXARGS);
 
-        while ((c = getopt(argcnt, args, "I:sSFfpvcCinl:L:P")) != EOF) {
+        while ((c = getopt(argcnt, args, "I:sSFfpvczCinl:L:PV")) != EOF) {
                 switch(c)
 		{
+		case 'V':
+			Vflag = 1;
+			break;
+
 		case 'n':
 			nflag = 1;
 			break;
 
+		case 'z':
+			zflag = 1;
+			break;
+
 		case 'i': 
 			iflag = 1;
 			break;
@@ -3153,13 +3587,13 @@
 	if (argerrs)
 		cmd_usage(pc->curcmd, SYNOPSIS);
 
-        if ((sflag + Sflag + pflag + fflag + Fflag + 
+        if ((sflag + Sflag + pflag + fflag + Fflag + Vflag +
             vflag + Cflag + cflag + iflag + lflag + Lflag) > 1) {
 		error(INFO, "only one flag allowed!\n");
 		cmd_usage(pc->curcmd, SYNOPSIS);
 	} 
 
-	if (sflag || Sflag)
+	if (sflag || Sflag || !(vt->flags & KMEM_CACHE_INIT))
 		kmem_cache_init();
 
 	while (args[optind]) {
@@ -3198,8 +3632,6 @@
                 if (pflag) {
 			meminfo.spec_addr = value[i];
 			meminfo.flags = ADDRESS_SPECIFIED;
-                        if (meminfo.calls++)
-                        	fprintf(fp, "\n");
                         dump_mem_map(&meminfo);
                         pflag++;
                 }
@@ -3234,6 +3666,8 @@
 			} else {
                         	meminfo.spec_addr = value[i];
                         	meminfo.flags = ADDRESS_SPECIFIED;
+				if (Sflag && (vt->flags & KMALLOC_SLUB))
+					meminfo.flags |= VERBOSE;
 				if (meminfo.calls++)
 					fprintf(fp, "\n");
                         	vt->dump_kmem_cache(&meminfo);
@@ -3248,8 +3682,6 @@
                 if (vflag) {
 			meminfo.spec_addr = value[i];
 			meminfo.flags = ADDRESS_SPECIFIED; 
-			if (meminfo.calls++)
-				fprintf(fp, "\n");
                         dump_vmlist(&meminfo);
                         vflag++;
                 }
@@ -3275,7 +3707,7 @@
                 /* 
                  * no value arguments allowed! 
                  */
-                if (nflag || iflag || Fflag || Cflag || Lflag) {
+                if (zflag || nflag || iflag || Fflag || Cflag || Lflag || Vflag) {
 			error(INFO, 
 			    "no address arguments allowed with this option\n");
                         cmd_usage(pc->curcmd, SYNOPSIS);
@@ -3309,24 +3741,25 @@
 	}
 
 	if (sflag == 1) {
-                if (vt->flags & KMEM_CACHE_UNAVAIL)
-                     	error(FATAL, 
-			    "kmem cache slab subsystem not available\n");
 		if (STREQ(meminfo.reqname, "list"))
 			kmem_cache_list();
+                else if (vt->flags & KMEM_CACHE_UNAVAIL)
+                     	error(FATAL, 
+			    "kmem cache slab subsystem not available\n");
 		else
 			vt->dump_kmem_cache(&meminfo);
 	}
 
 	if (Sflag == 1) {
-                if (vt->flags & KMEM_CACHE_UNAVAIL)
-                     	error(FATAL, 
-			    "kmem cache slab subsystem not available\n");
-		meminfo.flags = VERBOSE;
 		if (STREQ(meminfo.reqname, "list"))
 			kmem_cache_list();
-		else
+                else if (vt->flags & KMEM_CACHE_UNAVAIL)
+                     	error(FATAL, 
+			    "kmem cache slab subsystem not available\n");
+		else {
+			meminfo.flags = VERBOSE;
 			vt->dump_kmem_cache(&meminfo);
+		}
 	}
 
 	if (vflag == 1)
@@ -3343,6 +3776,9 @@
 	if (nflag == 1)
 		dump_memory_nodes(MEMORY_NODES_DUMP);
 
+	if (zflag == 1)
+		dump_zone_stats();
+
 	if (lflag == 1) { 
 		dump_page_lists(&meminfo);
 	}
@@ -3352,7 +3788,13 @@
 		dump_page_lists(&meminfo);
 	}
 
-	if (!(sflag + Sflag + pflag + fflag + Fflag + vflag + 
+	if (Vflag == 1) {
+		dump_vm_stat(NULL, NULL, 0);
+		dump_page_states();
+		dump_vm_event_state();
+	}
+
+	if (!(sflag + Sflag + pflag + fflag + Fflag + vflag + Vflag + zflag +
               cflag + Cflag + iflag + nflag + lflag + Lflag + meminfo.calls))
 		cmd_usage(pc->curcmd, SYNOPSIS);
 
@@ -3373,12 +3815,13 @@
 	buf = (char *)GETBUF(SIZE(page));
 
 	if (!readmem(pageptr, KVADDR, buf, SIZE(page),
-            "reserved page", RETURN_ON_ERROR|QUIET))
+            "reserved page", RETURN_ON_ERROR|QUIET)) {
+		FREEBUF(buf);
 		return;
+	}
 
 	flags = ULONG(buf + OFFSET(page_flags));
 
-
 	if (count_bits_long(flags) == 1)
 		vt->PG_reserved = flags;
 	else
@@ -3386,12 +3829,64 @@
 
 	if (CRASHDEBUG(2))
 		fprintf(fp, 
-		    "PG_reserved bit: vaddr: %lx page: %lx flags: %lx => %lx\n",
+		    "PG_reserved: vaddr: %lx page: %lx flags: %lx => %lx\n",
 			vaddr, pageptr, flags, vt->PG_reserved);
 
 	FREEBUF(buf);
 }
 
+static void 
+PG_slab_flag_init(void)
+{
+	int bit;
+        ulong pageptr;
+        ulong vaddr, flags;
+        char buf[BUFSIZE];  /* safe for a page struct */
+
+	/*
+	 *  Set the old defaults in case the search below fails.
+	 */
+        if (VALID_MEMBER(page_pte)) {
+                if (THIS_KERNEL_VERSION < LINUX(2,6,0))
+                        vt->PG_slab = 10;
+                else if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
+                        vt->PG_slab = 7;
+        } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
+                vt->PG_slab = 7;
+
+	if (vt->flags & KMALLOC_SLUB) {
+		/* 
+		 *  PG_slab and the following are hardwired for 
+		 *  now -- at least until I can come up with a
+		 *  better way.  (The PG_slab test below fails
+		 *  because slub.c uses the lower-bit PG_active and
+		 *  PG_error flags.)
+		 */
+#define PG_compound             14      /* Part of a compound page */
+#define PG_reclaim              17      /* To be reclaimed asap */
+		vt->PG_head_tail_mask = ((1L << PG_compound) | (1L << PG_reclaim));
+
+		return;
+	}
+
+       	if (try_get_symbol_data("vm_area_cachep", sizeof(void *), &vaddr) &&
+            phys_to_page((physaddr_t)VTOP(vaddr), &pageptr) &&
+            readmem(pageptr, KVADDR, buf, SIZE(page),
+            "vm_area_cachep page", RETURN_ON_ERROR|QUIET)) {
+
+        	flags = ULONG(buf + OFFSET(page_flags));
+
+	        if ((bit = ffsl(flags))) {
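+	                /*
+	                 *  ffsl() is 1-based: flags of 0x80 (only
+	                 *  bit 7 set), for example, give bit == 8
+	                 *  and hence PG_slab == 7.
+	                 */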
+	                vt->PG_slab = bit - 1;
+	
+	        	if (CRASHDEBUG(2))
+	                	fprintf(fp,
+	                    "PG_slab bit: vaddr: %lx page: %lx flags: %lx => %ld\n",
+	                        vaddr, pageptr, flags, vt->PG_slab);
+	
+		}
+	}
+}
+
 /*
  *  dump_mem_map() displays basic data about each entry in the mem_map[]
  *  array, or if an address is specified, just the mem_map[] entry for that
@@ -3438,22 +3933,20 @@
 #define PGMM_CACHED (512)
 
 static void
-dump_mem_map(struct meminfo *mi)
+dump_mem_map_SPARSEMEM(struct meminfo *mi)
 {
-	long i, n;
+	ulong i;
 	long total_pages;
-	int others, page_not_mapped, phys_not_mapped;
+	int others, page_not_mapped, phys_not_mapped, page_mapping;
 	ulong pp, ppend;
 	physaddr_t phys, physend;
 	ulong tmp, reserved, shared, slabs;
         ulong PG_reserved_flag;
 	long buffers;
 	ulong inode, offset, flags, mapping, index;
-	ulong node_size;
 	uint count;
 	int print_hdr, pg_spec, phys_spec, done;
 	int v22;
-	struct node_table *nt;
 	char hdr[BUFSIZE];
 	char buf0[BUFSIZE];
 	char buf1[BUFSIZE];
@@ -3462,6 +3955,7 @@
 	char buf4[BUFSIZE];
 	char *page_cache;
 	char *pcache;
+	ulong section, section_nr, nr_mem_sections, section_size;
 
 	v22 = VALID_MEMBER(page_inode);  /* page.inode vs. page.mapping */
 
@@ -3549,22 +4043,62 @@
 	done = FALSE;
 	total_pages = 0;
 
-	for (n = 0; n < vt->numnodes; n++) {
+	nr_mem_sections = NR_MEM_SECTIONS();
+
+	/* 
+	 *  Iterate over all possible sections
+	 */
+        for (section_nr = 0; section_nr < nr_mem_sections ; section_nr++) {
+
+		if (CRASHDEBUG(2)) 
+			fprintf(fp, "section_nr = %ld\n", section_nr);
+
+		/* 
+		 *  If we are looking up a specific address, jump directly
+		 *  to the section with that page 
+		 */
+		if (mi->flags & ADDRESS_SPECIFIED) {        
+			ulong pfn;
+			physaddr_t tmp;
+
+			if (pg_spec) {
+				if (!page_to_phys(mi->spec_addr, &tmp))
+					return;
+				pfn = tmp >> PAGESHIFT();
+			} else
+				pfn = mi->spec_addr >> PAGESHIFT();
+			section_nr = pfn_to_section_nr(pfn);
+		}
+
+                if (!(section = valid_section_nr(section_nr))) {
+#ifdef NOTDEF
+                        break;    /* On a real sparsemem system we need to check
+				   * every section, as gaps may exist.  But this
+				   * can be slow.  If we know we don't have gaps,
+				   * just stop validating sections when we
+				   * get to the end of the valid ones.
+				   * In the future, find a way to short-circuit
+				   * this loop.
+				   */
+#endif
+			if (mi->flags & ADDRESS_SPECIFIED)
+				break;
+			continue;
+		}
+
 		if (print_hdr) {
-			fprintf(fp, "%s%s", n ? "\n" : "", hdr);
+			if (!(pc->curcmd_flags & HEADER_PRINTED))
+				fprintf(fp, "%s", hdr);
 			print_hdr = FALSE;
+			pc->curcmd_flags |= HEADER_PRINTED;
 		}
 
-		nt = &vt->node_table[n];
-		total_pages += nt->size;
-		pp = nt->mem_map;
-		phys = nt->start_paddr;
-		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
-			node_size = vt->max_mapnr;
-		else
-			node_size = nt->size;
+		pp = section_mem_map_addr(section);
+		pp = sparse_decode_mem_map(pp, section_nr);
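+		/*
+		 *  Each section covers PAGES_PER_SECTION() pages: with
+		 *  4K pages and 32768 pages per section (128MB), for
+		 *  example, section_nr 3 starts at physical 0x18000000.
+		 */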
+		phys = (physaddr_t) section_nr * PAGES_PER_SECTION() * PAGESIZE();
+		section_size = PAGES_PER_SECTION();
 
-		for (i = 0; i < node_size; 
+		for (i = 0; i < section_size; 
 		     i++, pp += SIZE(page), phys += PAGESIZE()) {
 
 			if ((i % PGMM_CACHED) == 0) {
@@ -3581,7 +4115,7 @@
 					continue;
 				}  
 
-				fill_mem_map_cache(pp, page_cache);
+				fill_mem_map_cache(pp, ppend, page_cache);
 			}
 
 			pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page));
@@ -3653,11 +4187,12 @@
 				}
 	                        continue;
 	                }
+			page_mapping = VALID_MEMBER(page_mapping);
 	
 			if (v22) {
 				inode = ULONG(pcache + OFFSET(page_inode));
 				offset = ULONG(pcache + OFFSET(page_offset));
-			} else { 
+			} else if (page_mapping) { 
 				mapping = ULONG(pcache + 
 					OFFSET(page_mapping));
 				index = ULONG(pcache + OFFSET(page_index));
@@ -3700,6 +4235,20 @@
                                         space(MINSPACE),
 					mkstring(buf4, 8, CENTER|RJUST, " "),
                                         " ");
+				else if (!page_mapping)
+				 fprintf(fp, "%s%s%s%s%s%s%s %2d ",
+					 mkstring(buf0, VADDR_PRLEN,
+					 LJUST|LONG_HEX, MKSTR(pp)),
+					 space(MINSPACE),
+					 mkstring(buf1, MAX(PADDR_PRLEN,
+					 strlen("PHYSICAL")),
+					 RJUST|LONGLONG_HEX, MKSTR(&phys)),
+					 space(MINSPACE),
+					 mkstring(buf3, VADDR_PRLEN,
+					 CENTER|RJUST, "-------"),
+					 space(MINSPACE),
+					 mkstring(buf4, 8, CENTER|RJUST, "-----"),
+					 count);
 				else
                                 fprintf(fp, "%s%s%s%s%s%s%8ld %2d ",
 					mkstring(buf0, VADDR_PRLEN, 
@@ -3862,6379 +4411,9961 @@
 	FREEBUF(page_cache);
 }
 
-/*
- *  Stash a chunk of PGMM_CACHED page structures, starting at addr, into the
- *  passed-in buffer.  The mem_map array is normally guaranteed to be
- *  readable except in the case of virtual mem_map usage.  When V_MEM_MAP
- *  is in place, read all pages consumed by PGMM_CACHED page structures
- *  that are currently mapped, leaving the unmapped ones just zeroed out.
- */
 static void
-fill_mem_map_cache(ulong pp, char *page_cache)
+dump_mem_map(struct meminfo *mi)
 {
-	long size, cnt;
-	ulong addr;
-        char *bufptr;
+	long i, n;
+	long total_pages;
+	int others, page_not_mapped, phys_not_mapped, page_mapping;
+	ulong pp, ppend;
+	physaddr_t phys, physend;
+	ulong tmp, reserved, shared, slabs;
+        ulong PG_reserved_flag;
+	long buffers;
+	ulong inode, offset, flags, mapping, index;
+	ulong node_size;
+	uint count;
+	int print_hdr, pg_spec, phys_spec, done;
+	int v22;
+	struct node_table *nt;
+	char hdr[BUFSIZE];
+	char buf0[BUFSIZE];
+	char buf1[BUFSIZE];
+	char buf2[BUFSIZE];
+	char buf3[BUFSIZE];
+	char buf4[BUFSIZE];
+	char *page_cache;
+	char *pcache;
 
-	/*
-	 *  Try to read it in one fell swoop.
- 	 */
-	if (readmem(pp, KVADDR, page_cache, SIZE(page) * PGMM_CACHED,
-      	    "page struct cache", RETURN_ON_ERROR|QUIET))
+	if (IS_SPARSEMEM()) {
+		dump_mem_map_SPARSEMEM(mi);
 		return;
+	}
 
-	/*
-	 *  Break it into page-size-or-less requests, warning if it's
-	 *  not a virtual mem_map.
-	 */
-        size = SIZE(page) * PGMM_CACHED;
-        addr = pp;
-        bufptr = page_cache;
+	v22 = VALID_MEMBER(page_inode);  /* page.inode vs. page.mapping */
 
-        while (size > 0) {
-		/* 
-		 *  Compute bytes till end of page.
-		 */
-		cnt = PAGESIZE() - PAGEOFFSET(addr); 
-
-                if (cnt > size)
-                        cnt = size;
+        if (v22) {
+		sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n",
+		    mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), 
+		    space(MINSPACE),               
+                    mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")),
+			RJUST, "PHYSICAL"),		    
+		    space(MINSPACE),               
+		    mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "INODE"), 
+		    space(MINSPACE),               
+		    mkstring(buf4, 8, CENTER|LJUST, "OFFSET"),
+		    space(MINSPACE-1));
+        } else {
+		sprintf(hdr, "%s%s%s%s%s%s%sCNT FLAGS\n",
+		    mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), 
+		    space(MINSPACE),             
+                    mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")),
+                        RJUST, "PHYSICAL"),
+		    space(MINSPACE),             
+		    mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"),
+		    space(MINSPACE),               
+		    mkstring(buf4, 8, CENTER|RJUST, "INDEX"));
+        }
 
-		if (!readmem(addr, KVADDR, bufptr, size,
-                    "virtual page struct cache", RETURN_ON_ERROR|QUIET)) {
-			BZERO(bufptr, size);
-			if (!(vt->flags & V_MEM_MAP))
-				error(WARNING, 
-		                   "mem_map[] from %lx to %lx not accessible\n",
-					addr, addr+size);
+	pg_spec = phys_spec = print_hdr = FALSE;
+	
+	switch (mi->flags)
+	{
+	case ADDRESS_SPECIFIED: 
+		switch (mi->memtype)
+		{
+		case KVADDR:
+                        if (is_page_ptr(mi->spec_addr, NULL))
+                                pg_spec = TRUE;
+                        else {
+                                if (kvtop(NULL, mi->spec_addr, &phys, 0)) {
+                                        mi->spec_addr = phys;
+                                        phys_spec = TRUE;
+                                }
+                                else
+                                        return;
+                        }
+			break;
+		case PHYSADDR:
+			phys_spec = TRUE;
+			break;
+		default:
+			error(FATAL, "dump_mem_map: no memtype specified\n");
+			break;
 		}
+		print_hdr = TRUE;
+		break;
 
-		addr += cnt;
-                bufptr += cnt;
-                size -= cnt;
-        }
-}
+	case GET_ALL:
+		shared = 0;
+                reserved = 0;
+		buffers = 0;
+		slabs = 0;
+		break;
 
+	case GET_SHARED_PAGES:
+		shared = 0;
+		break;
 
-/*
- *  dump_page_hash_table() displays the entries in each page_hash_table.
- */
+	case GET_TOTALRAM_PAGES:
+                reserved = 0;
+		break;
 
-#define PGHASH_CACHED (1024)
+	case GET_BUFFERS_PAGES:
+		buffers = 0;
+		break;
 
-static void
-dump_page_hash_table(struct meminfo *hi)
-{
-	int i;
-	int len, entry_len;
-	ulong page_hash_table, head;
-	struct list_data list_data, *ld;
-	struct gnu_request req;
-	long total_cached;
-	long page_cache_size;
-	ulong this_addr, searchpage;
-	int errflag, found, cnt, populated, verbose;
-	uint ival;
-	ulong buffer_pages;
-	char buf[BUFSIZE];
-	char hash_table[BUFSIZE];
-	char *pcache, *pghash_cache;
+	case GET_SLAB_PAGES:
+		slabs = 0;
+		break;
 
-	if (!vt->page_hash_table) {
-		if (hi->flags & VERBOSE)
-			error(FATAL, 
-			 "address_space page cache radix tree not supported\n");
-		
-        	if (symbol_exists("nr_pagecache")) {
-			buffer_pages = nr_blockdev_pages();
-                	get_symbol_data("nr_pagecache", sizeof(int), &ival);
-                	page_cache_size = (ulong)ival;
-			page_cache_size -= buffer_pages;
-        		fprintf(fp, "page cache size: %ld\n", page_cache_size);
-			if (hi->flags & ADDRESS_SPECIFIED)
-				error(INFO, 
-    "address_space page cache radix tree not supported: %lx: ignored\n",
-					hi->spec_addr);
-		} else
-			error(FATAL, "cannot determine page cache size\n");
-		return;
+	default:
+		print_hdr = TRUE;
+		break;
 	}
 
-	ld = &list_data;
-
-	if (hi->spec_addr && (hi->flags & ADDRESS_SPECIFIED)) {
-		verbose = TRUE;
-		searchpage = hi->spec_addr;
-	} else if (hi->flags & VERBOSE) {
-		verbose = TRUE;
-		searchpage = 0;
-	} else { 
-		verbose = FALSE;
-		searchpage = 0;
-	}
+	page_cache = GETBUF(SIZE(page) * PGMM_CACHED);
+	done = FALSE;
+	total_pages = 0;
 
-	if (vt->page_hash_table_len == 0) 
-		error(FATAL, "cannot determine size of page_hash_table\n");
+	for (n = 0; n < vt->numnodes; n++) {
+		if (print_hdr) {
+			if (!(pc->curcmd_flags & HEADER_PRINTED))
+				fprintf(fp, "%s%s", n ? "\n" : "", hdr);
+			print_hdr = FALSE;
+			pc->curcmd_flags |= HEADER_PRINTED;
+		}
 
-	page_hash_table = vt->page_hash_table;
-	len = vt->page_hash_table_len;
-	entry_len = VALID_STRUCT(page_cache_bucket) ?
-		SIZE(page_cache_bucket) : sizeof(void *);
+		nt = &vt->node_table[n];
+		total_pages += nt->size;
+		pp = nt->mem_map;
+		phys = nt->start_paddr;
+		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
+			node_size = vt->max_mapnr;
+		else
+			node_size = nt->size;
 
-	if (CRASHDEBUG(1)) {
-		populated = 0;
-		fprintf(fp, "page_hash_table length: %d\n", len);
-	}
+		for (i = 0; i < node_size; 
+		     i++, pp += SIZE(page), phys += PAGESIZE()) {
 
-	get_symbol_type("page_cache_size", NULL, &req);
-        if (req.length == sizeof(int)) {
-                get_symbol_data("page_cache_size", sizeof(int), &ival);
-                page_cache_size = (long)ival;
-        } else
-                get_symbol_data("page_cache_size", sizeof(long),
-                        &page_cache_size);
+			if ((i % PGMM_CACHED) == 0) {
+				ppend = pp + ((PGMM_CACHED-1) * SIZE(page));
+				physend = phys + ((PGMM_CACHED-1) * PAGESIZE());
 
-        pghash_cache = GETBUF(sizeof(void *) * PGHASH_CACHED);
+				if ((pg_spec && (mi->spec_addr > ppend)) ||
+			            (phys_spec && 
+				    (PHYSPAGEBASE(mi->spec_addr) > physend))) {
+					i += (PGMM_CACHED-1);
+					pp = ppend;
+					phys = physend;
+					continue;
+				}  
 
-	if (searchpage)
-		open_tmpfile();
+				fill_mem_map_cache(pp, ppend, page_cache);
+			}
 
-	hq_open();
-	for (i = total_cached = 0; i < len; i++, 
-	     page_hash_table += entry_len) {
+			pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page));
 
-                if ((i % PGHASH_CACHED) == 0) {
-                	readmem(page_hash_table, KVADDR, pghash_cache,
-                        	entry_len * PGHASH_CACHED,
-                                "page hash cache", FAULT_ON_ERROR);
-                }
+			if (received_SIGINT())
+				restart(0);
+	
+			if ((pg_spec && (pp == mi->spec_addr)) || 
+			   (phys_spec && (phys == PHYSPAGEBASE(mi->spec_addr))))
+				done = TRUE;
 
-                pcache = pghash_cache + ((i%PGHASH_CACHED) * entry_len);
-		if (VALID_STRUCT(page_cache_bucket))
-			pcache += OFFSET(page_cache_bucket_chain);
+			if (!done && (pg_spec || phys_spec))
+				continue;
 			
-		head = ULONG(pcache);
+			flags = ULONG(pcache + OFFSET(page_flags));
+			count = UINT(pcache + OFFSET(page_count));
 
-		if (!head) 
-			continue;
+	                switch (mi->flags)
+			{
+			case GET_ALL:
+			case GET_BUFFERS_PAGES:
+				if (VALID_MEMBER(page_buffers)) {
+					tmp = ULONG(pcache + 
+						OFFSET(page_buffers));
+					if (tmp)
+						buffers++;
+				} else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) {
+	                                if ((flags >> v26_PG_private) & 1) 
+						buffers++;
+				} else
+					error(FATAL, 
+			       "cannot determine whether pages have buffers\n");
 
-		if (verbose) 
-			fprintf(fp, "page_hash_table[%d]\n", i);
-		
-		if (CRASHDEBUG(1))
-			populated++;
+				if (mi->flags != GET_ALL)
+					continue;
 
-                BZERO(ld, sizeof(struct list_data));
-                ld->flags = verbose;
-                ld->start = head;
-		ld->searchfor = searchpage;
-		ld->member_offset = OFFSET(page_next_hash);
-                cnt = do_list(ld);
-                total_cached += cnt;
+				/* FALLTHROUGH */
 
-		if (ld->searchfor)
-			break;
+			case GET_SLAB_PAGES:
+				if (v22) {
+	                                if ((flags >> v22_PG_Slab) & 1) 
+						slabs++;
+				} else if (vt->PG_slab) {
+	                                if ((flags >> vt->PG_slab) & 1) 
+						slabs++;
+				} else {
+	                                if ((flags >> v24_PG_slab) & 1) 
+						slabs++;
+				}
+				if (mi->flags != GET_ALL)
+					continue;
 
-		if (received_SIGINT())
-			restart(0);
+				/* FALLTHROUGH */
+
+			case GET_SHARED_PAGES:
+			case GET_TOTALRAM_PAGES:
+                                if (vt->PG_reserved)
+					PG_reserved_flag = vt->PG_reserved;
+				else
+                                        PG_reserved_flag = v22 ?
+                                                1 << v22_PG_reserved :
+                                                1 << v24_PG_reserved;
+
+	                        if (flags & PG_reserved_flag) {
+	                                reserved++;
+				} else {
+					if (count > 1)
+						shared++;
+				}
+	                        continue;
+	                }
+	
+			page_mapping = VALID_MEMBER(page_mapping);
+
+			if (v22) {
+				inode = ULONG(pcache + OFFSET(page_inode));
+				offset = ULONG(pcache + OFFSET(page_offset));
+			} else if (page_mapping) {
+				mapping = ULONG(pcache + 
+					OFFSET(page_mapping));
+				index = ULONG(pcache + OFFSET(page_index));
+			}
+	
+			page_not_mapped = phys_not_mapped = FALSE;
+
+			if (v22) {
+				fprintf(fp, "%lx%s%s%s%s%s%8lx %2d%s",
+					pp,
+					space(MINSPACE),
+                    			mkstring(buf1, MAX(PADDR_PRLEN, 
+					    strlen("PHYSICAL")), 
+					    RJUST|LONGLONG_HEX, MKSTR(&phys)),
+					space(MINSPACE),
+				        mkstring(buf2, VADDR_PRLEN, 
+						RJUST|LONG_HEX, MKSTR(inode)),
+					space(MINSPACE),
+					offset,
+					count,
+					space(MINSPACE));
+			} else {
+				if ((vt->flags & V_MEM_MAP)) {
+				    	if (!machdep->verify_paddr(phys)) 
+						phys_not_mapped = TRUE;
+					if (!kvtop(NULL, pp, NULL, 0))
+						page_not_mapped = TRUE;
+				}
+				if (page_not_mapped)
+                                fprintf(fp, "%s%s%s%s%s%s%s %2s ", 
+					mkstring(buf0, VADDR_PRLEN, 
+					LJUST|LONG_HEX, MKSTR(pp)),
+                                       	space(MINSPACE),
+                                       	mkstring(buf1, MAX(PADDR_PRLEN,
+                                        strlen("PHYSICAL")),
+                                        RJUST|LONGLONG_HEX, MKSTR(&phys)),
+                                        space(MINSPACE),
+					mkstring(buf3, VADDR_PRLEN, 
+					CENTER|RJUST, " "),
+                                        space(MINSPACE),
+					mkstring(buf4, 8, CENTER|RJUST, " "),
+                                        " ");
+				else if (!page_mapping)
+                                fprintf(fp, "%s%s%s%s%s%s%s %2d ",
+                                        mkstring(buf0, VADDR_PRLEN,
+                                        LJUST|LONG_HEX, MKSTR(pp)),
+                                        space(MINSPACE),
+                                        mkstring(buf1, MAX(PADDR_PRLEN,
+                                        strlen("PHYSICAL")),
+                                        RJUST|LONGLONG_HEX, MKSTR(&phys)),
+                                        space(MINSPACE),
+                                        mkstring(buf3, VADDR_PRLEN, 
+                                        CENTER|RJUST, "-------"),
+                                        space(MINSPACE),
+                                        mkstring(buf4, 8, CENTER|RJUST, "-----"),
+                                        count);
+				else
+                                fprintf(fp, "%s%s%s%s%s%s%8ld %2d ",
+					mkstring(buf0, VADDR_PRLEN, 
+					LJUST|LONG_HEX, MKSTR(pp)),
+                                       	space(MINSPACE),
+                                       	mkstring(buf1, MAX(PADDR_PRLEN,
+                                        strlen("PHYSICAL")),
+                                        RJUST|LONGLONG_HEX, MKSTR(&phys)),
+                                        space(MINSPACE),
+                                        mkstring(buf2, VADDR_PRLEN,
+                                        RJUST|LONG_HEX, MKSTR(mapping)),
+                                        space(MINSPACE),
+                                        index,
+                                        count);
+			}
+	
+			others = 0;
+	
+			if (v22) {
+		                if ((flags >> v22_PG_DMA) & 1)
+		                        fprintf(fp, "%sDMA", 
+						others++ ? "," : "");
+				if ((flags >> v22_PG_locked) & 1)
+					fprintf(fp, "%slocked", 
+						others++ ? "," : "");
+				if ((flags >> v22_PG_error) & 1)
+					fprintf(fp, "%serror", 
+						others++ ? "," : "");
+				if ((flags >> v22_PG_referenced) & 1)
+					fprintf(fp, "%sreferenced", 
+						others++ ? "," : "");
+				if ((flags >> v22_PG_dirty) & 1)
+					fprintf(fp, "%sdirty", 
+						others++ ? "," : "");
+				if ((flags >> v22_PG_uptodate) & 1)
+					fprintf(fp, "%suptodate", 
+						others++ ? "," : "");
+				if ((flags >> v22_PG_free_after) & 1)
+					fprintf(fp, "%sfree_after", 
+						others++ ? "," : "");
+				if ((flags >> v22_PG_decr_after) & 1)
+					fprintf(fp, "%sdecr_after", 
+						others++ ? "," : "");
+				if ((flags >> v22_PG_swap_unlock_after) & 1)
+					fprintf(fp, "%sswap_unlock_after", 
+						others++ ? "," : "");
+				if ((flags >> v22_PG_Slab) & 1)
+					fprintf(fp, "%sslab", 
+						others++ ? "," : "");
+				if ((flags >> v22_PG_swap_cache) & 1)
+					fprintf(fp, "%sswap_cache", 
+						others++ ? "," : "");
+				if ((flags >> v22_PG_skip) & 1)
+					fprintf(fp, "%sskip", 
+						others++ ? "," : "");
+	                        if ((flags >> v22_PG_reserved) & 1)
+	                                fprintf(fp, "%sreserved", 
+						others++ ? "," : "");
+				fprintf(fp, "\n");
+			} else if (THIS_KERNEL_VERSION > LINUX(2,4,9)) {
+				fprintf(fp, "%lx\n", flags);
+			} else {
+	
+		                if ((flags >> v24_PG_locked) & 1)
+		                        fprintf(fp, "%slocked", 
+						others++ ? "," : "");
+				if ((flags >> v24_PG_error) & 1)
+					fprintf(fp, "%serror", 
+						others++ ? "," : "");
+				if ((flags >> v24_PG_referenced) & 1)
+					fprintf(fp, "%sreferenced", 
+						others++ ? "," : "");
+				if ((flags >> v24_PG_uptodate) & 1)
+					fprintf(fp, "%suptodate", 
+						others++ ? "," : "");
+                                if ((flags >> v24_PG_dirty) & 1)
+                                        fprintf(fp, "%sdirty",
+                                                others++ ? "," : "");
+				if ((flags >> v24_PG_decr_after) & 1)
+					fprintf(fp, "%sdecr_after", 
+						others++ ? "," : "");
+                                if ((flags >> v24_PG_active) & 1)
+                                        fprintf(fp, "%sactive",
+                                                others++ ? "," : "");
+                                if ((flags >> v24_PG_inactive_dirty) & 1)
+                                        fprintf(fp, "%sinactive_dirty",
+                                                others++ ? "," : "");
+				if ((flags >> v24_PG_slab) & 1)
+					fprintf(fp, "%sslab", 
+						others++ ? "," : "");
+				if ((flags >> v24_PG_swap_cache) & 1)
+					fprintf(fp, "%sswap_cache", 
+						others++ ? "," : "");
+				if ((flags >> v24_PG_skip) & 1)
+					fprintf(fp, "%sskip", 
+						others++ ? "," : "");
+				if ((flags >> v24_PG_inactive_clean) & 1)
+					fprintf(fp, "%sinactive_clean", 
+						others++ ? "," : "");
+				if ((flags >> v24_PG_highmem) & 1)
+					fprintf(fp, "%shighmem", 
+						others++ ? "," : "");
+				if ((flags >> v24_PG_checked) & 1)
+					fprintf(fp, "%schecked", 
+						others++ ? "," : "");
+				if ((flags >> v24_PG_bigpage) & 1)
+					fprintf(fp, "%sbigpage", 
+						others++ ? "," : "");
+                                if ((flags >> v24_PG_arch_1) & 1)
+                                        fprintf(fp, "%sarch_1",
+                                                others++ ? "," : "");
+				if ((flags >> v24_PG_reserved) & 1)
+					fprintf(fp, "%sreserved", 
+						others++ ? "," : "");
+				if (phys_not_mapped)
+					fprintf(fp, "%s[NOT MAPPED]", 
+						others++ ? " " : "");
+
+				fprintf(fp, "\n");
+			}
+	
+			if (done)
+				break;
+		}
+
+		if (done)
+			break;
+	}
+
+	switch (mi->flags)
+	{
+	case GET_TOTALRAM_PAGES:
+		mi->retval = total_pages - reserved;
+		break;
+
+	case GET_SHARED_PAGES:
+		mi->retval = shared;
+		break;
+
+	case GET_BUFFERS_PAGES:
+		mi->retval = buffers;
+		break;
+
+	case GET_SLAB_PAGES:
+		mi->retval = slabs;
+		break;
+
+	case GET_ALL:
+		mi->get_totalram = total_pages - reserved;
+		mi->get_shared = shared;
+		mi->get_buffers = buffers;
+        	mi->get_slabs = slabs;
+		break;
+
+	case ADDRESS_SPECIFIED:
+		mi->retval = done;
+		break; 
+	}
+
+	FREEBUF(page_cache);
+}
+
+/*
+ *  Stash a chunk of PGMM_CACHED page structures, starting at addr, into the
+ *  passed-in buffer.  The mem_map array is normally guaranteed to be
+ *  readable except in the case of virtual mem_map usage.  When V_MEM_MAP
+ *  is in place, read all pages consumed by PGMM_CACHED page structures
+ *  that are currently mapped, leaving the unmapped ones just zeroed out.
+ */
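+/*
+ *  Sizing sketch (assumed, illustrative numbers only): one cache chunk
+ *  spans SIZE(page) * PGMM_CACHED bytes, so with 64-byte page structs,
+ *  512 cached entries and a 4KB page size the fallback loop below would
+ *  issue up to 8 page-bounded readmem() calls.  The actual figures depend
+ *  on the kernel's page struct size and on how PGMM_CACHED is defined.
+ */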
+static void
+fill_mem_map_cache(ulong pp, ulong ppend, char *page_cache)
+{
+	long size, cnt;
+	ulong addr;
+        char *bufptr;
+
+	/*
+	 *  Try to read it in one fell swoop.
+ 	 */
+	if (readmem(pp, KVADDR, page_cache, SIZE(page) * PGMM_CACHED,
+      	    "page struct cache", RETURN_ON_ERROR|QUIET))
+		return;
+
+	/*
+	 *  Break it into page-size-or-less requests, warning if it's
+	 *  not a virtual mem_map.
+	 */
+        size = SIZE(page) * PGMM_CACHED;
+        addr = pp;
+        bufptr = page_cache;
+
+        while (size > 0) {
+		/* 
+		 *  Compute bytes till end of page.
+		 */
+		cnt = PAGESIZE() - PAGEOFFSET(addr); 
+
+                if (cnt > size)
+                        cnt = size;
+
+		if (!readmem(addr, KVADDR, bufptr, cnt,
+                    "virtual page struct cache", RETURN_ON_ERROR|QUIET)) {
+			BZERO(bufptr, cnt);
+			if (!(vt->flags & V_MEM_MAP) && ((addr+cnt) < ppend)) 
+				error(WARNING, 
+		                   "mem_map[] from %lx to %lx not accessible\n",
+					addr, addr+cnt);
+		}
+		}
+
+		addr += cnt;
+                bufptr += cnt;
+                size -= cnt;
+        }
+}
+
+
+/*
+ *  dump_page_hash_table() displays the entries in each page_hash_table.
+ */
+
+#define PGHASH_CACHED (1024)
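+/*
+ *  Reading strategy used below: rather than one readmem() per hash bucket,
+ *  PGHASH_CACHED bucket entries are read into pghash_cache at a time, and
+ *  each non-empty chain is then walked with do_list().  When a specific
+ *  address is being searched for, the output is diverted to a tmpfile and
+ *  re-parsed afterwards to report the matching page_hash_table[] slot.
+ */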
+
+static void
+dump_page_hash_table(struct meminfo *hi)
+{
+	int i;
+	int len, entry_len;
+	ulong page_hash_table, head;
+	struct list_data list_data, *ld;
+	struct gnu_request req;
+	long total_cached;
+	long page_cache_size;
+	ulong this_addr, searchpage;
+	int errflag, found, cnt, populated, verbose;
+	uint ival;
+	ulong buffer_pages;
+	char buf[BUFSIZE];
+	char hash_table[BUFSIZE];
+	char *pcache, *pghash_cache;
+
+	if (!vt->page_hash_table) {
+		if (hi->flags & VERBOSE)
+			error(FATAL, 
+			 "address_space page cache radix tree not supported\n");
+		
+        	if (symbol_exists("nr_pagecache")) {
+			buffer_pages = nr_blockdev_pages();
+                	get_symbol_data("nr_pagecache", sizeof(int), &ival);
+                	page_cache_size = (ulong)ival;
+			page_cache_size -= buffer_pages;
+        		fprintf(fp, "page cache size: %ld\n", page_cache_size);
+			if (hi->flags & ADDRESS_SPECIFIED)
+				error(INFO, 
+    "address_space page cache radix tree not supported: %lx: ignored\n",
+					hi->spec_addr);
+		} else
+			error(FATAL, "cannot determine page cache size\n");
+		return;
+	}
+
+	ld = &list_data;
+
+	if (hi->spec_addr && (hi->flags & ADDRESS_SPECIFIED)) {
+		verbose = TRUE;
+		searchpage = hi->spec_addr;
+	} else if (hi->flags & VERBOSE) {
+		verbose = TRUE;
+		searchpage = 0;
+	} else { 
+		verbose = FALSE;
+		searchpage = 0;
+	}
+
+	if (vt->page_hash_table_len == 0) 
+		error(FATAL, "cannot determine size of page_hash_table\n");
+
+	page_hash_table = vt->page_hash_table;
+	len = vt->page_hash_table_len;
+	entry_len = VALID_STRUCT(page_cache_bucket) ?
+		SIZE(page_cache_bucket) : sizeof(void *);
+
+	if (CRASHDEBUG(1)) {
+		populated = 0;
+		fprintf(fp, "page_hash_table length: %d\n", len);
+	}
+
+	get_symbol_type("page_cache_size", NULL, &req);
+        if (req.length == sizeof(int)) {
+                get_symbol_data("page_cache_size", sizeof(int), &ival);
+                page_cache_size = (long)ival;
+        } else
+                get_symbol_data("page_cache_size", sizeof(long),
+                        &page_cache_size);
+
+        pghash_cache = GETBUF(sizeof(void *) * PGHASH_CACHED);
+
+	if (searchpage)
+		open_tmpfile();
+
+	hq_open();
+	for (i = total_cached = 0; i < len; i++, 
+	     page_hash_table += entry_len) {
+
+                if ((i % PGHASH_CACHED) == 0) {
+                	readmem(page_hash_table, KVADDR, pghash_cache,
+                        	entry_len * PGHASH_CACHED,
+                                "page hash cache", FAULT_ON_ERROR);
+                }
+
+                pcache = pghash_cache + ((i%PGHASH_CACHED) * entry_len);
+		if (VALID_STRUCT(page_cache_bucket))
+			pcache += OFFSET(page_cache_bucket_chain);
+			
+		head = ULONG(pcache);
+
+		if (!head) 
+			continue;
+
+		if (verbose) 
+			fprintf(fp, "page_hash_table[%d]\n", i);
+		
+		if (CRASHDEBUG(1))
+			populated++;
+
+                BZERO(ld, sizeof(struct list_data));
+                ld->flags = verbose;
+                ld->start = head;
+		ld->searchfor = searchpage;
+		ld->member_offset = OFFSET(page_next_hash);
+                cnt = do_list(ld);
+                total_cached += cnt;
+
+		if (ld->searchfor)
+			break;
+
+		if (received_SIGINT())
+			restart(0);
+	}
+	hq_close();
+
+        fprintf(fp, "%spage_cache_size: %ld ", verbose ? "\n" : "",
+                page_cache_size);
+        if (page_cache_size != total_cached)
+                fprintf(fp, "(found %ld)\n", total_cached);
+        else
+                fprintf(fp, "(verified)\n");
+
+	if (CRASHDEBUG(1))
+		fprintf(fp, "heads containing page(s): %d\n", populated);
+
+	if (searchpage) {
+		rewind(pc->tmpfile);
+		found = FALSE;
+		while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+			if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem:"))
+				continue;
+
+			if (strstr(buf, "page_hash_table")) {
+				strcpy(hash_table, buf); 
+				continue;
+			}
+			if (strstr(buf, "page_cache_size"))
+				continue;
+
+			if (CRASHDEBUG(1) && 
+			    !hexadecimal(strip_linefeeds(buf), 0))
+				continue;
+
+                	this_addr = htol(strip_linefeeds(buf),
+                        	RETURN_ON_ERROR, &errflag);
+
+			if (this_addr == searchpage) {
+				found = TRUE;
+				break;
+			}
+		}
+		close_tmpfile();
+
+		if (found) {
+			fprintf(fp, hash_table);
+			fprintf(fp, "%lx\n", searchpage);
+			hi->retval = TRUE;
+		}
+	}
+}
+
+/*
+ *  dump_free_pages() displays basic data about pages currently resident
+ *  in the free_area[] memory lists.  If the flags contain the VERBOSE 
+ *  bit, each page slab base address is dumped.  If an address is specified,
+ *  only the free_area[] data containing that page is displayed, along with
+ *  the page slab base address.  Specified addresses can be either physical 
+ *  addresses or page structure pointers.
+ */
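+/*
+ *  Search technique used by dump_free_pages() and its relatives: when an
+ *  address is specified, the normal VERBOSE listing is first written to a
+ *  tmpfile and then re-read line by line; each page address is converted
+ *  back to a physical address with page_to_phys(), and the entry whose
+ *  block [this_phys, this_phys + chunk_size) contains the target is
+ *  reprinted.
+ */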
+char *free_area_hdr1 = \
+	"AREA  SIZE  FREE_AREA_STRUCT  BLOCKS   PAGES\n";
+char *free_area_hdr2 = \
+	"AREA  SIZE  FREE_AREA_STRUCT\n";
+
+static void
+dump_free_pages(struct meminfo *fi)
+{
+	int i;
+	int order;
+	ulong free_area;
+	char *free_area_buf;
+	ulong *pp;
+	int nr_mem_lists;
+	struct list_data list_data, *ld;
+	long cnt, total_free, chunk_size;
+	int nr_free_pages;
+	char buf[BUFSIZE];
+	char last_free[BUFSIZE];
+	char last_free_hdr[BUFSIZE];
+	int verbose, errflag, found;
+	physaddr_t searchphys;
+	ulong this_addr; 
+	physaddr_t this_phys;
+	int do_search;
+	ulong kfp, offset;
+	int flen, dimension;
+
+        if (vt->flags & (NODES|ZONES)) 
+		error(FATAL, "dump_free_pages called with (NODES|ZONES)\n");
+
+	nr_mem_lists = ARRAY_LENGTH(free_area);
+	dimension = ARRAY_LENGTH(free_area_DIMENSION);
+
+	if (nr_mem_lists == 0)
+		error(FATAL, "cannot determine size/dimensions of free_area\n");
+
+	if (dimension) 
+		error(FATAL, 
+		    "dump_free_pages called with multidimensional free area\n");
+
+	ld = &list_data;
+	total_free = 0;
+	searchphys = 0;
+	do_search = FALSE;
+	get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages);
+	
+	switch (fi->flags)
+	{
+	case GET_FREE_HIGHMEM_PAGES:
+                error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n");
+
+	case GET_FREE_PAGES:
+		fi->retval = (ulong)nr_free_pages;
+		return;
+
+	case ADDRESS_SPECIFIED:
+		switch (fi->memtype)
+		{
+		case KVADDR:
+                        if (!page_to_phys(fi->spec_addr, &searchphys)) {
+                                if (!kvtop(NULL, fi->spec_addr, &searchphys, 0))
+                                        return;
+                        }
+			break;
+		case PHYSADDR:
+			searchphys = fi->spec_addr;
+			break;
+		default:
+			error(FATAL, "dump_free_pages: no memtype specified\n");
+		}
+		do_search = TRUE;
+		break;
+	} 
+
+	verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE;
+
+	free_area_buf = GETBUF(nr_mem_lists * SIZE(free_area_struct));
+	kfp = free_area = symbol_value("free_area");
+	flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT"));
+	readmem(free_area, KVADDR, free_area_buf, 
+		SIZE(free_area_struct) * nr_mem_lists, 
+		"free_area_struct", FAULT_ON_ERROR);
+
+	if (do_search)
+		open_tmpfile();
+
+	if (!verbose)
+		fprintf(fp, free_area_hdr1);
+
+       	hq_open();
+	for (i = 0; i < nr_mem_lists; i++) {
+		pp = (ulong *)(free_area_buf + (SIZE(free_area_struct)*i));
+
+		chunk_size = power(2, i);
+
+		if (verbose)
+			fprintf(fp, free_area_hdr2);
+
+		fprintf(fp, "%3d  ", i);
+		sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024);
+		fprintf(fp, "%5s  ", buf);
+
+		fprintf(fp, "%s  %s", 
+			mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)),
+			verbose ? "\n" : "");
+
+		if (is_page_ptr(*pp, NULL)) {
+			BZERO(ld, sizeof(struct list_data));
+			ld->flags = verbose;
+			ld->start = *pp;
+			ld->end = free_area;
+        		cnt = do_list(ld);
+			total_free += (cnt * chunk_size);
+		} else 
+			cnt = 0;
+
+		if (!verbose)
+			fprintf(fp, "%6ld  %6ld\n", cnt, cnt * chunk_size );
+
+		free_area += SIZE(free_area_struct);
+		kfp += SIZE(free_area_struct);
+	}
+       	hq_close();
+
+	fprintf(fp, "\nnr_free_pages: %d ", nr_free_pages);
+	if (total_free != nr_free_pages)
+		fprintf(fp, "(found %ld)\n", total_free);
+	else
+		fprintf(fp, "(verified)\n");
+
+	if (!do_search)
+		return;
+
+	found = FALSE;
+        rewind(pc->tmpfile);
+	order = offset = 0;
+
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem"))
+			continue;
+
+		if (strstr(buf, "nr_free_pages") ||
+		    STREQ(buf, "\n"))
+			continue;
+
+		if (strstr(buf, "AREA")) {
+			strcpy(last_free_hdr, buf);
+			continue;
+		}
+
+		if (strstr(buf, "k")) {
+			strcpy(last_free, buf);
+			chunk_size = power(2, order) * PAGESIZE();
+			order++;
+			continue;
+		}
+
+		if (CRASHDEBUG(1) && !hexadecimal(strip_linefeeds(buf), 0))
+			continue;
+
+		errflag = 0;
+		this_addr = htol(strip_linefeeds(buf), 
+			RETURN_ON_ERROR, &errflag);
+                if (errflag) 
+			continue;
+
+		if (!page_to_phys(this_addr, &this_phys))
+			continue;
+
+		if ((searchphys >= this_phys) && 
+		    (searchphys < (this_phys+chunk_size))) {
+			if (searchphys > this_phys) 
+				offset = (searchphys - this_phys)/PAGESIZE();
+			found = TRUE;
+			break;
+		}
+	}
+        close_tmpfile();
+
+	if (found) {
+		order--;
+
+		fprintf(fp, last_free_hdr);
+		fprintf(fp, last_free);
+		fprintf(fp, "%lx  ", this_addr);
+		if (order) {
+                	switch (fi->memtype)
+                	{
+                	case KVADDR:
+				fprintf(fp, "(%lx is ", (ulong)fi->spec_addr);
+                        	break;
+                	case PHYSADDR:
+				fprintf(fp, "(%llx is %s", fi->spec_addr,
+				    PAGEOFFSET(fi->spec_addr) ?  "in " : "");
+                        	break;
+			}
+			fprintf(fp, "%s of %ld pages) ",
+				ordinal(offset+1, buf), power(2, order));
+		}
+
+		fi->retval = TRUE;
+		fprintf(fp, "\n");
+	}
+}
+
+/*
+ *  Dump free pages on kernels with a multi-dimensional free_area array.
+ */
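+/*
+ *  Layout note: the whole free_area[dimension][nr_mem_lists] array is read
+ *  into one GETBUF() buffer, and the [i][j] entry is located by stepping
+ *  SIZE(free_area_struct) * nr_mem_lists bytes per outer index and
+ *  SIZE(free_area_struct) bytes per order within it, as the loops below do.
+ */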
+char *free_area_hdr5 = \
+	"  AREA    SIZE  FREE_AREA_STRUCT  BLOCKS   PAGES\n";
+char *free_area_hdr6 = \
+	"  AREA    SIZE  FREE_AREA_STRUCT\n";
+
+static void
+dump_multidimensional_free_pages(struct meminfo *fi)
+{
+	int i, j;
+	struct list_data list_data, *ld;
+	long cnt, total_free;
+	ulong kfp, free_area;
+	physaddr_t searchphys;
+	int flen, errflag, verbose, nr_free_pages;
+	int nr_mem_lists, dimension, order, do_search;
+	ulong sum, found, offset;
+	char *free_area_buf, *p;
+	ulong *pp;
+	long chunk_size;
+        ulong this_addr; 
+	physaddr_t this_phys;
+	char buf[BUFSIZE];
+	char last_area[BUFSIZE];
+	char last_area_hdr[BUFSIZE];
+
+
+        if (vt->flags & (NODES|ZONES)) 
+                error(FATAL, 
+		"dump_multidimensional_free_pages called with (NODES|ZONES)\n");
+
+        ld = &list_data;
+	if (SIZE(free_area_struct) % sizeof(ulong))
+		error(FATAL, "free_area_struct not long-word aligned?\n");
+
+        total_free = 0;
+        searchphys = 0;
+	do_search = FALSE;
+        get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages);
+
+        switch (fi->flags)
+        {
+        case GET_FREE_HIGHMEM_PAGES:
+                error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n");
+
+        case GET_FREE_PAGES:
+                fi->retval = (ulong)nr_free_pages;
+                return;
+
+	case ADDRESS_SPECIFIED:
+		switch (fi->memtype)
+                {
+                case KVADDR:
+                        if (!page_to_phys(fi->spec_addr, &searchphys)) {
+                                if (!kvtop(NULL, fi->spec_addr, &searchphys, 0))
+                                        return;
+                        }
+                        break;
+                case PHYSADDR:
+                        searchphys = fi->spec_addr;
+                        break;
+                default:
+                        error(FATAL, 
+		    "dump_multidimensional_free_pages: no memtype specified\n");
+                }
+		do_search = TRUE;
+		break;
+	}
+
+        verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE;
+
+	flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT"));
+        nr_mem_lists = ARRAY_LENGTH(free_area);
+	dimension = ARRAY_LENGTH(free_area_DIMENSION);
+	if (!nr_mem_lists || !dimension)
+		error(FATAL, "cannot determine free_area dimensions\n");
+        free_area_buf = 
+		GETBUF((nr_mem_lists * SIZE(free_area_struct)) * dimension);
+        kfp = free_area = symbol_value("free_area");
+        readmem(free_area, KVADDR, free_area_buf, 
+		(SIZE(free_area_struct) * nr_mem_lists) * dimension,
+                "free_area arrays", FAULT_ON_ERROR);
+
+        if (do_search)
+                open_tmpfile();
+
+        hq_open();
+        for (i = sum = found = 0; i < dimension; i++) {
+        	if (!verbose)
+                	fprintf(fp, free_area_hdr5);
+               	pp = (ulong *)(free_area_buf + 
+			((SIZE(free_area_struct)*nr_mem_lists)*i));
+		for (j = 0; j < nr_mem_lists; j++) {
+                        if (verbose)
+                                fprintf(fp, free_area_hdr6);
+
+			sprintf(buf, "[%d][%d]", i, j);
+			fprintf(fp, "%7s  ", buf);
+
+                	chunk_size = power(2, j);
+
+                	sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024);
+                	fprintf(fp, "%5s  ", buf);
+
+                	fprintf(fp, "%s  %s",  
+			    mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)),
+			    verbose ? "\n" : "");
+
+                	if (is_page_ptr(*pp, NULL)) {
+                        	BZERO(ld, sizeof(struct list_data));
+                        	ld->flags = verbose;
+                        	ld->start = *pp;
+                        	ld->end = free_area;
+                        	cnt = do_list(ld);
+                        	total_free += (cnt * chunk_size);
+                	} else
+                        	cnt = 0;
+
+                	if (!verbose)
+                        	fprintf(fp, 
+					"%6ld  %6ld\n", cnt, cnt * chunk_size );
+
+			pp += (SIZE(free_area_struct)/sizeof(ulong));
+			free_area += SIZE(free_area_struct);
+			kfp += SIZE(free_area_struct);
+		}
+		fprintf(fp, "\n");
+	}
+	hq_close();
+
+        fprintf(fp, "nr_free_pages: %d ", nr_free_pages);
+        if (total_free != nr_free_pages)
+                fprintf(fp, "(found %ld)\n", total_free);
+        else
+                fprintf(fp, "(verified)\n");
+
+        if (!do_search)
+                return;
+
+        found = FALSE;
+        rewind(pc->tmpfile);
+        order = offset = 0;
+
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem:"))
+			continue;
+
+		if (STRNEQ(buf, "nr_free_pages:"))
+			continue;
+
+		if (strstr(buf, "AREA")) {
+                        strcpy(last_area_hdr, buf);
+                        fgets(buf, BUFSIZE, pc->tmpfile);
+                        strcpy(last_area, strip_linefeeds(buf));
+			p = strstr(buf, "k");
+			*p = NULLCHAR;
+			while (*p != ' ')
+				p--;
+			chunk_size = atol(p+1) * 1024;
+			if (chunk_size == PAGESIZE())
+				order = 0;
+			else
+				order++;
+                        continue;
+                }
+
+                errflag = 0;
+                this_addr = htol(strip_linefeeds(buf),
+                        RETURN_ON_ERROR, &errflag);
+                if (errflag)
+                        continue;
+
+                if (!page_to_phys(this_addr, &this_phys))
+                        continue;
+
+                if ((searchphys >= this_phys) &&
+                    (searchphys < (this_phys+chunk_size))) {
+                        if (searchphys > this_phys)
+                                offset = (searchphys - this_phys)/PAGESIZE();
+                        found = TRUE;
+                        break;
+                }
+
+	}
+	close_tmpfile();
+
+	if (found) {
+		fprintf(fp, last_area_hdr);
+		fprintf(fp, "%s\n", last_area);
+		fprintf(fp, "%lx  ", this_addr);
+                if (order) {
+                	switch (fi->memtype)
+                	{
+                	case KVADDR:
+                                fprintf(fp, "(%lx is ", (ulong)fi->spec_addr);
+                        	break;
+                	case PHYSADDR:
+                                fprintf(fp, "(%llx is %s", fi->spec_addr,
+                                    PAGEOFFSET(fi->spec_addr) ?  "in " : "");
+                        	break;
+			}
+                        fprintf(fp, "%s of %ld pages) ",
+                                ordinal(offset+1, buf), power(2, order));
+                }
+
+		fi->retval = TRUE;
+                fprintf(fp, "\n");
+	}
+}
+
+
+/*
+ *  Dump free pages in newer kernels that have zones.  This is a work in
+ *  progress, because although the framework for memory nodes has been laid
+ *  down, complete support has not been put in place.
+ */
+static char *zone_hdr = "ZONE  NAME        SIZE    FREE";
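+/*
+ *  Note on the flag handling below: GET_FREE_PAGES, GET_FREE_HIGHMEM_PAGES
+ *  and GET_ZONE_SIZES only accumulate per-zone counters and return the sum
+ *  via fi->retval without printing; the full per-node/per-zone table and
+ *  the free_area walk are produced only when none of those query flags is
+ *  set.
+ */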
+
+static void
+dump_free_pages_zones_v1(struct meminfo *fi)
+{
+	int i, n;
+	ulong node_zones;
+	ulong size;
+	long zone_size_offset;
+	long chunk_size;
+	int order, errflag, do_search;
+	ulong offset, verbose, value, sum, found; 
+	ulong this_addr;
+	physaddr_t this_phys, searchphys;
+        ulong zone_mem_map;
+        ulong zone_start_paddr;
+        ulong zone_start_mapnr;
+	struct node_table *nt;
+	char buf[BUFSIZE], *p;
+	char buf1[BUFSIZE];
+	char buf2[BUFSIZE];
+	char buf3[BUFSIZE];
+	char last_node[BUFSIZE];
+	char last_zone[BUFSIZE];
+	char last_area[BUFSIZE];
+	char last_area_hdr[BUFSIZE];
+
+       if (!(vt->flags & (NODES|ZONES)))
+		error(FATAL, 
+		    "dump_free_pages_zones_v1 called without (NODES|ZONES)\n");
+
+        if (fi->flags & ADDRESS_SPECIFIED) {
+                switch (fi->memtype)
+                {
+                case KVADDR:
+                        if (!page_to_phys(fi->spec_addr, &searchphys)) {
+                                if (!kvtop(NULL, fi->spec_addr, &searchphys, 0))
+                                        return;
+                        }
+                        break;
+                case PHYSADDR:
+                        searchphys = fi->spec_addr;
+                        break;
+                default:
+                        error(FATAL, 
+			    "dump_free_pages_zones_v1: no memtype specified\n");
+                }
+		do_search = TRUE;
+        } else {
+                searchphys = 0;
+		do_search = FALSE;
+	}
+        verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE;
+
+	if (VALID_MEMBER(zone_struct_size))
+		zone_size_offset =  OFFSET(zone_struct_size);
+	else if (VALID_MEMBER(zone_struct_memsize))
+		zone_size_offset =  OFFSET(zone_struct_memsize);
+	else
+		error(FATAL, 
+			"zone_struct has neither size nor memsize field\n");
+
+	if (do_search)
+		open_tmpfile();
+
+	hq_open();
+
+	for (n = sum = found = 0; n < vt->numnodes; n++) {
+                nt = &vt->node_table[n];
+		node_zones = nt->pgdat + OFFSET(pglist_data_node_zones);
+
+		for (i = 0; i < vt->nr_zones; i++) {
+	
+			if (fi->flags == GET_FREE_PAGES) {
+	                	readmem(node_zones+
+					OFFSET(zone_struct_free_pages), 
+					KVADDR, &value, sizeof(ulong),
+	                        	"node_zones free_pages", 
+					FAULT_ON_ERROR);
+				sum += value;
+				node_zones += SIZE(zone_struct);
+				continue;
+			}
+	
+	                if (fi->flags == GET_FREE_HIGHMEM_PAGES) {
+	                        if (i == vt->ZONE_HIGHMEM) {
+	                                readmem(node_zones+
+						OFFSET(zone_struct_free_pages),
+						KVADDR, &value, sizeof(ulong),
+	                                        "node_zones free_pages",
+	                                        FAULT_ON_ERROR);
+	                                sum += value;
+	                        }
+	                        node_zones += SIZE(zone_struct);
+	                        continue;
+	                }
+	
+			if (fi->flags == GET_ZONE_SIZES) {
+	                	readmem(node_zones+zone_size_offset, 
+					KVADDR, &size, sizeof(ulong),
+	                        	"node_zones {mem}size", FAULT_ON_ERROR);
+	                        sum += size;
+	                        node_zones += SIZE(zone_struct);
+	                        continue;
+			}
+
+			if ((i == 0) && (vt->flags & NODES)) {
+				if (n) {
+					fprintf(fp, "\n");
+                                	pad_line(fp, 
+						VADDR_PRLEN > 8 ? 74 : 66, '-');
+                                	fprintf(fp, "\n");
+				}
+				fprintf(fp, "%sNODE\n %2d\n", 
+					n ? "\n" : "", nt->node_id);
+			}
+
+	                fprintf(fp, "%s%s  %s  START_PADDR  START_MAPNR\n",
+				i > 0 ? "\n" : "",
+	                        zone_hdr,
+	                        mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, 
+				    "MEM_MAP"));
+	
+			fprintf(fp, "%3d   ", i);
+	
+	        	readmem(node_zones+OFFSET(zone_struct_name), KVADDR, 
+				&value, sizeof(void *), 
+				"node_zones name", FAULT_ON_ERROR);
+	                if (read_string(value, buf, BUFSIZE-1))
+	                	fprintf(fp, "%-9s ", buf);
+			else
+				fprintf(fp, "(unknown) ");
+	
+	        	readmem(node_zones+zone_size_offset, KVADDR, 
+				&size, sizeof(ulong), 
+				"node_zones {mem}size", FAULT_ON_ERROR);
+	                fprintf(fp, "%6ld  ", size);
+	
+	        	readmem(node_zones+OFFSET(zone_struct_free_pages), 
+				KVADDR, &value, sizeof(ulong), 
+				"node_zones free_pages", FAULT_ON_ERROR);
+	
+	                fprintf(fp, "%6ld  ", value);
+	
+	                readmem(node_zones+OFFSET(zone_struct_zone_start_paddr),
+	                        KVADDR, &zone_start_paddr, sizeof(ulong),
+	                        "node_zones zone_start_paddr", FAULT_ON_ERROR);
+	                readmem(node_zones+OFFSET(zone_struct_zone_start_mapnr),
+	                        KVADDR, &zone_start_mapnr, sizeof(ulong),
+	                        "node_zones zone_start_mapnr", FAULT_ON_ERROR);
+	                readmem(node_zones+OFFSET(zone_struct_zone_mem_map),
+	                        KVADDR, &zone_mem_map, sizeof(ulong),
+	                        "node_zones zone_mem_map", FAULT_ON_ERROR);
+	
+	                fprintf(fp, "%s  %s  %s\n",
+	                	mkstring(buf1, VADDR_PRLEN,
+	                            CENTER|LONG_HEX,MKSTR(zone_mem_map)),
+	                	mkstring(buf2, strlen("START_PADDR"),
+	                            CENTER|LONG_HEX|RJUST,
+					MKSTR(zone_start_paddr)),
+	                	mkstring(buf3, strlen("START_MAPNR"),
+	                            CENTER|LONG_DEC|RJUST,
+					MKSTR(zone_start_mapnr)));
+	
+			sum += value;
+
+			if (value)
+				found += dump_zone_free_area(node_zones+
+					OFFSET(zone_struct_free_area), 
+					vt->nr_free_areas, verbose);
+
+			node_zones += SIZE(zone_struct);
+		}
+	}
+
+	hq_close();
+
+        if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) {
+                fi->retval = sum;
+                return;
+        }
+
+	fprintf(fp, "\nnr_free_pages: %ld  ", sum);
+	if (sum == found)
+		fprintf(fp, "(verified)\n");
+	else
+		fprintf(fp, "(found %ld)\n", found);
+
+	if (!do_search)
+		return;
+
+        found = FALSE;
+        rewind(pc->tmpfile);
+        order = offset = 0;
+	last_node[0] = NULLCHAR;
+        last_zone[0] = NULLCHAR;
+        last_area[0] = NULLCHAR;
+        last_area_hdr[0] = NULLCHAR;
+
+
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem"))
+			continue;
+
+		if (STRNEQ(buf, "nr_free_pages:"))
+			continue;
+
+		if (STRNEQ(buf, "NODE")) { 
+			fgets(buf, BUFSIZE, pc->tmpfile);
+			strcpy(last_node, strip_linefeeds(buf));
+			continue;
+		}
+		if (STRNEQ(buf, "ZONE")) {
+			fgets(buf, BUFSIZE, pc->tmpfile);
+			strcpy(last_zone, strip_linefeeds(buf));
+			continue;
+		}
+		if (STRNEQ(buf, "AREA")) {
+                        strcpy(last_area_hdr, buf);
+                        fgets(buf, BUFSIZE, pc->tmpfile);
+                        strcpy(last_area, strip_linefeeds(buf));
+			p = strstr(buf, "k");
+			*p = NULLCHAR;
+			while (*p != ' ')
+				p--;
+			chunk_size = atol(p+1) * 1024;
+			if (chunk_size == PAGESIZE())
+				order = 0;
+			else
+				order++;
+                        continue;
+                }
+
+                if (CRASHDEBUG(0) &&
+                    !hexadecimal(strip_linefeeds(buf), 0))
+                        continue;
+
+                errflag = 0;
+                this_addr = htol(strip_linefeeds(buf),
+                        RETURN_ON_ERROR, &errflag);
+                if (errflag)
+                        continue;
+
+                if (!page_to_phys(this_addr, &this_phys))
+                        continue;
+
+                if ((searchphys >= this_phys) &&
+                    (searchphys < (this_phys+chunk_size))) {
+                        if (searchphys > this_phys)
+                                offset = (searchphys - this_phys)/PAGESIZE();
+                        found = TRUE;
+                        break;
+                }
+
+	}
+	close_tmpfile();
+
+	if (found) {
+		if (strlen(last_node)) 
+			fprintf(fp, "NODE\n%s\n", last_node); 
+                fprintf(fp, "%s  %s  START_PADDR  START_MAPNR\n",
+                        zone_hdr,
+                        mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP"));
+		fprintf(fp, "%s\n", last_zone);
+		fprintf(fp, last_area_hdr);
+		fprintf(fp, "%s\n", last_area);
+		fprintf(fp, "%lx  ", this_addr);
+                if (order) {
+                	switch (fi->memtype)
+                	{
+                	case KVADDR:
+                                fprintf(fp, "(%lx is ", (ulong)fi->spec_addr);
+                        	break;
+                	case PHYSADDR:
+                                fprintf(fp, "(%llx is %s", fi->spec_addr,
+                                    PAGEOFFSET(fi->spec_addr) ?  "in " : "");
+                        	break;
+			}
+                        fprintf(fp, "%s of %ld pages) ",
+                                ordinal(offset+1, buf), power(2, order));
+                }
+
+		fi->retval = TRUE;
+                fprintf(fp, "\n");
+	}
+}
+
+
+/*
+ *  Same as dump_free_pages_zones_v1(), but updated for numerous 2.6 zone 
+ *  and free_area related data structure changes.
+ */
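+/*
+ *  The visible differences from the v1 version, as handled below: the zone
+ *  size comes from zone.spanned_pages rather than zone_struct.size/memsize,
+ *  the start physical address is derived from zone_start_pfn via PTOB(),
+ *  and when zone.zone_mem_map is absent the mem_map pointer is recomputed
+ *  for the SPARSEMEM/DISCONTIGMEM and FLATMEM memory models.
+ */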
+static void
+dump_free_pages_zones_v2(struct meminfo *fi)
+{
+	int i, n;
+	ulong node_zones;
+	ulong size;
+	long zone_size_offset;
+	long chunk_size;
+	int order, errflag, do_search;
+	ulong offset, verbose, value, sum, found; 
+	ulong this_addr;
+	physaddr_t phys, this_phys, searchphys;
+	ulong pp;
+        ulong zone_mem_map;
+        ulong zone_start_paddr;
+	ulong zone_start_pfn;
+        ulong zone_start_mapnr;
+	struct node_table *nt;
+	char buf[BUFSIZE], *p;
+	char buf1[BUFSIZE];
+	char buf2[BUFSIZE];
+	char buf3[BUFSIZE];
+	char last_node[BUFSIZE];
+	char last_zone[BUFSIZE];
+	char last_area[BUFSIZE];
+	char last_area_hdr[BUFSIZE];
+
+       if (!(vt->flags & (NODES|ZONES)))
+		error(FATAL, 
+		    "dump_free_pages_zones_v2 called without (NODES|ZONES)\n");
+
+        if (fi->flags & ADDRESS_SPECIFIED) {
+                switch (fi->memtype)
+                {
+                case KVADDR:
+                        if (!page_to_phys(fi->spec_addr, &searchphys)) {
+                                if (!kvtop(NULL, fi->spec_addr, &searchphys, 0))
+                                        return;
+                        }
+                        break;
+                case PHYSADDR:
+                        searchphys = fi->spec_addr;
+                        break;
+                default:
+                        error(FATAL, 
+			    "dump_free_pages_zones_v2: no memtype specified\n");
+                }
+		do_search = TRUE;
+        } else {
+                searchphys = 0;
+		do_search = FALSE;
+	}
+
+        verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE;
+
+	if (VALID_MEMBER(zone_spanned_pages))
+		zone_size_offset =  OFFSET(zone_spanned_pages);
+	else
+		error(FATAL, "zone struct has no spanned_pages field\n");
+
+	if (do_search)
+		open_tmpfile();
+
+	hq_open();
+
+	for (n = sum = found = 0; n < vt->numnodes; n++) {
+                nt = &vt->node_table[n];
+		node_zones = nt->pgdat + OFFSET(pglist_data_node_zones);
+
+		for (i = 0; i < vt->nr_zones; i++) {
+			if (fi->flags == GET_FREE_PAGES) {
+	                	readmem(node_zones+
+					OFFSET(zone_free_pages), 
+					KVADDR, &value, sizeof(ulong),
+	                        	"node_zones free_pages", 
+					FAULT_ON_ERROR);
+				sum += value;
+				node_zones += SIZE(zone);
+				continue;
+			}
+	
+	                if (fi->flags == GET_FREE_HIGHMEM_PAGES) {
+	                        if (i == vt->ZONE_HIGHMEM) {
+	                                readmem(node_zones+
+						OFFSET(zone_free_pages),
+						KVADDR, &value, sizeof(ulong),
+	                                        "node_zones free_pages",
+	                                        FAULT_ON_ERROR);
+	                                sum += value;
+	                        }
+	                        node_zones += SIZE(zone);
+	                        continue;
+	                }
+	
+			if (fi->flags == GET_ZONE_SIZES) {
+	                	readmem(node_zones+zone_size_offset, 
+					KVADDR, &size, sizeof(ulong),
+	                        	"node_zones size", FAULT_ON_ERROR);
+	                        sum += size;
+	                        node_zones += SIZE(zone);
+	                        continue;
+			}
+
+			if ((i == 0) && ((vt->flags & NODES) || (vt->numnodes > 1))) {
+				if (n) {
+					fprintf(fp, "\n");
+					pad_line(fp, 
+						VADDR_PRLEN > 8 ? 74 : 66, '-');
+					fprintf(fp, "\n");
+				}
+				fprintf(fp, "%sNODE\n %2d\n", 
+					n ? "\n" : "", nt->node_id);
+			}
+
+	                fprintf(fp, "%s%s  %s  START_PADDR  START_MAPNR\n",
+				i > 0 ? "\n" : "",
+	                        zone_hdr,
+	                        mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, 
+				    "MEM_MAP"));
+	
+			fprintf(fp, "%3d   ", i);
+	
+	        	readmem(node_zones+OFFSET(zone_name), KVADDR, 
+				&value, sizeof(void *), 
+				"node_zones name", FAULT_ON_ERROR);
+	                if (read_string(value, buf, BUFSIZE-1))
+	                	fprintf(fp, "%-9s ", buf);
+			else
+				fprintf(fp, "(unknown) ");
+	
+	        	readmem(node_zones+zone_size_offset, KVADDR, 
+				&size, sizeof(ulong), 
+				"node_zones size", FAULT_ON_ERROR);
+	                fprintf(fp, "%6ld  ", size);
+	
+	        	readmem(node_zones+OFFSET(zone_free_pages), 
+				KVADDR, &value, sizeof(ulong), 
+				"node_zones free_pages", FAULT_ON_ERROR);
+	
+	                fprintf(fp, "%6ld  ", value);
+	
+			if (VALID_MEMBER(zone_zone_mem_map)) {
+                        	readmem(node_zones+OFFSET(zone_zone_mem_map),
+                                	KVADDR, &zone_mem_map, sizeof(ulong),
+                                	"node_zones zone_mem_map", FAULT_ON_ERROR);
+			}
+
+			readmem(node_zones+ OFFSET(zone_zone_start_pfn),
+                                KVADDR, &zone_start_pfn, sizeof(ulong),
+                                "node_zones zone_start_pfn", FAULT_ON_ERROR);
+                        zone_start_paddr = PTOB(zone_start_pfn);
+
+			if (!VALID_MEMBER(zone_zone_mem_map)) {
+				if (IS_SPARSEMEM() || IS_DISCONTIGMEM()) {
+					zone_mem_map = 0;
+					if (size) {
+						phys = PTOB(zone_start_pfn);
+                                        	if (phys_to_page(phys, &pp))
+                                                	zone_mem_map = pp;
+					}
+				} else if (vt->flags & FLATMEM) {
+					zone_mem_map = 0;
+					if (size)
+						zone_mem_map = nt->mem_map +
+							(zone_start_pfn * SIZE(page));
+				} else
+					error(FATAL, "\ncannot determine zone mem_map: TBD\n");
+			}
+
+                        if (zone_mem_map) 
+                        	zone_start_mapnr = 
+					(zone_mem_map - nt->mem_map) / 
+						SIZE(page);
+                        else
+                                zone_start_mapnr = 0;
+	
+	                fprintf(fp, "%s  %s  %s\n",
+	                	mkstring(buf1, VADDR_PRLEN,
+	                            CENTER|LONG_HEX,MKSTR(zone_mem_map)),
+	                	mkstring(buf2, strlen("START_PADDR"),
+	                            CENTER|LONG_HEX|RJUST,
+					MKSTR(zone_start_paddr)),
+	                	mkstring(buf3, strlen("START_MAPNR"),
+	                            CENTER|LONG_DEC|RJUST,
+					MKSTR(zone_start_mapnr)));
+	
+			sum += value;
+
+			if (value)
+				found += dump_zone_free_area(node_zones+
+					OFFSET(zone_free_area), 
+					vt->nr_free_areas, verbose);
+
+			node_zones += SIZE(zone);
+		}
+	}
+
+	hq_close();
+
+        if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) {
+                fi->retval = sum;
+                return;
+        }
+
+	fprintf(fp, "\nnr_free_pages: %ld  ", sum);
+	if (sum == found)
+		fprintf(fp, "(verified)\n");
+	else
+		fprintf(fp, "(found %ld)\n", found);
+
+	if (!do_search)
+		return;
+
+        found = FALSE;
+        rewind(pc->tmpfile);
+        order = offset = 0;
+	last_node[0] = NULLCHAR;
+        last_zone[0] = NULLCHAR;
+        last_area[0] = NULLCHAR;
+        last_area_hdr[0] = NULLCHAR;
+
+
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem"))
+			continue;
+
+		if (STRNEQ(buf, "nr_free_pages:"))
+			continue;
+
+		if (STRNEQ(buf, "NODE")) { 
+			fgets(buf, BUFSIZE, pc->tmpfile);
+			strcpy(last_node, strip_linefeeds(buf));
+			continue;
+		}
+		if (STRNEQ(buf, "ZONE")) {
+			fgets(buf, BUFSIZE, pc->tmpfile);
+			strcpy(last_zone, strip_linefeeds(buf));
+			continue;
+		}
+		if (STRNEQ(buf, "AREA")) {
+                        strcpy(last_area_hdr, buf);
+                        fgets(buf, BUFSIZE, pc->tmpfile);
+                        strcpy(last_area, strip_linefeeds(buf));
+			p = strstr(buf, "k");
+			*p = NULLCHAR;
+			while (*p != ' ')
+				p--;
+			chunk_size = atol(p+1) * 1024;
+			if (chunk_size == PAGESIZE())
+				order = 0;
+			else
+				order++;
+                        continue;
+                }
+
+                if (CRASHDEBUG(0) &&
+                    !hexadecimal(strip_linefeeds(buf), 0)) 
+                        continue;
+
+                errflag = 0;
+                this_addr = htol(strip_linefeeds(buf),
+                        RETURN_ON_ERROR, &errflag);
+                if (errflag)
+                        continue;
+
+                if (!page_to_phys(this_addr, &this_phys)) 
+                        continue;
+
+                if ((searchphys >= this_phys) &&
+                    (searchphys < (this_phys+chunk_size))) {
+                        if (searchphys > this_phys)
+                                offset = (searchphys - this_phys)/PAGESIZE();
+                        found = TRUE;
+                        break;
+                }
+
+	}
+	close_tmpfile();
+
+	if (found) {
+		if (strlen(last_node)) 
+			fprintf(fp, "NODE\n%s\n", last_node); 
+                fprintf(fp, "%s  %s  START_PADDR  START_MAPNR\n",
+                        zone_hdr,
+                        mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP"));
+		fprintf(fp, "%s\n", last_zone);
+		fprintf(fp, last_area_hdr);
+		fprintf(fp, "%s\n", last_area);
+		fprintf(fp, "%lx  ", this_addr);
+                if (order) {
+                	switch (fi->memtype)
+                	{
+                	case KVADDR:
+                                fprintf(fp, "(%lx is ", (ulong)fi->spec_addr);
+                        	break;
+                	case PHYSADDR:
+                                fprintf(fp, "(%llx is %s", fi->spec_addr,
+                                    PAGEOFFSET(fi->spec_addr) ?  "in " : "");
+                        	break;
+			}
+                        fprintf(fp, "%s of %ld pages)",
+                                ordinal(offset+1, buf), chunk_size/PAGESIZE());
+                }
+
+		fi->retval = TRUE;
+                fprintf(fp, "\n");
+	}
+}
+
+
+static char *
+page_usage_hdr = "ZONE  NAME        FREE   ACTIVE  INACTIVE_DIRTY  INACTIVE_CLEAN  MIN/LOW/HIGH";
+
+/*
+ *  Display info about the non-free pages in each zone.
+ */
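+/*
+ *  This applies only to the older zone_struct layout that still carries
+ *  the inactive_dirty/inactive_clean/active page counters and the
+ *  pages_min/low/high watermarks; when any of those members is missing it
+ *  returns FALSE without printing, so dump_kmeminfo() simply omits the
+ *  per-zone usage table on such kernels.
+ */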
+static int
+dump_zone_page_usage(void)
+{
+	int i, n;
+	ulong value, node_zones;
+	struct node_table *nt;
+	ulong inactive_dirty_pages, inactive_clean_pages, active_pages; 
+	ulong free_pages, pages_min, pages_low, pages_high;
+	char namebuf[BUFSIZE];
+	char buf1[BUFSIZE];
+	char buf2[BUFSIZE];
+	char buf3[BUFSIZE];
+
+	if (!VALID_MEMBER(zone_struct_inactive_dirty_pages) ||
+	    !VALID_MEMBER(zone_struct_inactive_clean_pages) ||
+	    !VALID_MEMBER(zone_struct_active_pages) ||
+	    !VALID_MEMBER(zone_struct_pages_min) ||
+	    !VALID_MEMBER(zone_struct_pages_low) ||
+	    !VALID_MEMBER(zone_struct_pages_high))
+		return FALSE;
+
+	fprintf(fp, "\n");
+
+        for (n = 0; n < vt->numnodes; n++) {
+                nt = &vt->node_table[n];
+                node_zones = nt->pgdat + OFFSET(pglist_data_node_zones);
+                
+		if (vt->flags & NODES) {
+                	fprintf(fp, "%sNODE\n %2d\n",
+                        	n ? "\n" : "", nt->node_id);
+                }
+		fprintf(fp, "%s\n", page_usage_hdr);
+
+                for (i = 0; i < vt->nr_zones; i++) {
+			readmem(node_zones+OFFSET(zone_struct_free_pages),
+                                KVADDR, &free_pages, sizeof(ulong),
+                                "node_zones free_pages", FAULT_ON_ERROR);
+		        readmem(node_zones+
+				OFFSET(zone_struct_inactive_dirty_pages),
+		                KVADDR, &inactive_dirty_pages, sizeof(ulong),
+		                "node_zones inactive_dirty_pages", 
+				FAULT_ON_ERROR);
+		        readmem(node_zones+
+				OFFSET(zone_struct_inactive_clean_pages),
+		                KVADDR, &inactive_clean_pages, sizeof(ulong),
+		                "node_zones inactive_clean_pages", 
+				FAULT_ON_ERROR);
+		        readmem(node_zones+OFFSET(zone_struct_active_pages),
+		                KVADDR, &active_pages, sizeof(ulong),
+		                "node_zones active_pages", FAULT_ON_ERROR);
+		        readmem(node_zones+OFFSET(zone_struct_pages_min),
+		                KVADDR, &pages_min, sizeof(ulong),
+		                "node_zones pages_min", FAULT_ON_ERROR);
+		        readmem(node_zones+OFFSET(zone_struct_pages_low),
+		                KVADDR, &pages_low, sizeof(ulong),
+		                "node_zones pages_low", FAULT_ON_ERROR);
+		        readmem(node_zones+OFFSET(zone_struct_pages_high),
+		                KVADDR, &pages_high, sizeof(ulong),
+		                "node_zones pages_high", FAULT_ON_ERROR);
+
+                        readmem(node_zones+OFFSET(zone_struct_name), KVADDR,
+                                &value, sizeof(void *),
+                                "node_zones name", FAULT_ON_ERROR);
+                        if (read_string(value, buf1, BUFSIZE-1))
+                                sprintf(namebuf, "%-8s", buf1);
+                        else
+                                sprintf(namebuf, "(unknown)");
+
+		        sprintf(buf2, "%ld/%ld/%ld", 
+				pages_min, pages_low, pages_high);
+		        fprintf(fp, "%3d   %s %7ld  %7ld %15ld %15ld  %s\n",
+				i,
+				namebuf,
+		                free_pages,
+		                active_pages,
+		                inactive_dirty_pages,
+		                inactive_clean_pages,
+		                mkstring(buf3, strlen("MIN/LOW/HIGH"), 
+				CENTER, buf2));
+
+			node_zones += SIZE(zone_struct);
+		}
+	}
+
+	return TRUE;
+}
+
+
+/*
+ *  Dump the num "order" contents of the zone_t free_area array.
+ */
+char *free_area_hdr3 = "AREA    SIZE  FREE_AREA_STRUCT\n";
+char *free_area_hdr4 = "AREA    SIZE  FREE_AREA_STRUCT  BLOCKS  PAGES\n";
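+/*
+ *  Two free_area layouts are handled below: the original 3-ulong
+ *  free_area_struct (one free list per order), and the later free_area
+ *  structure whose free_list member is an array of list_heads (in later
+ *  kernels presumably one list per migrate type), in which case every
+ *  list of each order is walked via the "multiple_lists" path.
+ */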
+
+static int
+dump_zone_free_area(ulong free_area, int num, ulong verbose)
+{
+	int i, j;
+	long chunk_size;
+	int flen, total_free, cnt;
+	char buf[BUFSIZE];
+	ulong free_area_buf[3];
+	char *free_area_buf2;
+	char *free_list_buf;
+	ulong free_list;
+	struct list_data list_data, *ld;
+	int list_count;
+	ulong *free_ptr;
+
+	if (VALID_STRUCT(free_area_struct)) {
+		if (SIZE(free_area_struct) != (3 * sizeof(ulong)))
+			error(FATAL, 
+			    "unrecognized free_area_struct size: %ld\n", 
+				SIZE(free_area_struct));
+		list_count = 1;
+	} else if (VALID_STRUCT(free_area)) {
+                if (SIZE(free_area) == (3 * sizeof(ulong)))
+			list_count = 1;
+		else {
+			list_count = MEMBER_SIZE("free_area", 
+				"free_list")/SIZE(list_head);
+			free_area_buf2 = GETBUF(SIZE(free_area));
+			free_list_buf = GETBUF(SIZE(list_head));
+			readmem(free_area, KVADDR, free_area_buf2,
+				SIZE(free_area), "free_area struct", 
+				FAULT_ON_ERROR);
+		}
+	} else error(FATAL, 
+		"neither free_area_struct nor free_area structures exist\n");
+
+	ld = &list_data;
+
+	if (!verbose)
+		fprintf(fp, free_area_hdr4);
+
+	total_free = 0;
+	flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT"));
+
+	if (list_count > 1)
+		goto multiple_lists;
+
+	for (i = 0; i < num; i++, 
+	     free_area += SIZE_OPTION(free_area_struct, free_area)) {
+		if (verbose)
+			fprintf(fp, free_area_hdr3);
+		fprintf(fp, "%3d ", i);
+		chunk_size = power(2, i);
+		sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024);
+                fprintf(fp, " %7s  ", buf);
+
+                readmem(free_area, KVADDR, free_area_buf,
+                        sizeof(ulong) * 3, "free_area_struct", FAULT_ON_ERROR);
+
+		fprintf(fp, "%s  ",
+			mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_area)));
+
+		if (free_area_buf[0] == free_area) {
+			if (verbose)
+				fprintf(fp, "\n");
+			else
+				fprintf(fp, "%6d %6d\n", 0, 0);
+			continue;
+		}
+	
+		if (verbose)
+			fprintf(fp, "\n");
+
+                BZERO(ld, sizeof(struct list_data));
+                ld->flags = verbose | RETURN_ON_DUPLICATE;
+                ld->start = free_area_buf[0];
+                ld->end = free_area;
+		if (VALID_MEMBER(page_list_next))
+			ld->list_head_offset = OFFSET(page_list);
+        	else if (VALID_MEMBER(page_lru))
+			ld->list_head_offset = OFFSET(page_lru)+
+				OFFSET(list_head_next);
+		else error(FATAL, 
+			"neither page.list nor page.lru exists?\n");
+
+                cnt = do_list(ld);
+		if (cnt < 0) 
+			error(FATAL, 
+			    "corrupted free list from free_area_struct: %lx\n", 
+				free_area);
+
+		if (!verbose)
+			fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size);
+
+                total_free += (cnt * chunk_size);
+	}
+
+	return total_free;
+
+multiple_lists:
+
+	for (i = 0; i < num; i++, 
+	     free_area += SIZE_OPTION(free_area_struct, free_area)) {
+
+		readmem(free_area, KVADDR, free_area_buf2,
+			SIZE(free_area), "free_area struct", FAULT_ON_ERROR);
+
+		for (j = 0, free_list = free_area; j < list_count; 
+		     j++, free_list += SIZE(list_head)) {
+
+			if (verbose)
+				fprintf(fp, free_area_hdr3);
+
+			fprintf(fp, "%3d ", i);
+			chunk_size = power(2, i);
+			sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024);
+			fprintf(fp, " %7s  ", buf);
+
+			readmem(free_list, KVADDR, free_list_buf,
+				SIZE(list_head), "free_area free_list", 
+				FAULT_ON_ERROR);
+			fprintf(fp, "%s  ",
+				mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_list)));
+
+			free_ptr = (ulong *)free_list_buf;
+
+			if (*free_ptr == free_list) {
+				if (verbose)
+					fprintf(fp, "\n");
+				else
+					fprintf(fp, "%6d %6d\n", 0, 0);
+				continue;
+			}
+
+			BZERO(ld, sizeof(struct list_data));
+			ld->flags = verbose | RETURN_ON_DUPLICATE;
+			ld->start = *free_ptr;
+			ld->end = free_list;
+			ld->list_head_offset = OFFSET(page_lru) + 
+				OFFSET(list_head_next);
+
+			cnt = do_list(ld);
+			if (cnt < 0) 
+				error(FATAL, 
+				    "corrupted free list %d from free_area struct: %lx\n", 
+					j, free_area);
+
+			if (!verbose)
+				fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size);
+
+			total_free += (cnt * chunk_size);
+		}
+	}
+
+	FREEBUF(free_area_buf2);
+	FREEBUF(free_list_buf);
+	return total_free;
+}
+
+/*
+ *  dump_kmeminfo displays basic memory use information typically shown 
+ *  by /proc/meminfo, and then some...
+ */
+
+char *kmeminfo_hdr = "              PAGES        TOTAL      PERCENTAGE\n";
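+/*
+ *  Each line below prints a page count, its byte total via pages_to_size(),
+ *  and an integer percentage computed as (pages * 100)/totalram_pages, so
+ *  small consumers round down: e.g. 1023 pages out of 524288 total shows
+ *  as 0% (illustrative figures, not taken from a real dump).
+ */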
+
+static void
+dump_kmeminfo(void)
+{
+	ulong totalram_pages;
+	ulong freeram_pages;
+	ulong used_pages;
+	ulong shared_pages;
+	ulong buffer_pages;
+	ulong subtract_buffer_pages;
+	ulong totalswap_pages, totalused_pages;
+        ulong totalhigh_pages;
+        ulong freehighmem_pages;
+        ulong totallowmem_pages;
+        ulong freelowmem_pages;
+	long nr_file_pages, nr_slab;
+	ulong swapper_space_nrpages;
+	ulong pct;
+	ulong value1, value2;
+	uint tmp;
+	struct meminfo meminfo;
+	struct gnu_request req;
+	long page_cache_size;
+        ulong get_totalram;
+        ulong get_buffers;
+        ulong get_slabs;
+        struct syment *sp_array[2];
+	char buf[BUFSIZE];
+
+
+	BZERO(&meminfo, sizeof(struct meminfo));
+	meminfo.flags = GET_ALL;
+	dump_mem_map(&meminfo);
+	get_totalram = meminfo.get_totalram;
+	shared_pages = meminfo.get_shared;
+	get_buffers = meminfo.get_buffers;
+	get_slabs = meminfo.get_slabs;
+
+	/*
+	 *  If vm_stat array exists, override page search info.
+	 */
+	if (vm_stat_init()) {
+		if (dump_vm_stat("NR_SLAB", &nr_slab, 0))
+			get_slabs = nr_slab;
+		else if (dump_vm_stat("NR_SLAB_RECLAIMABLE", &nr_slab, 0)) {
+			get_slabs = nr_slab;
+			if (dump_vm_stat("NR_SLAB_UNRECLAIMABLE", &nr_slab, 0))
+				get_slabs += nr_slab;
+		}
+	}
+
+	fprintf(fp, kmeminfo_hdr);
+	/*
+	 *  Get total RAM based upon how the various versions of si_meminfo()
+         *  have done it, latest to earliest:
+	 *
+         *    From 2.3.36 onwards, use "totalram_pages" if set.
+         *    Prior to 2.3.36, count all mem_map pages minus the reserved ones.
+	 */
+	if (symbol_exists("totalram_pages")) {  
+		totalram_pages = vt->totalram_pages ? 
+			vt->totalram_pages : get_totalram; 
+	} else 
+		totalram_pages = get_totalram;
+
+	fprintf(fp, "%10s  %7ld  %11s         ----\n", "TOTAL MEM", 
+		totalram_pages, pages_to_size(totalram_pages, buf));
+
+	/*
+	 *  Get free pages from dump_free_pages() or its associates.
+	 *  Used pages are then a freebie: total minus free.
+	 */
+	meminfo.flags = GET_FREE_PAGES;
+	vt->dump_free_pages(&meminfo);
+	freeram_pages = meminfo.retval;
+        pct = (freeram_pages * 100)/totalram_pages;
+	fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
+		"FREE", freeram_pages, pages_to_size(freeram_pages, buf), pct);
+
+	used_pages = totalram_pages - freeram_pages;
+        pct = (used_pages * 100)/totalram_pages;
+        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
+		"USED", used_pages, pages_to_size(used_pages, buf), pct);
+
+	/*
+	 *  Get shared pages from dump_mem_map().  Note that this is done
+         *  differently from the kernel -- it simply tallies the non-reserved
+         *  pages whose count is greater than 1.
+	 */
+        pct = (shared_pages * 100)/totalram_pages;
+        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
+		"SHARED", shared_pages, pages_to_size(shared_pages, buf), pct);
+
+	subtract_buffer_pages = 0;
+	if (symbol_exists("buffermem_pages")) { 
+                get_symbol_data("buffermem_pages", sizeof(int), &tmp);
+		buffer_pages = (ulong)tmp;
+	} else if (symbol_exists("buffermem")) {
+                get_symbol_data("buffermem", sizeof(int), &tmp);
+		buffer_pages = BTOP(tmp);
+	} else if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) && 
+		symbol_exists("nr_blockdev_pages")) {
+		subtract_buffer_pages = buffer_pages = nr_blockdev_pages();
+	} else
+		buffer_pages = 0;
+
+        pct = (buffer_pages * 100)/totalram_pages;
+        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
+		"BUFFERS", buffer_pages, pages_to_size(buffer_pages, buf), pct);
+
+	if (CRASHDEBUG(1)) 
+        	error(NOTE, "pages with buffers: %ld\n", get_buffers);
+
+	/*
+	 *  page_cache_size has evolved from a long to an atomic_t to
+	 *  not existing at all.
+	 */
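+	/*
+	 *  Accordingly, the cached-page count is taken from whichever source
+	 *  exists: the page_cache_size symbol (int or long), nr_pagecache,
+	 *  or the NR_FILE_PAGES vm_stat entry minus swapper_space.nrpages
+	 *  and the block device buffer pages counted above.
+	 */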
+	
+	if (symbol_exists("page_cache_size")) {
+		get_symbol_type("page_cache_size", NULL, &req);
+        	if (req.length == sizeof(int)) {
+                	get_symbol_data("page_cache_size", sizeof(int), &tmp);
+                	page_cache_size = (long)tmp;
+        	} else
+                	get_symbol_data("page_cache_size", sizeof(long),
+                        	&page_cache_size);
+		page_cache_size -= subtract_buffer_pages;
+	} else if (symbol_exists("nr_pagecache")) {
+               	get_symbol_data("nr_pagecache", sizeof(int), &tmp);
+               	page_cache_size = (long)tmp;
+		page_cache_size -= subtract_buffer_pages;
+	} else if (dump_vm_stat("NR_FILE_PAGES", &nr_file_pages, 0)) {
+		char *swapper_space = GETBUF(SIZE(address_space));
+		
+                if (!readmem(symbol_value("swapper_space"), KVADDR, swapper_space,
+                    SIZE(address_space), "swapper_space", RETURN_ON_ERROR))
+			swapper_space_nrpages = 0;
+		else
+			swapper_space_nrpages = ULONG(swapper_space + 
+				OFFSET(address_space_nrpages));
+			
+		page_cache_size = nr_file_pages - swapper_space_nrpages -
+			buffer_pages;
+		FREEBUF(swapper_space);
+	}
+
+
+        pct = (page_cache_size * 100)/totalram_pages;
+        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
+		"CACHED", page_cache_size, 
+		pages_to_size(page_cache_size, buf), pct);
+
+	/*
+ 	 *  Although /proc/meminfo doesn't show it, show how much memory
+	 *  the slabs take up.
+	 */
+
+        pct = (get_slabs * 100)/totalram_pages;
+	fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n",
+		"SLAB", get_slabs, pages_to_size(get_slabs, buf), pct);
+
+        if (symbol_exists("totalhigh_pages")) {
+	        switch (get_syment_array("totalhigh_pages", sp_array, 2))
+	        {
+	        case 1:
+	                get_symbol_data("totalhigh_pages", sizeof(ulong),
+	                        &totalhigh_pages);
+	                break;
+	        case 2:
+	                if (!(readmem(sp_array[0]->value, KVADDR,
+	                    &value1, sizeof(ulong),
+	                    "totalhigh_pages #1", RETURN_ON_ERROR)))
+	                        break;
+	                if (!(readmem(sp_array[1]->value, KVADDR,
+	                    &value2, sizeof(ulong),
+	                    "totalhigh_pages #2", RETURN_ON_ERROR)))
+	                        break;
+	                totalhigh_pages = MAX(value1, value2);
+	                break;
+	        }
+
+		pct = totalhigh_pages ?
+			(totalhigh_pages * 100)/totalram_pages : 0;
+                fprintf(fp, "\n%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
+			"TOTAL HIGH", totalhigh_pages, 
+			pages_to_size(totalhigh_pages, buf), pct);
+
+		meminfo.flags = GET_FREE_HIGHMEM_PAGES;
+                vt->dump_free_pages(&meminfo);
+		freehighmem_pages = meminfo.retval;
+        	pct = freehighmem_pages ?  
+			(freehighmem_pages * 100)/totalhigh_pages : 0;
+                fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL HIGH\n", 
+			"FREE HIGH", freehighmem_pages, 
+			pages_to_size(freehighmem_pages, buf), pct);
+
+                totallowmem_pages = totalram_pages - totalhigh_pages;
+		pct = (totallowmem_pages * 100)/totalram_pages;
+                fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
+			"TOTAL LOW", totallowmem_pages, 
+			pages_to_size(totallowmem_pages, buf), pct);
+
+                freelowmem_pages = freeram_pages - freehighmem_pages;
+        	pct = (freelowmem_pages * 100)/totallowmem_pages;
+                fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL LOW\n", 
+			"FREE LOW", freelowmem_pages, 
+			pages_to_size(freelowmem_pages, buf), pct);
+        }
+
+        /*
+         *  get swap data from dump_swap_info().
+         */
+	fprintf(fp, "\n");
+        if (dump_swap_info(RETURN_ON_ERROR, &totalswap_pages, 
+	    &totalused_pages)) {
+	        fprintf(fp, "%10s  %7ld  %11s         ----\n", 
+			"TOTAL SWAP", totalswap_pages, 
+			pages_to_size(totalswap_pages, buf));
+	        pct = totalswap_pages ? (totalused_pages * 100) /
+			totalswap_pages : 100;
+	        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL SWAP\n",
+	                "SWAP USED", totalused_pages,
+	                pages_to_size(totalused_pages, buf), pct);
+	        pct = totalswap_pages ? ((totalswap_pages - totalused_pages) *
+			100) / totalswap_pages : 0;
+	        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL SWAP\n", 
+			"SWAP FREE",
+	                totalswap_pages - totalused_pages,
+	                pages_to_size(totalswap_pages - totalused_pages, buf), 
+			pct);
+	} else
+		error(INFO, "swap_info[%ld].swap_map at %lx is inaccessible\n",
+			totalused_pages, totalswap_pages);
+
+	dump_zone_page_usage();
+}
+
+/*
+ *  Emulate 2.6 nr_blockdev_pages() function.
+ */
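+/*
+ *  List-walking pattern used below: do_list() first counts the entries on
+ *  the all_bdevs list, a bdevcnt-sized array is allocated, and
+ *  retrieve_list() fills it so each block_device can then be read and its
+ *  bd_inode->i_mapping->nrpages value accumulated.
+ */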
+static ulong
+nr_blockdev_pages(void)
+{
+        struct list_data list_data, *ld;
+	ulong *bdevlist;
+	int i, bdevcnt;
+	ulong inode, address_space;
+	ulong nrpages;
+	char *block_device_buf, *inode_buf, *address_space_buf;
+
+        ld = &list_data;
+        BZERO(ld, sizeof(struct list_data));
+	get_symbol_data("all_bdevs", sizeof(void *), &ld->start);
+	if (empty_list(ld->start))
+		return 0;
+	ld->end = symbol_value("all_bdevs");
+        ld->list_head_offset = OFFSET(block_device_bd_list);
+
+	block_device_buf = GETBUF(SIZE(block_device));
+	inode_buf = GETBUF(SIZE(inode));
+	address_space_buf = GETBUF(SIZE(address_space));
+
+        hq_open();
+        bdevcnt = do_list(ld);
+        bdevlist = (ulong *)GETBUF(bdevcnt * sizeof(ulong));
+        bdevcnt = retrieve_list(bdevlist, bdevcnt);
+        hq_close();
+
+	/*
+	 *  go through the block_device list, emulating:
+	 *
+	 *      ret += bdev->bd_inode->i_mapping->nrpages;
+	 */
+	for (i = nrpages = 0; i < bdevcnt; i++) {
+                readmem(bdevlist[i], KVADDR, block_device_buf, 
+			SIZE(block_device), "block_device buffer", 
+			FAULT_ON_ERROR);
+		inode = ULONG(block_device_buf + OFFSET(block_device_bd_inode));
+                readmem(inode, KVADDR, inode_buf, SIZE(inode), "inode buffer", 
+			FAULT_ON_ERROR);
+		address_space = ULONG(inode_buf + OFFSET(inode_i_mapping));
+                readmem(address_space, KVADDR, address_space_buf, 
+			SIZE(address_space), "address_space buffer", 
+			FAULT_ON_ERROR);
+		nrpages += ULONG(address_space_buf + 
+			OFFSET(address_space_nrpages));
+	}
+
+	FREEBUF(bdevlist);
+	FREEBUF(block_device_buf);
+	FREEBUF(inode_buf);
+	FREEBUF(address_space_buf);
+
+	return nrpages;
+} 
+
+/*
+ *  dump_vmlist() displays information from the vmlist.
+ */
+
+static void
+dump_vmlist(struct meminfo *vi)
+{
+	char buf[BUFSIZE];
+	char buf1[BUFSIZE];
+	char buf2[BUFSIZE];
+	ulong vmlist;
+	ulong addr, size, next, pcheck, count, verified; 
+	physaddr_t paddr;
+
+	get_symbol_data("vmlist", sizeof(void *), &vmlist);
+	next = vmlist;
+	count = verified = 0;
+
+	while (next) {
+		if (!(pc->curcmd_flags & HEADER_PRINTED) && (next == vmlist) && 
+		    !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC|
+		      GET_VMLIST_COUNT|GET_VMLIST|VMLIST_VERIFY))) {
+			fprintf(fp, "%s  ", 
+			    mkstring(buf, MAX(strlen("VM_STRUCT"), VADDR_PRLEN),
+			    	CENTER|LJUST, "VM_STRUCT"));
+			fprintf(fp, "%s    SIZE\n",
+			    mkstring(buf, (VADDR_PRLEN * 2) + strlen(" - "),
+				CENTER|LJUST, "ADDRESS RANGE"));
+			pc->curcmd_flags |= HEADER_PRINTED;
+		}
+
+                readmem(next+OFFSET(vm_struct_addr), KVADDR, 
+			&addr, sizeof(void *),
+                        "vmlist addr", FAULT_ON_ERROR);
+                readmem(next+OFFSET(vm_struct_size), KVADDR, 
+			&size, sizeof(ulong),
+                        "vmlist size", FAULT_ON_ERROR);
+
+		if (vi->flags & (GET_VMLIST_COUNT|GET_VMLIST)) {
+			/*
+			 *  Preceding GET_VMLIST_COUNT set vi->retval.
+			 */
+			if (vi->flags & GET_VMLIST) {
+				if (count < vi->retval) {
+					vi->vmlist[count].addr = addr;
+					vi->vmlist[count].size = size;
+				}
+			}
+			count++;
+			goto next_entry;
+		}
+
+		if (!(vi->flags & ADDRESS_SPECIFIED) || 
+		    ((vi->memtype == KVADDR) &&
+		    ((vi->spec_addr >= addr) && (vi->spec_addr < (addr+size))))) {
+			if (vi->flags & VMLIST_VERIFY) {
+				verified++;
+				break;
+			}	
+			fprintf(fp, "%s%s  %s - %s  %6ld\n",
+				mkstring(buf,VADDR_PRLEN, LONG_HEX|CENTER|LJUST,
+				MKSTR(next)), space(MINSPACE-1),
+				mkstring(buf1, VADDR_PRLEN, LONG_HEX|RJUST,
+				MKSTR(addr)),
+				mkstring(buf2, VADDR_PRLEN, LONG_HEX|LJUST,
+				MKSTR(addr+size)),
+				size);
+		}
+
+		if ((vi->flags & ADDRESS_SPECIFIED) && 
+		     (vi->memtype == PHYSADDR)) {
+			for (pcheck = addr; pcheck < (addr+size); 
+			     pcheck += PAGESIZE()) {
+				if (!kvtop(NULL, pcheck, &paddr, 0))
+					continue;
+		    		if ((vi->spec_addr >= paddr) && 
+				    (vi->spec_addr < (paddr+PAGESIZE()))) {
+					if (vi->flags & GET_PHYS_TO_VMALLOC) {
+						vi->retval = pcheck +
+						    PAGEOFFSET(paddr);
+						return;
+				        } else
+						fprintf(fp,
+						"%s%s  %s - %s  %6ld\n",
+						mkstring(buf, VADDR_PRLEN,
+						LONG_HEX|CENTER|LJUST,
+						MKSTR(next)), space(MINSPACE-1),
+						mkstring(buf1, VADDR_PRLEN,
+						LONG_HEX|RJUST, MKSTR(addr)),
+						mkstring(buf2, VADDR_PRLEN,
+						LONG_HEX|LJUST,
+						MKSTR(addr+size)), size);
+					break;
+				}
+			}
+
+		}
+next_entry:
+                readmem(next+OFFSET(vm_struct_next), 
+			KVADDR, &next, sizeof(void *),
+                        "vmlist next", FAULT_ON_ERROR);
+	}
+
+	if (vi->flags & GET_HIGHEST)
+		vi->retval = addr+size;
+
+	if (vi->flags & GET_VMLIST_COUNT)
+		vi->retval = count;
+
+	if (vi->flags & VMLIST_VERIFY)
+		vi->retval = verified;
+}
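+
+/*
+ *  Illustrative sketch (hypothetical caller, not part of this patch) of the
+ *  two-pass GET_VMLIST_COUNT/GET_VMLIST protocol handled above: the first
+ *  pass returns the entry count in vi->retval, which sizes the array that
+ *  the second pass fills in:
+ *
+ *      struct meminfo vi;
+ *
+ *      BZERO(&vi, sizeof(struct meminfo));
+ *      vi.flags = GET_VMLIST_COUNT;
+ *      dump_vmlist(&vi);                        // pass 1: count entries
+ *      vi.vmlist = GETBUF(vi.retval * sizeof(*vi.vmlist));
+ *      vi.flags = GET_VMLIST;
+ *      dump_vmlist(&vi);                        // pass 2: fill addr/size pairs
+ *      ...
+ *      FREEBUF(vi.vmlist);
+ */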
+
+/*
+ *  dump_page_lists() displays information from the active_list,
+ *  inactive_dirty_list and inactive_clean_list from each zone.
+ */
+static int
+dump_page_lists(struct meminfo *mi)
+{
+	int i, c, n, retval;
+        ulong node_zones, pgdat;
+	struct node_table *nt;
+	struct list_data list_data, *ld;
+	char buf[BUFSIZE];
+	ulong value;
+	ulong inactive_clean_pages, inactive_clean_list;
+	int nr_active_pages, nr_inactive_pages;
+	int nr_inactive_dirty_pages;
+
+	ld = &list_data;
+
+	retval = FALSE;
+	nr_active_pages = nr_inactive_dirty_pages = -1;
+
+	BZERO(ld, sizeof(struct list_data));
+	ld->list_head_offset = OFFSET(page_lru);
+	if (mi->flags & ADDRESS_SPECIFIED)
+		ld->searchfor = mi->spec_addr;
+	else if (mi->flags & VERBOSE)
+		ld->flags |= VERBOSE;
+	
+	if (mi->flags & GET_ACTIVE_LIST) {
+		if (!symbol_exists("active_list"))
+			error(FATAL, 
+			    "active_list does not exist in this kernel\n");
+
+		if (symbol_exists("nr_active_pages"))
+			get_symbol_data("nr_active_pages", sizeof(int), 
+				&nr_active_pages);
+		else
+			error(FATAL, 
+			    "nr_active_pages does not exist in this kernel\n");
+
+		ld->end = symbol_value("active_list");
+                readmem(ld->end, KVADDR, &ld->start, sizeof(void *),
+                	"LIST_HEAD contents", FAULT_ON_ERROR);
+		
+		if (mi->flags & VERBOSE)
+			fprintf(fp, "active_list:\n");
+
+                if (ld->start == ld->end) {
+                       c = 0;
+                       ld->searchfor = 0;
+                       if (mi->flags & VERBOSE)
+                               fprintf(fp, "(empty)\n");
+                } else {
+                	hq_open();
+                	c = do_list(ld);
+                	hq_close();
+		}
+
+		if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) {
+			fprintf(fp, "%lx\n", ld->searchfor);
+			retval = TRUE;
+                } else {
+                        fprintf(fp, "%snr_active_pages: %d ", 
+				mi->flags & VERBOSE ? "\n" : "",
+                                nr_active_pages);
+                        if (c != nr_active_pages)
+                                fprintf(fp, "(found %d)\n", c);
+                        else
+                                fprintf(fp, "(verified)\n");
+		}
+	}
+
+	if (mi->flags & GET_INACTIVE_LIST) {
+		if (!symbol_exists("inactive_list"))
+			error(FATAL, 
+			    "inactive_list does not exist in this kernel\n");
+
+		if (symbol_exists("nr_inactive_pages"))
+			get_symbol_data("nr_inactive_pages", sizeof(int), 
+				&nr_inactive_pages);
+		else
+			error(FATAL, 
+			    "nr_active_pages does not exist in this kernel\n");
+
+		ld->end = symbol_value("inactive_list");
+                readmem(ld->end, KVADDR, &ld->start, sizeof(void *),
+                	"LIST_HEAD contents", FAULT_ON_ERROR);
+		
+		if (mi->flags & VERBOSE)
+			fprintf(fp, "inactive_list:\n");
+
+                if (ld->start == ld->end) {
+                       c = 0;
+                       ld->searchfor = 0;
+                       if (mi->flags & VERBOSE)
+                               fprintf(fp, "(empty)\n");
+                } else {
+                	hq_open();
+                	c = do_list(ld);
+                	hq_close();
+		}
+
+		if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) {
+			fprintf(fp, "%lx\n", ld->searchfor);
+			retval = TRUE;
+                } else {
+                        fprintf(fp, "%snr_inactive_pages: %d ", 
+				mi->flags & VERBOSE ? "\n" : "",
+                                nr_inactive_pages);
+                        if (c != nr_inactive_pages)
+                                fprintf(fp, "(found %d)\n", c);
+                        else
+                                fprintf(fp, "(verified)\n");
+		}
+	}
+
+        if (mi->flags & GET_INACTIVE_DIRTY) {
+		if (!symbol_exists("inactive_dirty_list"))
+			error(FATAL, 
+		        "inactive_dirty_list does not exist in this kernel\n");
+
+                if (symbol_exists("nr_inactive_dirty_pages"))
+                        get_symbol_data("nr_inactive_dirty_pages", sizeof(int), 
+                                &nr_inactive_dirty_pages);
+		else
+			error(FATAL,
+                     "nr_inactive_dirty_pages does not exist in this kernel\n");
+
+		ld->end = symbol_value("inactive_dirty_list");
+                readmem(ld->end, KVADDR, &ld->start, sizeof(void *),
+                	"LIST_HEAD contents", FAULT_ON_ERROR);
+
+		if (mi->flags & VERBOSE)
+			fprintf(fp, "%sinactive_dirty_list:\n",
+				mi->flags & GET_ACTIVE_LIST ? "\n" : "");
+
+                if (ld->start == ld->end) {
+                       c = 0;
+                       ld->searchfor = 0;
+                       if (mi->flags & VERBOSE)
+                               fprintf(fp, "(empty)\n");
+                } else {
+			hq_open();
+        		c = do_list(ld);
+        		hq_close();
+		}
+
+                if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { 
+                        fprintf(fp, "%lx\n", ld->searchfor);
+			retval = TRUE;
+		} else {
+			fprintf(fp, "%snr_inactive_dirty_pages: %d ", 
+				mi->flags & VERBOSE ? "\n" : "",
+				nr_inactive_dirty_pages);
+        		if (c != nr_inactive_dirty_pages)
+                		fprintf(fp, "(found %d)\n", c);
+        		else
+                		fprintf(fp, "(verified)\n");
+		}
+        }
+
+        if (mi->flags & GET_INACTIVE_CLEAN) {
+		if (INVALID_MEMBER(zone_struct_inactive_clean_list))
+			error(FATAL, 
+		        "inactive_clean_list(s) do not exist in this kernel\n");
+
+        	get_symbol_data("pgdat_list", sizeof(void *), &pgdat);
+
+                if ((mi->flags & VERBOSE) && 
+		    (mi->flags & (GET_ACTIVE_LIST|GET_INACTIVE_DIRTY)))
+			fprintf(fp, "\n");
+
+        	for (n = 0; pgdat; n++) {
+                	nt = &vt->node_table[n];
+
+                	node_zones = nt->pgdat + OFFSET(pglist_data_node_zones);
+
+                	for (i = 0; i < vt->nr_zones; i++) {
+                        	readmem(node_zones+OFFSET(zone_struct_name), 
+					KVADDR, &value, sizeof(void *),
+                                	"zone_struct name", FAULT_ON_ERROR);
+                        	if (!read_string(value, buf, BUFSIZE-1))
+                                	sprintf(buf, "(unknown) ");
+
+                		if (mi->flags & VERBOSE) {
+					if (vt->numnodes > 1)
+                        			fprintf(fp, "NODE %d ", n);
+                        		fprintf(fp, 
+				            "\"%s\" inactive_clean_list:\n", 
+						buf);
+				}
+
+				readmem(node_zones +
+				    OFFSET(zone_struct_inactive_clean_pages),
+                                    KVADDR, &inactive_clean_pages, 
+				    sizeof(ulong), "inactive_clean_pages", 
+				    FAULT_ON_ERROR);
+
+                                readmem(node_zones +
+                                    OFFSET(zone_struct_inactive_clean_list),
+                                    KVADDR, &inactive_clean_list, 
+                                    sizeof(ulong), "inactive_clean_list", 
+                                    FAULT_ON_ERROR);
+
+				ld->start = inactive_clean_list;
+				ld->end = node_zones +
+                                    OFFSET(zone_struct_inactive_clean_list);
+        			if (mi->flags & ADDRESS_SPECIFIED)
+                			ld->searchfor = mi->spec_addr;
+
+				if (ld->start == ld->end) {
+					c = 0;
+					ld->searchfor = 0;
+					if (mi->flags & VERBOSE)
+						fprintf(fp, "(empty)\n");
+				} else {
+                			hq_open();
+                			c = do_list(ld);
+                			hq_close();
+				}
+
+		                if ((mi->flags & ADDRESS_SPECIFIED) && 
+				    ld->searchfor) {
+		                        fprintf(fp, "%lx\n", ld->searchfor);
+		                        retval = TRUE;
+		                } else {
+					if (vt->numnodes > 1)
+						fprintf(fp, "NODE %d ", n);
+					fprintf(fp, "\"%s\" ", buf);
+		                        fprintf(fp, 
+					    "inactive_clean_pages: %ld ",
+		                                inactive_clean_pages);
+		                        if (c != inactive_clean_pages)
+		                                fprintf(fp, "(found %d)\n", c);
+		                        else
+		                                fprintf(fp, "(verified)\n");
+		                }
+
+				node_zones += SIZE(zone_struct);
+			}
+
+                	readmem(pgdat + OFFSET_OPTION(pglist_data_node_next,
+				pglist_data_pgdat_next), KVADDR,
+                        	&pgdat, sizeof(void *), "pglist_data node_next",
+                        	FAULT_ON_ERROR);
+		}
+        }
+
+	return retval;
+}
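+
+/*
+ *  The list walks above all follow the same list_data pattern for a kernel
+ *  LIST_HEAD such as "active_list": the LIST_HEAD address terminates the
+ *  walk, its contents give the first entry, and page.lru is the embedded
+ *  list_head.  Minimal sketch (illustration only):
+ *
+ *      struct list_data ld;
+ *
+ *      BZERO(&ld, sizeof(struct list_data));
+ *      ld.list_head_offset = OFFSET(page_lru);
+ *      ld.end = symbol_value("active_list");
+ *      readmem(ld.end, KVADDR, &ld.start, sizeof(void *),
+ *              "LIST_HEAD contents", FAULT_ON_ERROR);
+ *      hq_open();
+ *      cnt = do_list(&ld);                      // number of pages on the list
+ *      hq_close();
+ */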
+
+
+
+/*
+ *  Check whether an address is a kmem_cache_t address, and if so, return
+ *  a pointer to the caller-supplied buffer containing its name string.
+ *  Otherwise return NULL.
+ */
+
+#define PERCPU_NOT_SUPPORTED "per-cpu slab format not supported yet\n"
+
+static char * 
+is_kmem_cache_addr(ulong vaddr, char *kbuf)
+{
+        ulong cache, cache_cache, name;
+	long next_offset, name_offset;
+	char *cache_buf;
+
+	if (vt->flags & KMEM_CACHE_UNAVAIL) {
+		error(INFO, "kmem cache slab subsystem not available\n");
+		return NULL;
+	}
+
+	if (vt->flags & KMALLOC_SLUB) 
+		return is_kmem_cache_addr_slub(vaddr, kbuf);
+
+        name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
+                OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name);
+        next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
+                OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);
+
+        cache = cache_cache = symbol_value("cache_cache");
+
+	cache_buf = GETBUF(SIZE(kmem_cache_s));
+
+        do {
+	        readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s),
+	        	"kmem_cache_s buffer", FAULT_ON_ERROR);
+
+		if (cache == vaddr) {
+	                if (vt->kmem_cache_namelen) {
+				BCOPY(cache_buf+name_offset, kbuf, 
+					vt->kmem_cache_namelen);
+	                } else {
+				name = ULONG(cache_buf + name_offset);
+	                        if (!read_string(name, kbuf, BUFSIZE-1)) {
+					if (vt->flags & 
+					  (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
+	                                	error(WARNING,
+	                      "cannot read kmem_cache_s.name string at %lx\n",
+	                                        	name);
+					else
+	                                	error(WARNING,
+	                      "cannot read kmem_cache_s.c_name string at %lx\n",
+	                                        	name);
+					sprintf(kbuf, "(unknown)");
+				}
+	                }
+			FREEBUF(cache_buf);
+			return kbuf;
+		}
+
+		cache = ULONG(cache_buf + next_offset);
+
+		if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
+			cache -= next_offset;
+
+        } while (cache != cache_cache);
+
+	FREEBUF(cache_buf);
+	return NULL;
+}
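+
+/*
+ *  Note on the loop above: for the PERCPU_KMALLOC_V[12] formats the value
+ *  read at next_offset is an embedded list pointer into the next cache's
+ *  own next member, so it is adjusted back to the start of that structure.
+ *  Hypothetical numbers: if next_offset is 0x40 and the pointer reads
+ *  0xc0300040, the next kmem_cache address is 0xc0300040 - 0x40 = 0xc0300000.
+ */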
+
+/*
+ *  Same functionality as above, except that it simply dumps
+ *  all slab cache names and their addresses.
+ */
+static void
+kmem_cache_list(void)
+{
+        ulong cache, cache_cache, name;
+	long next_offset, name_offset;
+	char *cache_buf;
+	char buf[BUFSIZE];
+
+	if (vt->flags & KMEM_CACHE_UNAVAIL) {
+		error(INFO, "kmem cache slab subsystem not available\n");
+		return;
+	}
+
+	if (vt->flags & KMALLOC_SLUB) {
+		kmem_cache_list_slub();
+		return;		
+	}
+
+        name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
+                OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name);
+        next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
+                OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);
+
+        cache = cache_cache = symbol_value("cache_cache");
+
+	cache_buf = GETBUF(SIZE(kmem_cache_s));
+
+        do {
+	        readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s),
+	        	"kmem_cache_s buffer", FAULT_ON_ERROR);
+
+	        if (vt->kmem_cache_namelen) {
+			BCOPY(cache_buf+name_offset, buf, 
+				vt->kmem_cache_namelen);
+	        } else {
+			name = ULONG(cache_buf + name_offset);
+	                if (!read_string(name, buf, BUFSIZE-1)) {
+				if (vt->flags & 
+				    (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
+	                               	error(WARNING,
+	                      "cannot read kmem_cache_s.name string at %lx\n",
+	                                       	name);
+				else
+	                               	error(WARNING,
+	                      "cannot read kmem_cache_s.c_name string at %lx\n",
+	                                       	name);
+				sprintf(buf, "(unknown)");
+			}
+	        }
+
+		fprintf(fp, "%lx %s\n", cache, buf);
+
+		cache = ULONG(cache_buf + next_offset);
+
+		if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
+			cache -= next_offset;
+
+        } while (cache != cache_cache);
+
+	FREEBUF(cache_buf);
+}
+
+/*
+ *  Translate an address to its physical page number, verify that the
+ *  page in fact belongs to the slab subsystem, and if so, return the 
+ *  name of the cache to which it belongs.
+ */
+static char *
+vaddr_to_kmem_cache(ulong vaddr, char *buf, int verbose)
+{
+	physaddr_t paddr;
+	ulong page;
+	ulong cache;
+
+        if (!kvtop(NULL, vaddr, &paddr, 0)) {
+		if (verbose)
+		 	error(WARNING, 
+ 		            "cannot make virtual-to-physical translation: %lx\n", 
+				vaddr);
+		return NULL;
 	}
-	hq_close();
 
-        fprintf(fp, "%spage_cache_size: %ld ", verbose ? "\n" : "",
-                page_cache_size);
-        if (page_cache_size != total_cached)
-                fprintf(fp, "(found %ld)\n", total_cached);
+	if (!phys_to_page(paddr, &page)) {
+		if (verbose)
+			error(WARNING, 
+			    "cannot find mem_map page for address: %lx\n", 
+				vaddr);
+		return NULL;
+	}
+
+	if (vt->flags & KMALLOC_SLUB) {
+                readmem(compound_head(page)+OFFSET(page_slab),
+                        KVADDR, &cache, sizeof(void *),
+                        "page.slab", FAULT_ON_ERROR);
+	} else if (VALID_MEMBER(page_next))
+                readmem(page+OFFSET(page_next),
+                        KVADDR, &cache, sizeof(void *),
+                        "page.next", FAULT_ON_ERROR);
+	else if (VALID_MEMBER(page_list_next))
+                readmem(page+OFFSET(page_list_next),
+                        KVADDR, &cache, sizeof(void *),
+                        "page.list.next", FAULT_ON_ERROR);
+	else if (VALID_MEMBER(page_lru))
+                readmem(page+OFFSET(page_lru)+OFFSET(list_head_next),
+                        KVADDR, &cache, sizeof(void *),
+                        "page.lru.next", FAULT_ON_ERROR);
+	else
+		error(FATAL, "cannot determine slab cache from page struct\n");
+
+	return(is_kmem_cache_addr(cache, buf)); 
+}
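+
+/*
+ *  Resolution chain performed above for a kernel virtual address vaddr,
+ *  condensed into a sketch (using the page.lru variant; the other branches
+ *  differ only in the page member that carries the cache pointer):
+ *
+ *      physaddr_t paddr;
+ *      ulong page, cache;
+ *      char name[BUFSIZE];
+ *
+ *      kvtop(NULL, vaddr, &paddr, 0);           // virtual -> physical
+ *      phys_to_page(paddr, &page);              // physical -> page struct
+ *      readmem(page + OFFSET(page_lru) + OFFSET(list_head_next), KVADDR,
+ *              &cache, sizeof(void *), "page.lru.next", FAULT_ON_ERROR);
+ *      is_kmem_cache_addr(cache, name);         // cache address -> cache name
+ */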
+
+/*
+ *  Translate an address to its physical page number, verify that the
+ *  page in fact belongs to the slab subsystem, and if so, return the
+ *  address of the slab to which it belongs.
+ */
+static ulong
+vaddr_to_slab(ulong vaddr)
+{
+        physaddr_t paddr;
+        ulong page;
+        ulong slab;
+
+        if (!kvtop(NULL, vaddr, &paddr, 0)) {
+                error(WARNING,
+                    "cannot make virtual-to-physical translation: %lx\n",
+                        vaddr);
+                return 0;
+        }
+
+        if (!phys_to_page(paddr, &page)) {
+                error(WARNING, "cannot find mem_map page for address: %lx\n",
+                        vaddr);
+                return 0;
+        }
+
+	slab = 0;
+
+        if (vt->flags & KMALLOC_SLUB)
+		slab = compound_head(page);
+        else if (VALID_MEMBER(page_prev))
+                readmem(page+OFFSET(page_prev),
+                        KVADDR, &slab, sizeof(void *),
+                        "page.prev", FAULT_ON_ERROR);
+        else if (VALID_MEMBER(page_list_prev))
+                readmem(page+OFFSET(page_list_prev),
+                        KVADDR, &slab, sizeof(void *),
+                        "page.list.prev", FAULT_ON_ERROR);
+	else if (VALID_MEMBER(page_lru))
+                readmem(page+OFFSET(page_lru)+OFFSET(list_head_prev),
+                        KVADDR, &slab, sizeof(void *),
+                        "page.lru.prev", FAULT_ON_ERROR);
         else
-                fprintf(fp, "(verified)\n");
+                error(FATAL, "unknown definition of struct page?\n");
 
-	if (CRASHDEBUG(1))
-		fprintf(fp, "heads containing page(s): %d\n", populated);
+	return slab;
+}
 
-	if (searchpage) {
-		rewind(pc->tmpfile);
-		found = FALSE;
-		while (fgets(buf, BUFSIZE, pc->tmpfile)) {
-			if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem:"))
-				continue;
 
-			if (strstr(buf, "page_hash_table")) {
-				strcpy(hash_table, buf); 
-				continue;
-			}
-			if (strstr(buf, "page_cache_size"))
-				continue;
+/*
+ *  Initialize any data required for scouring the kmalloc subsystem more
+ *  efficiently.
+ */
+char slab_hdr[100] = { 0 };
+char kmem_cache_hdr[100] = { 0 };
+char free_inuse_hdr[100] = { 0 };
+
+static void
+kmem_cache_init(void)
+{
+	ulong cache, cache_end, max_cnum, max_limit, max_cpus, tmp, tmp2;
+	long cache_count, num_offset, next_offset;
+	char *cache_buf;
+
+	if (vt->flags & KMEM_CACHE_UNAVAIL)
+		return;
+
+	if ((vt->flags & KMEM_CACHE_DELAY) && !(pc->flags & RUNTIME))
+		return;
+
+	if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT))
+		return; 
+
+	please_wait("gathering kmem slab cache data");
+
+        if (!strlen(slab_hdr)) {
+		if (vt->flags & KMALLOC_SLUB) 
+			sprintf(slab_hdr, 
+			    "SLAB%sMEMORY%sNODE  TOTAL  ALLOCATED  FREE\n",
+				space(VADDR_PRLEN > 8 ? 14 : 6),
+				space(VADDR_PRLEN > 8 ? 12 : 4));
+		else
+			sprintf(slab_hdr, 
+			    "SLAB%sMEMORY%sTOTAL  ALLOCATED  FREE\n",
+				space(VADDR_PRLEN > 8 ? 14 : 6),
+				space(VADDR_PRLEN > 8 ? 12 : 4));
+	}
+
+	if (!strlen(kmem_cache_hdr)) 
+		sprintf(kmem_cache_hdr,
+     "CACHE%sNAME                 OBJSIZE  ALLOCATED     TOTAL  SLABS  SSIZE\n",
+			space(VADDR_PRLEN > 8 ? 12 : 4));
+
+	if (!strlen(free_inuse_hdr)) 
+		sprintf(free_inuse_hdr, "FREE / [ALLOCATED]\n");
+
+	if (vt->flags & KMALLOC_SLUB) {
+		kmem_cache_init_slub();
+		return;
+	}
+
+	num_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
+		OFFSET(kmem_cache_s_num) : OFFSET(kmem_cache_s_c_num);
+	next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
+		OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);
+        max_cnum = max_limit = max_cpus = cache_count = 0;
+
+	/*
+	 *  Pre-2.6 versions used the "cache_cache" as the head of the
+	 *  slab chain list.  2.6 uses the "cache_chain" list_head.
+	 */
+        if (vt->flags & PERCPU_KMALLOC_V2) {
+                get_symbol_data("cache_chain", sizeof(ulong), &cache);
+		cache -= next_offset;
+                cache_end = symbol_value("cache_chain");
+        } else
+                cache = cache_end = symbol_value("cache_cache");
+
+	cache_buf = GETBUF(SIZE(kmem_cache_s));
+
+        do {
+		cache_count++;
+
+                if (!readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s),
+                        "kmem_cache_s buffer", RETURN_ON_ERROR)) {
+			FREEBUF(cache_buf);
+			vt->flags |= KMEM_CACHE_UNAVAIL;
+			error(INFO, 
+		          "%sunable to initialize kmem slab cache subsystem\n\n",
+				DUMPFILE() ? "\n" : "");
+			return;
+		}
+
+		tmp = (ulong)(UINT(cache_buf + num_offset));
+
+                if (tmp > max_cnum)
+                        max_cnum = tmp;
+
+		if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit)
+			max_limit = tmp;
+		/*
+		 *  Recognize and bail out on any max_cpudata_limit() failures.
+		 */
+		if (vt->flags & KMEM_CACHE_UNAVAIL) {
+			FREEBUF(cache_buf);
+			return;
+		}
+
+		if (tmp2 > max_cpus)
+			max_cpus = tmp2;
+
+		cache = ULONG(cache_buf + next_offset);
+
+		switch (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) 
+		{
+		case PERCPU_KMALLOC_V1:
+			cache -= next_offset;
+			break;
+		case PERCPU_KMALLOC_V2:
+			if (cache != cache_end)
+				cache -= next_offset;
+			break;
+		}
+
+        } while (cache != cache_end);
+
+	FREEBUF(cache_buf);
+
+	vt->kmem_max_c_num = max_cnum;
+	vt->kmem_max_limit = max_limit;
+	vt->kmem_max_cpus = max_cpus;
+	vt->kmem_cache_count = cache_count;
+
+	if (CRASHDEBUG(2)) {
+		fprintf(fp, "kmem_cache_init:\n");
+		fprintf(fp, "  kmem_max_c_num: %ld\n", vt->kmem_max_c_num);
+		fprintf(fp, "  kmem_max_limit: %ld\n", vt->kmem_max_limit);
+		fprintf(fp, "  kmem_max_cpus: %ld\n", vt->kmem_max_cpus);
+		fprintf(fp, "  kmem_cache_count: %ld\n", vt->kmem_cache_count);
+	}
+
+	if (!(vt->flags & KMEM_CACHE_INIT)) {
+		if (vt->flags & PERCPU_KMALLOC_V1)
+			ARRAY_LENGTH_INIT(vt->kmem_cache_namelen,
+				kmem_cache_s_name, "kmem_cache_s.name", 
+				NULL, sizeof(char));
+		else if (vt->flags & PERCPU_KMALLOC_V2)
+			vt->kmem_cache_namelen = 0;
+		else
+			ARRAY_LENGTH_INIT(vt->kmem_cache_namelen,
+				kmem_cache_s_c_name, "kmem_cache_s.c_name", 
+				NULL, 0);
+	}
+
+	please_wait_done();
+
+	vt->flags |= KMEM_CACHE_INIT;
+}
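+
+/*
+ *  The values gathered by kmem_cache_init() above size the working buffers
+ *  used by the dump functions later in this file.  Sketch (illustration
+ *  only, mirroring the GETBUF calls below):
+ *
+ *      si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong));
+ *      si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int));
+ *      for (i = 0; i < vt->kmem_max_cpus; i++)
+ *              si->cpudata[i] = (ulong *)
+ *                      GETBUF(vt->kmem_max_limit * sizeof(ulong));
+ */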
+
+/*
+ *  Determine the largest cpudata limit for a given cache.
+ */
+static ulong
+max_cpudata_limit(ulong cache, ulong *cpus)
+{
+	int i;
+	ulong cpudata[NR_CPUS];
+	int limit; 
+	ulong max_limit;
+	ulong shared; 
+	ulong *start_address;
+	
+	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+		goto kmem_cache_s_array_nodes;
+	
+	if (vt->flags & PERCPU_KMALLOC_V2)
+		goto kmem_cache_s_array;
+	
+	 if (INVALID_MEMBER(kmem_cache_s_cpudata)) {
+		*cpus = 0;
+		return 0;
+	}
+
+	if (!readmem(cache+OFFSET(kmem_cache_s_cpudata),
+            KVADDR, &cpudata[0], 
+	    sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata),
+            "cpudata array", RETURN_ON_ERROR))
+		goto bail_out;
+
+	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && 
+	     cpudata[i]; i++) {
+		if (!readmem(cpudata[i]+OFFSET(cpucache_s_limit),
+        	    KVADDR, &limit, sizeof(int),
+                    "cpucache limit", RETURN_ON_ERROR))
+			goto bail_out;
+		if (limit > max_limit)
+			max_limit = limit;
+	}
+
+	*cpus = i;
+
+	return max_limit;
+
+kmem_cache_s_array:
+
+	if (!readmem(cache+OFFSET(kmem_cache_s_array),
+            KVADDR, &cpudata[0], 
+	    sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
+            "array cache array", RETURN_ON_ERROR))
+		goto bail_out;
+
+	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && 
+	     cpudata[i]; i++) {
+                if (!readmem(cpudata[i]+OFFSET(array_cache_limit),
+                    KVADDR, &limit, sizeof(int),
+                    "array cache limit", RETURN_ON_ERROR))
+			goto bail_out;
+                if (limit > max_limit)
+                        max_limit = limit;
+        }
+
+	/*
+	 *  If the shared array_cache can be accessed, factor in its limit as well.
+	 */
+	if (VALID_MEMBER(kmem_list3_shared) &&
+	    VALID_MEMBER(kmem_cache_s_lists) &&
+            readmem(cache+OFFSET(kmem_cache_s_lists)+OFFSET(kmem_list3_shared),
+	    KVADDR, &shared, sizeof(void *), "kmem_list3 shared", 
+	    RETURN_ON_ERROR|QUIET) &&
+	    readmem(shared+OFFSET(array_cache_limit), 
+	    KVADDR, &limit, sizeof(int), "shared array_cache limit",
+	    RETURN_ON_ERROR|QUIET)) {
+		if (limit > max_limit)
+			max_limit = limit;
+	}
+		   
+	*cpus = i;
+	return max_limit;
+
+kmem_cache_s_array_nodes:
+
+	if (!readmem(cache+OFFSET(kmem_cache_s_array),
+            KVADDR, &cpudata[0], 
+	    sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
+            "array cache array", RETURN_ON_ERROR))
+		goto bail_out;
 
-			if (CRASHDEBUG(1) && 
-			    !hexadecimal(strip_linefeeds(buf), 0))
-				continue;
+	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && 
+	     cpudata[i]; i++) {
+                if (!readmem(cpudata[i]+OFFSET(array_cache_limit),
+                    KVADDR, &limit, sizeof(int),
+                    "array cache limit", RETURN_ON_ERROR))
+			goto bail_out;
+                if (limit > max_limit)
+                        max_limit = limit;
+        }
 
-                	this_addr = htol(strip_linefeeds(buf),
-                        	RETURN_ON_ERROR, &errflag);
+	*cpus = i;
 
-			if (this_addr == searchpage) {
-				found = TRUE;
+	/*
+	 *  Check the shared list of all the nodes.
+	 */
+	start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);
+	
+	if (VALID_MEMBER(kmem_list3_shared) && VALID_MEMBER(kmem_cache_s_lists) &&
+	    readmem(cache+OFFSET(kmem_cache_s_lists), KVADDR, &start_address[0], 
+	    sizeof(ulong) * vt->kmem_cache_len_nodes, "array nodelist array", 
+	    RETURN_ON_ERROR)) {  
+		for (i = 0; i < vt->kmem_cache_len_nodes && start_address[i]; i++) {
+			if (readmem(start_address[i] + OFFSET(kmem_list3_shared), 
+			    KVADDR, &shared, sizeof(void *),
+			    "kmem_list3 shared", RETURN_ON_ERROR|QUIET)) {
+				if (!shared)
+					break;
+			} 
+			if (readmem(shared + OFFSET(array_cache_limit),
+	       		    KVADDR, &limit, sizeof(int), "shared array_cache limit",
+		            RETURN_ON_ERROR|QUIET)) {
+				if (limit > max_limit)
+					max_limit = limit;
 				break;
 			}
 		}
-		close_tmpfile();
+	}
+	FREEBUF(start_address);
+	return max_limit;
 
-		if (found) {
-			fprintf(fp, hash_table);
-			fprintf(fp, "%lx\n", searchpage);
-			hi->retval = TRUE;
-		}
+bail_out:
+	vt->flags |= KMEM_CACHE_UNAVAIL;
+	error(INFO, "unable to initialize kmem slab cache subsystem\n\n");
+	*cpus = 0;
+	return 0;
+}
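+
+/*
+ *  Worked example (hypothetical values) for max_cpudata_limit() above: with
+ *  two populated cpu caches whose limits are 60 and 120, and a shared
+ *  array_cache limit of 240, the function returns max_limit = 240 and sets
+ *  *cpus = 2; on any readmem() failure it sets KMEM_CACHE_UNAVAIL so that
+ *  the caller bails out of slab cache initialization.
+ */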
+
+/*
+ *  Determine whether the current slab cache is contained in
+ *  the comma-separated list from a "kmem -I list1,list2 ..."
+ *  command entry.
+ */
+static int
+ignore_cache(struct meminfo *si, char *name)
+{
+	int i, argc;
+	char *p1;
+	char *arglist[MAXARGS];
+	char buf[BUFSIZE];
+
+	if (!si->ignore)
+		return FALSE;
+
+	strcpy(buf, si->ignore);
+
+	p1 = buf;
+	while (*p1) {
+		if (*p1 == ',')
+			*p1 = ' ';
+		p1++;
+	}
+
+	argc = parse_line(buf, arglist);
+
+	for (i = 0; i < argc; i++) {
+		if (STREQ(name, arglist[i]))
+			return TRUE;
 	}
+
+	return FALSE;
 }
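+
+/*
+ *  Example (hypothetical command line) of the matching done by
+ *  ignore_cache() above: for "kmem -I size-32,size-64", si->ignore is
+ *  "size-32,size-64"; the commas are converted to spaces, parse_line()
+ *  splits the buffer into "size-32" and "size-64", and
+ *  ignore_cache(si, "size-64") returns TRUE while any other cache name
+ *  returns FALSE.
+ */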
 
+
 /*
- *  dump_free_pages() displays basic data about pages currently resident
- *  in the free_area[] memory lists.  If the flags contains the VERBOSE 
- *  bit, each page slab base address is dumped.  If an address is specified
- *  only the free_area[] data containing that page is displayed, along with
- *  the page slab base address.  Specified addresses can either be physical 
- *  address or page structure pointers.
+ *  dump_kmem_cache() displays basic information about kmalloc() slabs.
+ *  At this point, only kmem_cache_s structure data for each slab is dumped.
+ *
+ *  TBD: Given a specified physical address, determine which slab it came
+ *  from, and whether or not it is in use.
  */
-char *free_area_hdr1 = \
-	"AREA  SIZE  FREE_AREA_STRUCT  BLOCKS   PAGES\n";
-char *free_area_hdr2 = \
-	"AREA  SIZE  FREE_AREA_STRUCT\n";
+
+#define SLAB_C_MAGIC            0x4F17A36DUL
+#define SLAB_MAGIC_ALLOC        0xA5C32F2BUL    /* slab is alive */
+#define SLAB_MAGIC_DESTROYED    0xB2F23C5AUL    /* slab has been destroyed */
+
+#define SLAB_CFLGS_BUFCTL       0x020000UL      /* bufctls in own cache */
+
+#define KMEM_SLAB_ADDR          (1)
+#define KMEM_BUFCTL_ADDR        (2)
+#define KMEM_OBJECT_ADDR_FREE   (3)
+#define KMEM_OBJECT_ADDR_INUSE  (4)
+#define KMEM_OBJECT_ADDR_CACHED (5)
+#define KMEM_ON_SLAB            (6)
+#define KMEM_OBJECT_ADDR_SHARED (7)
+
+#define DUMP_KMEM_CACHE_INFO_V1() \
+      {  \
+	char b1[BUFSIZE]; \
+	fprintf(fp, "%s %-18s  %8ld  ", \
+		mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache)), \
+        	buf, si->size); \
+        fprintf(fp, "%9ld  %8ld  %5ld   %3ldk\n", \
+		vt->flags & PERCPU_KMALLOC_V1 ? \
+		si->inuse - si->cpucached_cache : \
+                si->inuse, si->num_slabs * si->c_num, \
+                si->num_slabs, si->slabsize/1024); \
+      }
+
+#define DUMP_KMEM_CACHE_INFO_V2()  dump_kmem_cache_info_v2(si) 
 
 static void
-dump_free_pages(struct meminfo *fi)
+dump_kmem_cache_info_v2(struct meminfo *si)
 {
-	int i;
-	int order;
-	ulong free_area;
-	char *free_area_buf;
-	ulong *pp;
-	int nr_mem_lists;
-	struct list_data list_data, *ld;
-	long cnt, total_free, chunk_size;
-	int nr_free_pages;
-	char buf[BUFSIZE];
-	char last_free[BUFSIZE];
-	char last_free_hdr[BUFSIZE];
-	int verbose, errflag, found;
-	physaddr_t searchphys;
-	ulong this_addr; 
-	physaddr_t this_phys;
-	int do_search;
-	ulong kfp, offset;
-	int flen, dimension;
+	char b1[BUFSIZE];
+	char b2[BUFSIZE];
+	int namelen, sizelen, spacelen;
 
-        if (vt->flags & (NODES|ZONES)) 
-		error(FATAL, "dump_free_pages called with (NODES|ZONES)\n");
+	fprintf(fp, "%s ",
+		mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache))); 
 
-	nr_mem_lists = ARRAY_LENGTH(free_area);
-	dimension = ARRAY_LENGTH(free_area_DIMENSION);
+	namelen = strlen(si->curname);
+	sprintf(b2, "%ld", si->size);
+	sizelen = strlen(b2);
+	spacelen = 0;
 
-	if (nr_mem_lists == 0)
-		error(FATAL, "cannot determine size/dimensions of free_area\n");
+	if (namelen++ > 18) {
+		spacelen = 29 - namelen - sizelen;
+		fprintf(fp, "%s%s%ld  ", si->curname,
+			space(spacelen <= 0 ? 1 : spacelen), si->size); 
+		if (spacelen > 0)
+			spacelen = 1;
+		sprintf(b1, "%c%dld  ", '%', 9 + spacelen - 1);
+	} else {
+		fprintf(fp, "%-18s  %8ld  ", si->curname, si->size); 
+		sprintf(b1, "%c%dld  ", '%', 9);
+	}
 
-	if (dimension) 
+        fprintf(fp, b1, vt->flags & (PERCPU_KMALLOC_V2) ?
+                si->inuse - si->cpucached_cache : si->inuse); 
+
+        fprintf(fp, "%8ld  %5ld   %3ldk\n",  
+		si->num_slabs * si->c_num, 
+                si->num_slabs, si->slabsize/1024); 
+}
+
+#define DUMP_SLAB_INFO() \
+      { \
+        char b1[BUFSIZE], b2[BUFSIZE]; \
+        ulong allocated, freeobjs; \
+        if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) { \
+                allocated = si->s_inuse - si->cpucached_slab; \
+                freeobjs = si->c_num - allocated - si->cpucached_slab; \
+        } else { \
+                allocated = si->s_inuse; \
+                freeobjs = si->c_num - si->s_inuse; \
+        } \
+        fprintf(fp, "%s  %s  %5ld  %9ld  %4ld\n", \
+                mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \
+                mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->s_mem)), \
+                si->c_num, allocated, \
+                vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? \
+		freeobjs + si->cpucached_slab : freeobjs); \
+      }
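+
+/*
+ *  Worked example (hypothetical counts) for the percpu accounting in
+ *  DUMP_SLAB_INFO above: with c_num = 30 objects per slab, s_inuse = 20 and
+ *  cpucached_slab = 5, the columns show ALLOCATED = 20 - 5 = 15 and
+ *  FREE = (30 - 15 - 5) + 5 = 15, i.e. per-cpu cached objects are reported
+ *  as free rather than allocated, and ALLOCATED + FREE always equals c_num.
+ */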
+
+static void
+dump_kmem_cache(struct meminfo *si)
+{
+	char buf[BUFSIZE];
+	char kbuf[BUFSIZE];
+	char *reqname;
+	ulong cache_cache;
+	ulong name, magic;
+	int cnt;
+	char *p1;
+
+	if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) 
 		error(FATAL, 
-		    "dump_free_pages called with multidimensional free area\n");
+		    "dump_kmem_cache called with PERCPU_KMALLOC_V[12] set\n");
 
-	ld = &list_data;
-	total_free = 0;
-	searchphys = 0;
-	do_search = FALSE;
-	get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages);
-	
-	switch (fi->flags)
-	{
-	case GET_FREE_HIGHMEM_PAGES:
-                error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n");
+	si->found = si->retval = 0;
+	reqname = NULL;
 
-	case GET_FREE_PAGES:
-		fi->retval = (ulong)nr_free_pages;
-		return;
+	if ((!(si->flags & VERBOSE) || si->reqname) &&
+	     !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES)))
+		fprintf(fp, kmem_cache_hdr);
 
-	case ADDRESS_SPECIFIED:
-		switch (fi->memtype)
-		{
-		case KVADDR:
-                        if (!page_to_phys(fi->spec_addr, &searchphys)) {
-                                if (!kvtop(NULL, fi->spec_addr, &searchphys, 0))
-                                        return;
-                        }
-			break;
-		case PHYSADDR:
-			searchphys = fi->spec_addr;
-			break;
-		default:
-			error(FATAL, "dump_free_pages: no memtype specified\n");
+	si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong));
+	cnt = 0;
+	si->cache = cache_cache = symbol_value("cache_cache");
+
+	if (si->flags & ADDRESS_SPECIFIED) {
+	        if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) {
+			error(INFO, 
+			   "address is not allocated in slab subsystem: %lx\n",
+				si->spec_addr);
+			return;
 		}
-		do_search = TRUE;
-		break;
-	} 
+		
+		if (si->reqname && (si->reqname != p1)) 
+			error(INFO, 
+			    "ignoring pre-selected %s cache for address: %lx\n",
+				si->reqname, si->spec_addr);
 
-	verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE;
+		reqname = p1;
+	} else
+		reqname = si->reqname;
 
-	free_area_buf = GETBUF(nr_mem_lists * SIZE(free_area_struct));
-	kfp = free_area = symbol_value("free_area");
-	flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT"));
-	readmem(free_area, KVADDR, free_area_buf, 
-		SIZE(free_area_struct) * nr_mem_lists, 
-		"free_area_struct", FAULT_ON_ERROR);
+	si->cache_buf = GETBUF(SIZE(kmem_cache_s));
 
-	if (do_search)
-		open_tmpfile();
+	do {
+		if ((si->flags & VERBOSE) && !si->reqname &&
+		    !(si->flags & ADDRESS_SPECIFIED))
+			fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr);
 
-	if (!verbose)
-		fprintf(fp, free_area_hdr1);
+                readmem(si->cache, KVADDR, si->cache_buf, SIZE(kmem_cache_s),
+                	"kmem_cache_s buffer", FAULT_ON_ERROR);
 
-       	hq_open();
-	for (i = 0; i < nr_mem_lists; i++) {
-		pp = (ulong *)(free_area_buf + (SIZE(free_area_struct)*i));
+		if (vt->kmem_cache_namelen) {
+			BCOPY(si->cache_buf + OFFSET(kmem_cache_s_c_name),
+				buf, vt->kmem_cache_namelen);
+		} else {
+			name = ULONG(si->cache_buf + 
+				OFFSET(kmem_cache_s_c_name));
+                	if (!read_string(name, buf, BUFSIZE-1)) {
+				error(WARNING, 
+			      "cannot read kmem_cache_s.c_name string at %lx\n",
+					name);
+				sprintf(buf, "(unknown)");
+			}
+		}
 
-		chunk_size = power(2, i);
+		if (reqname && !STREQ(reqname, buf)) 
+			goto next_cache;
 
-		if (verbose)
-			fprintf(fp, free_area_hdr2);
+		if (ignore_cache(si, buf)) {
+			fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf);
+			goto next_cache;
+		}
 
-		fprintf(fp, "%3d  ", i);
-		sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024);
-		fprintf(fp, "%5s  ", buf);
+		si->curname = buf;
 
-		fprintf(fp, "%s  %s", 
-			mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)),
-			verbose ? "\n" : "");
+		if (CRASHDEBUG(1))
+			fprintf(fp, "cache: %lx %s\n", si->cache, si->curname);
+		console("cache: %lx %s\n", si->cache, si->curname);
 
-		if (is_page_ptr(*pp, NULL)) {
-			BZERO(ld, sizeof(struct list_data));
-			ld->flags = verbose;
-			ld->start = *pp;
-			ld->end = free_area;
-        		cnt = do_list(ld);
-			total_free += (cnt * chunk_size);
-		} else 
-			cnt = 0;
+		magic = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_magic));
 
-		if (!verbose)
-			fprintf(fp, "%6ld  %6ld\n", cnt, cnt * chunk_size );
+		if (magic == SLAB_C_MAGIC) {
 
-		free_area += SIZE(free_area_struct);
-		kfp += SIZE(free_area_struct);
-	}
-       	hq_close();
+			si->size = ULONG(si->cache_buf + 
+				OFFSET(kmem_cache_s_c_org_size));
+			if (!si->size) {
+				if (STREQ(si->curname, "kmem_cache"))
+					si->size = SIZE(kmem_cache_s);
+				else {
+					error(INFO, 
+					    "\"%s\" cache: c_org_size: %ld\n",
+						si->curname, si->size);
+					si->errors++;
+				}
+			}
+			si->c_flags = ULONG(si->cache_buf +
+				OFFSET(kmem_cache_s_c_flags));
+			si->c_offset = ULONG(si->cache_buf + 
+				OFFSET(kmem_cache_s_c_offset));
+			si->order = ULONG(si->cache_buf + 
+				OFFSET(kmem_cache_s_c_gfporder));
+			si->c_num = ULONG(si->cache_buf +
+				OFFSET(kmem_cache_s_c_num));
 
-	fprintf(fp, "\nnr_free_pages: %d ", nr_free_pages);
-	if (total_free != nr_free_pages)
-		fprintf(fp, "(found %ld)\n", total_free);
-	else
-		fprintf(fp, "(verified)\n");
+			do_slab_chain(SLAB_GET_COUNTS, si);
 
-	if (!do_search)
-		return;
+			if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) 
+				DUMP_KMEM_CACHE_INFO_V1();
 
-	found = FALSE;
-        rewind(pc->tmpfile);
-	order = offset = 0;
+			if (si->flags == GET_SLAB_PAGES) 
+				si->retval += (si->num_slabs * 
+				    	(si->slabsize/PAGESIZE()));
 
-        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
-		if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem"))
-			continue;
+			if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) {
+				si->slab = (si->flags & ADDRESS_SPECIFIED) ?
+					vaddr_to_slab(si->spec_addr) : 0;
+			
+				do_slab_chain(SLAB_WALKTHROUGH, si);
 
-		if (strstr(buf, "nr_free_pages") ||
-		    STREQ(buf, "\n"))
-			continue;
+				if (si->found) {
+					fprintf(fp, kmem_cache_hdr);
+					DUMP_KMEM_CACHE_INFO_V1();
+					fprintf(fp, slab_hdr);
+					DUMP_SLAB_INFO();
 
-		if (strstr(buf, "AREA")) {
-			strcpy(last_free_hdr, buf);
-			continue;
-		}
+					switch (si->found)
+					{
+					case KMEM_BUFCTL_ADDR:
+						fprintf(fp, "   %lx ", 
+							(ulong)si->spec_addr);
+						fprintf(fp, 
+						   "(ON-SLAB kmem_bufctl_t)\n");
+						break;
 
-		if (strstr(buf, "k")) {
-			strcpy(last_free, buf);
-			chunk_size = power(2, order) * PAGESIZE();
-			order++;
-			continue;
-		}
+					case KMEM_SLAB_ADDR:
+						fprintf(fp, "   %lx ", 
+							(ulong)si->spec_addr);
+						fprintf(fp,
+					            "(ON-SLAB kmem_slab_t)\n");
+						break;
 
-		if (CRASHDEBUG(1) && !hexadecimal(strip_linefeeds(buf), 0))
-			continue;
+					case KMEM_ON_SLAB:
+						fprintf(fp, "   %lx ", 
+							(ulong)si->spec_addr);
+						fprintf(fp, 
+						    "(unused part of slab)\n");
+						break;
+						
+					case KMEM_OBJECT_ADDR_FREE:
+                                                fprintf(fp, free_inuse_hdr);
+						fprintf(fp, "   %lx\n", 
+							si->container ? si->container :
+                                                        (ulong)si->spec_addr);
+						break;
 
-		errflag = 0;
-		this_addr = htol(strip_linefeeds(buf), 
-			RETURN_ON_ERROR, &errflag);
-                if (errflag) 
-			continue;
+                                        case KMEM_OBJECT_ADDR_INUSE:
+                                                fprintf(fp, free_inuse_hdr);
+                                                fprintf(fp, "  [%lx]\n",
+							si->container ? si->container :
+                                                        (ulong)si->spec_addr);
+                                                break;
+					}
 
-		if (!page_to_phys(this_addr, &this_phys))
-			continue;
+					break;
+				}
+			}
 
-		if ((searchphys >= this_phys) && 
-		    (searchphys < (this_phys+chunk_size))) {
-			if (searchphys > this_phys) 
-				offset = (searchphys - this_phys)/PAGESIZE();
-			found = TRUE;
-			break;
+		} else {
+			error(INFO, "\"%s\" cache: invalid c_magic: %lx\n", 
+				si->curname, magic);
+			si->errors++;
 		}
-	}
-        close_tmpfile();
 
-	if (found) {
-		order--;
+next_cache:
+		si->cache = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_nextp));
 
-		fprintf(fp, last_free_hdr);
-		fprintf(fp, last_free);
-		fprintf(fp, "%lx  ", this_addr);
-		if (order) {
-                	switch (fi->memtype)
-                	{
-                	case KVADDR:
-				fprintf(fp, "(%lx is ", (ulong)fi->spec_addr);
-                        	break;
-                	case PHYSADDR:
-				fprintf(fp, "(%llx is %s", fi->spec_addr,
-				    PAGEOFFSET(fi->spec_addr) ?  "in " : "");
-                        	break;
-			}
-			fprintf(fp, "%s of %ld pages) ",
-				ordinal(offset+1, buf), power(2, order));
-		}
+	} while (si->cache != cache_cache);
 
-		fi->retval = TRUE;
-		fprintf(fp, "\n");
-	}
+	FREEBUF(si->cache_buf);
+
+        if ((si->flags & ADDRESS_SPECIFIED) && !si->found)
+		error(INFO, "%s: address not found in cache: %lx\n", 
+			reqname, si->spec_addr);
+ 
+	if (si->errors)
+		error(INFO, "%ld error%s encountered\n", 
+			si->errors, si->errors > 1 ? "s" : "");
+
+	FREEBUF(si->addrlist);
 }
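+
+/*
+ *  Sketch (hypothetical output) of how the si->found codes above annotate an
+ *  address lookup: an address that resolves to a free object is printed on
+ *  its own line under the FREE / [ALLOCATED] header, an in-use object is
+ *  printed in brackets, and slab-management addresses are tagged
+ *  "(ON-SLAB kmem_bufctl_t)" or "(ON-SLAB kmem_slab_t)".
+ */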
 
 /*
- *  Dump free pages on kernels with a multi-dimensional free_area array.
+ *  dump_kmem_cache() adapted for newer percpu slab format.
  */
-char *free_area_hdr5 = \
-	"  AREA    SIZE  FREE_AREA_STRUCT  BLOCKS   PAGES\n";
-char *free_area_hdr6 = \
-	"  AREA    SIZE  FREE_AREA_STRUCT\n";
 
 static void
-dump_multidimensional_free_pages(struct meminfo *fi)
+dump_kmem_cache_percpu_v1(struct meminfo *si)
 {
-	int i, j;
-	struct list_data list_data, *ld;
-	long cnt, total_free;
-	ulong kfp, free_area;
-	physaddr_t searchphys;
-	int flen, errflag, verbose, nr_free_pages;
-	int nr_mem_lists, dimension, order, do_search;
-	ulong sum, found, offset;
-	char *free_area_buf, *p;
-	ulong *pp;
-	long chunk_size;
-        ulong this_addr; 
-	physaddr_t this_phys;
+	int i;
 	char buf[BUFSIZE];
-	char last_area[BUFSIZE];
-	char last_area_hdr[BUFSIZE];
+	char kbuf[BUFSIZE];
+	char *reqname;
+	ulong cache_cache;
+	ulong name;
+	int cnt;
+	uint tmp_val;  /* temporary used to read sizeof(int) members before
+			  assigning them to ulong variables, so the values
+			  remain correct regardless of endianness */
+	char *p1;
+
+        if (!(vt->flags & PERCPU_KMALLOC_V1)) 
+                error(FATAL, 
+                   "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V1\n");
+
+	si->found = si->retval = 0;
+	reqname = NULL;
+
+	if ((!(si->flags & VERBOSE) || si->reqname) &&
+	     !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES)))
+		fprintf(fp, kmem_cache_hdr);
 
+	si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong));
+	si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int));
+	for (i = 0; i < vt->kmem_max_cpus; i++) 
+		si->cpudata[i] = (ulong *)
+			GETBUF(vt->kmem_max_limit * sizeof(ulong)); 
 
-        if (vt->flags & (NODES|ZONES)) 
-                error(FATAL, 
-		"dump_multidimensional_free_pages called with (NODES|ZONES)\n");
+	cnt = 0;
+	si->cache = cache_cache = symbol_value("cache_cache");
 
-        ld = &list_data;
-	if (SIZE(free_area_struct) % sizeof(ulong))
-		error(FATAL, "free_area_struct not long-word aligned?\n");
+	if (si->flags & ADDRESS_SPECIFIED) {
+	        if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) {
+			error(INFO, 
+			   "address is not allocated in slab subsystem: %lx\n",
+				si->spec_addr);
+			return;
+		}
+		
+		if (si->reqname && (si->reqname != p1)) 
+			error(INFO, 
+			    "ignoring pre-selected %s cache for address: %lx\n",
+				si->reqname, si->spec_addr);
+		reqname = p1;
+	} else
+		reqname = si->reqname;
 
-        total_free = 0;
-        searchphys = 0;
-	do_search = FALSE;
-        get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages);
+	do {
+		if ((si->flags & VERBOSE) && !si->reqname &&
+		    !(si->flags & ADDRESS_SPECIFIED))
+			fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr);
 
-        switch (fi->flags)
-        {
-        case GET_FREE_HIGHMEM_PAGES:
-                error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n");
+		if (vt->kmem_cache_namelen) {
+                        readmem(si->cache+OFFSET(kmem_cache_s_name), 
+				KVADDR, buf, vt->kmem_cache_namelen,
+                                "name array", FAULT_ON_ERROR);
+		} else {
+                	readmem(si->cache+OFFSET(kmem_cache_s_name), 
+				KVADDR, &name, sizeof(ulong),
+                        	"name", FAULT_ON_ERROR);
+                	if (!read_string(name, buf, BUFSIZE-1)) {
+				error(WARNING, 
+			      "cannot read kmem_cache_s.name string at %lx\n",
+					name);
+				sprintf(buf, "(unknown)");
+			}
+		}
 
-        case GET_FREE_PAGES:
-                fi->retval = (ulong)nr_free_pages;
-                return;
+		if (reqname && !STREQ(reqname, buf)) 
+			goto next_cache;
 
-	case ADDRESS_SPECIFIED:
-		switch (fi->memtype)
-                {
-                case KVADDR:
-                        if (!page_to_phys(fi->spec_addr, &searchphys)) {
-                                if (!kvtop(NULL, fi->spec_addr, &searchphys, 0))
-                                        return;
-                        }
-                        break;
-                case PHYSADDR:
-                        searchphys = fi->spec_addr;
-                        break;
-                default:
-                        error(FATAL, 
-		    "dump_multidimensional_free_pages: no memtype specified\n");
+                if (ignore_cache(si, buf)) {
+                        fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf);
+                        goto next_cache;
                 }
-		do_search = TRUE;
-		break;
-	}
 
-        verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE;
+		si->curname = buf;
 
-	flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT"));
-        nr_mem_lists = ARRAY_LENGTH(free_area);
-	dimension = ARRAY_LENGTH(free_area_DIMENSION);
-	if (!nr_mem_lists || !dimension)
-		error(FATAL, "cannot determine free_area dimensions\n");
-        free_area_buf = 
-		GETBUF((nr_mem_lists * SIZE(free_area_struct)) * dimension);
-        kfp = free_area = symbol_value("free_area");
-        readmem(free_area, KVADDR, free_area_buf, 
-		(SIZE(free_area_struct) * nr_mem_lists) * dimension,
-                "free_area arrays", FAULT_ON_ERROR);
+	        readmem(si->cache+OFFSET(kmem_cache_s_objsize),
+	        	KVADDR, &tmp_val, sizeof(uint),
+	                "objsize", FAULT_ON_ERROR);
+		si->size = (ulong)tmp_val;
 
-        if (do_search)
-                open_tmpfile();
+		if (!si->size) {
+			if (STREQ(si->curname, "kmem_cache"))
+				si->size = SIZE(kmem_cache_s);
+			else {
+				error(INFO, "\"%s\" cache: objsize: %ld\n",
+					si->curname, si->size);
+				si->errors++;
+			}
+		}
 
-        hq_open();
-        for (i = sum = found = 0; i < dimension; i++) {
-        	if (!verbose)
-                	fprintf(fp, free_area_hdr5);
-               	pp = (ulong *)(free_area_buf + 
-			((SIZE(free_area_struct)*nr_mem_lists)*i));
-		for (j = 0; j < nr_mem_lists; j++) {
-                        if (verbose)
-                                fprintf(fp, free_area_hdr6);
+	        readmem(si->cache+OFFSET(kmem_cache_s_flags), 
+			KVADDR, &tmp_val, sizeof(uint),
+	                "kmem_cache_s flags", FAULT_ON_ERROR);
+		si->c_flags = (ulong)tmp_val;
 
-			sprintf(buf, "[%d][%d]", i, j);
-			fprintf(fp, "%7s  ", buf);
+                readmem(si->cache+OFFSET(kmem_cache_s_gfporder),
+                        KVADDR, &tmp_val, sizeof(uint),
+                        "gfporder", FAULT_ON_ERROR);
+		si->order = (ulong)tmp_val;
 
-                	chunk_size = power(2, j);
+        	readmem(si->cache+OFFSET(kmem_cache_s_num),
+                	KVADDR, &tmp_val, sizeof(uint),
+                	"kmem_cache_s num", FAULT_ON_ERROR);
+		si->c_num = (ulong)tmp_val;
 
-                	sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024);
-                	fprintf(fp, "%5s  ", buf);
+		do_slab_chain_percpu_v1(SLAB_GET_COUNTS, si);
 
-                	fprintf(fp, "%s  %s",  
-			    mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)),
-			    verbose ? "\n" : "");
+		if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) {
+			DUMP_KMEM_CACHE_INFO_V1();
+			if (CRASHDEBUG(3))
+				dump_struct("kmem_cache_s", si->cache, 0);
+		}
 
-                	if (is_page_ptr(*pp, NULL)) {
-                        	BZERO(ld, sizeof(struct list_data));
-                        	ld->flags = verbose;
-                        	ld->start = *pp;
-                        	ld->end = free_area;
-                        	cnt = do_list(ld);
-                        	total_free += (cnt * chunk_size);
-                	} else
-                        	cnt = 0;
+		if (si->flags == GET_SLAB_PAGES) 
+			si->retval += (si->num_slabs * 
+				(si->slabsize/PAGESIZE()));
 
-                	if (!verbose)
-                        	fprintf(fp, 
-					"%6ld  %6ld\n", cnt, cnt * chunk_size );
+		if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) {
 
-			pp += (SIZE(free_area_struct)/sizeof(ulong));
-			free_area += SIZE(free_area_struct);
-			kfp += SIZE(free_area_struct);
-		}
-		fprintf(fp, "\n");
-	}
-	hq_close();
+			gather_cpudata_list_v1(si);
 
-        fprintf(fp, "nr_free_pages: %d ", nr_free_pages);
-        if (total_free != nr_free_pages)
-                fprintf(fp, "(found %ld)\n", total_free);
-        else
-                fprintf(fp, "(verified)\n");
+                        si->slab = (si->flags & ADDRESS_SPECIFIED) ?
+                        	vaddr_to_slab(si->spec_addr) : 0;
 
-        if (!do_search)
-                return;
+			do_slab_chain_percpu_v1(SLAB_WALKTHROUGH, si);
 
-        found = FALSE;
-        rewind(pc->tmpfile);
-        order = offset = 0;
+			if (si->found) {
+				fprintf(fp, kmem_cache_hdr);
+				DUMP_KMEM_CACHE_INFO_V1();
+				fprintf(fp, slab_hdr);
+        			gather_slab_cached_count(si);
+				DUMP_SLAB_INFO();
 
-        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
-		if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem:"))
-			continue;
+				switch (si->found)
+				{
+				case KMEM_BUFCTL_ADDR:
+					fprintf(fp, "   %lx ", 
+						(ulong)si->spec_addr);
+					fprintf(fp,"(kmem_bufctl_t)\n");
+					break;
 
-		if (STRNEQ(buf, "nr_free_pages:"))
-			continue;
+				case KMEM_SLAB_ADDR:
+					fprintf(fp, "   %lx ", 
+						(ulong)si->spec_addr);
+					fprintf(fp, "(slab_s)\n");
+					break;
 
-		if (strstr(buf, "AREA")) {
-                        strcpy(last_area_hdr, buf);
-                        fgets(buf, BUFSIZE, pc->tmpfile);
-                        strcpy(last_area, strip_linefeeds(buf));
-			p = strstr(buf, "k");
-			*p = NULLCHAR;
-			while (*p != ' ')
-				p--;
-			chunk_size = atol(p+1) * 1024;
-			if (chunk_size == PAGESIZE())
-				order = 0;
-			else
-				order++;
-                        continue;
-                }
+				case KMEM_ON_SLAB:
+					fprintf(fp, "   %lx ", 
+						(ulong)si->spec_addr);
+					fprintf(fp, "(unused part of slab)\n");
+					break;
+						
+				case KMEM_OBJECT_ADDR_FREE:
+                                        fprintf(fp, free_inuse_hdr);
+					fprintf(fp, "   %lx\n", 
+						si->container ? si->container :
+						(ulong)si->spec_addr);
+					break;
 
-                errflag = 0;
-                this_addr = htol(strip_linefeeds(buf),
-                        RETURN_ON_ERROR, &errflag);
-                if (errflag)
-                        continue;
+                                case KMEM_OBJECT_ADDR_INUSE:
+                                        fprintf(fp, free_inuse_hdr);
+					fprintf(fp, "  [%lx]\n", 
+						si->container ? si->container :
+						(ulong)si->spec_addr);
+                                        break;
 
-                if (!page_to_phys(this_addr, &this_phys))
-                        continue;
+                                case KMEM_OBJECT_ADDR_CACHED:
+                                        fprintf(fp, free_inuse_hdr);
+                                        fprintf(fp, 
+					    "   %lx  (cpu %d cache)\n", 
+						si->container ? si->container :
+						(ulong)si->spec_addr, si->cpu);
+                                        break;
+				}
+
+				break;
+			}
+		}
+
+next_cache:
+                readmem(si->cache+OFFSET(kmem_cache_s_next), 
+		        KVADDR, &si->cache, sizeof(ulong),
+                        "kmem_cache_s next", FAULT_ON_ERROR);
 
-                if ((searchphys >= this_phys) &&
-                    (searchphys < (this_phys+chunk_size))) {
-                        if (searchphys > this_phys)
-                                offset = (searchphys - this_phys)/PAGESIZE();
-                        found = TRUE;
-                        break;
-                }
+		si->cache -= OFFSET(kmem_cache_s_next);
 
-	}
-	close_tmpfile();
+	} while (si->cache != cache_cache);
 
-	if (found) {
-		fprintf(fp, last_area_hdr);
-		fprintf(fp, "%s\n", last_area);
-		fprintf(fp, "%lx  ", this_addr);
-                if (order) {
-                	switch (fi->memtype)
-                	{
-                	case KVADDR:
-                                fprintf(fp, "(%lx is ", (ulong)fi->spec_addr);
-                        	break;
-                	case PHYSADDR:
-                                fprintf(fp, "(%llx is %s", fi->spec_addr,
-                                    PAGEOFFSET(fi->spec_addr) ?  "in " : "");
-                        	break;
-			}
-                        fprintf(fp, "%s of %ld pages) ",
-                                ordinal(offset+1, buf), power(2, order));
-                }
+        if ((si->flags & ADDRESS_SPECIFIED) && !si->found)
+		error(INFO, "%s: address not found in cache: %lx\n", 
+			reqname, si->spec_addr);
+ 
+	if (si->errors)
+		error(INFO, "%ld error%s encountered\n", 
+			si->errors, si->errors > 1 ? "s" : "");
 
-		fi->retval = TRUE;
-                fprintf(fp, "\n");
-	}
-}
+	FREEBUF(si->addrlist);
+	FREEBUF(si->kmem_bufctl);
+        for (i = 0; i < vt->kmem_max_cpus; i++)
+                FREEBUF(si->cpudata[i]);
 
+}
 
-/*
- *  Dump free pages in newer kernels that have zones.  This is a work in
- *  progress, because although the framework for memory nodes has been laid
- *  down, complete support has not been put in place.
- */
-static char *zone_hdr = "ZONE  NAME        SIZE    FREE";
 
 /*
- *  From linux/mmzone.h
+ *  Updated for 2.6 slab substructure. 
  */
-#define ZONE_DMA                0
-#define ZONE_NORMAL             1
-#define ZONE_HIGHMEM            2
-
 static void
-dump_free_pages_zones_v1(struct meminfo *fi)
+dump_kmem_cache_percpu_v2(struct meminfo *si)
 {
-	int i, n;
-	ulong node_zones;
-	ulong size;
-	long zone_size_offset;
-	long chunk_size;
-	int order, errflag, do_search;
-	ulong offset, verbose, value, sum, found; 
-	ulong this_addr;
-	physaddr_t this_phys, searchphys;
-        ulong zone_mem_map;
-        ulong zone_start_paddr;
-        ulong zone_start_mapnr;
-	struct node_table *nt;
-	char buf[BUFSIZE], *p;
-	char buf1[BUFSIZE];
-	char buf2[BUFSIZE];
-	char buf3[BUFSIZE];
-	char last_node[BUFSIZE];
-	char last_zone[BUFSIZE];
-	char last_area[BUFSIZE];
-	char last_area_hdr[BUFSIZE];
-
-       if (!(vt->flags & (NODES|ZONES)))
-		error(FATAL, 
-		    "dump_free_pages_zones_v1 called without (NODES|ZONES)\n");
-
-        if (fi->flags & ADDRESS_SPECIFIED) {
-                switch (fi->memtype)
-                {
-                case KVADDR:
-                        if (!page_to_phys(fi->spec_addr, &searchphys)) {
-                                if (!kvtop(NULL, fi->spec_addr, &searchphys, 0))
-                                        return;
-                        }
-                        break;
-                case PHYSADDR:
-                        searchphys = fi->spec_addr;
-                        break;
-                default:
-                        error(FATAL, 
-			    "dump_free_pages_zones_v1: no memtype specified\n");
-                }
-		do_search = TRUE;
-        } else {
-                searchphys = 0;
-		do_search = FALSE;
-	}
-        verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE;
+	int i;
+	char buf[BUFSIZE];
+	char kbuf[BUFSIZE];
+	char *reqname;
+	ulong cache_end;
+	ulong name;
+	int cnt;
+	uint tmp_val; /* temporary used to read sizeof(uint)-sized members
+			before assigning them to ulong variables, so the
+			value is not misread on big-endian machines */
+	char *p1;
 
-	if (VALID_MEMBER(zone_struct_size))
-		zone_size_offset =  OFFSET(zone_struct_size);
-	else if (VALID_MEMBER(zone_struct_memsize))
-		zone_size_offset =  OFFSET(zone_struct_memsize);
-	else
-		error(FATAL, 
-			"zone_struct has neither size nor memsize field\n");
+        if (!(vt->flags & PERCPU_KMALLOC_V2)) 
+                error(FATAL, 
+                   "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V2\n");
 
-	if (do_search)
-		open_tmpfile();
+	si->found = si->retval = 0;
+	reqname = NULL;
 
-	hq_open();
+	if ((!(si->flags & VERBOSE) || si->reqname) &&
+	     !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES)))
+		fprintf(fp, kmem_cache_hdr);
 
-	for (n = sum = found = 0; n < vt->numnodes; n++) {
-                nt = &vt->node_table[n];
-		node_zones = nt->pgdat + OFFSET(pglist_data_node_zones);
+	si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong));
+	si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int));
+	for (i = 0; i < vt->kmem_max_cpus; i++) 
+		si->cpudata[i] = (ulong *)
+			GETBUF(vt->kmem_max_limit * sizeof(ulong)); 
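+	/*
+	 *  The shared array cache buffer is sized per node when this cache
+	 *  maintains per-node lists (PERCPU_KMALLOC_V2_NODES); otherwise a
+	 *  single list's worth of entries is sufficient.
+	 */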
+	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+		si->shared_array_cache = (ulong *)
+			GETBUF(vt->kmem_cache_len_nodes * 
+				(vt->kmem_max_limit+1) * sizeof(ulong)); 
+	else
+		si->shared_array_cache = (ulong *)
+			GETBUF((vt->kmem_max_limit+1) * sizeof(ulong)); 
 
-		for (i = 0; i < vt->nr_zones; i++) {
-	
-			if (fi->flags == GET_FREE_PAGES) {
-	                	readmem(node_zones+
-					OFFSET(zone_struct_free_pages), 
-					KVADDR, &value, sizeof(ulong),
-	                        	"node_zones free_pages", 
-					FAULT_ON_ERROR);
-				sum += value;
-				node_zones += SIZE(zone_struct);
-				continue;
-			}
-	
-	                if (fi->flags == GET_FREE_HIGHMEM_PAGES) {
-	                        if (i == ZONE_HIGHMEM) {
-	                                readmem(node_zones+
-						OFFSET(zone_struct_free_pages),
-						KVADDR, &value, sizeof(ulong),
-	                                        "node_zones free_pages",
-	                                        FAULT_ON_ERROR);
-	                                sum += value;
-	                        }
-	                        node_zones += SIZE(zone_struct);
-	                        continue;
-	                }
-	
-			if (fi->flags == GET_ZONE_SIZES) {
-	                	readmem(node_zones+zone_size_offset, 
-					KVADDR, &size, sizeof(ulong),
-	                        	"node_zones {mem}size", FAULT_ON_ERROR);
-	                        sum += size;
-	                        node_zones += SIZE(zone_struct);
-	                        continue;
-			}
+	cnt = 0;
 
-			if ((i == 0) && (vt->flags & NODES)) {
-				if (n) {
-					fprintf(fp, "\n");
-                                	pad_line(fp, 
-						VADDR_PRLEN > 8 ? 74 : 66, '-');
-                                	fprintf(fp, "\n");
-				}
-				fprintf(fp, "%sNODE\n %2d\n", 
-					n ? "\n" : "", nt->node_id);
-			}
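+	/*
+	 *  kmem_cache structures are linked through their embedded "next"
+	 *  list member, so back up by OFFSET(kmem_cache_s_next) to get from
+	 *  each list pointer to the start of the containing kmem_cache.
+	 */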
+        get_symbol_data("cache_chain", sizeof(ulong), &si->cache);
+        si->cache -= OFFSET(kmem_cache_s_next);
+        cache_end = symbol_value("cache_chain");
 
-	                fprintf(fp, "%s%s  %s  START_PADDR  START_MAPNR\n",
-				i > 0 ? "\n" : "",
-	                        zone_hdr,
-	                        mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, 
-				    "MEM_MAP"));
-	
-			fprintf(fp, "%3d   ", i);
-	
-	        	readmem(node_zones+OFFSET(zone_struct_name), KVADDR, 
-				&value, sizeof(void *), 
-				"node_zones name", FAULT_ON_ERROR);
-	                if (read_string(value, buf, BUFSIZE-1))
-	                	fprintf(fp, "%-9s ", buf);
-			else
-				fprintf(fp, "(unknown) ");
-	
-	        	readmem(node_zones+zone_size_offset, KVADDR, 
-				&size, sizeof(ulong), 
-				"node_zones {mem}size", FAULT_ON_ERROR);
-	                fprintf(fp, "%6ld  ", size);
-	
-	        	readmem(node_zones+OFFSET(zone_struct_free_pages), 
-				KVADDR, &value, sizeof(ulong), 
-				"node_zones free_pages", FAULT_ON_ERROR);
-	
-	                fprintf(fp, "%6ld  ", value);
-	
-	                readmem(node_zones+OFFSET(zone_struct_zone_start_paddr),
-	                        KVADDR, &zone_start_paddr, sizeof(ulong),
-	                        "node_zones zone_start_paddr", FAULT_ON_ERROR);
-	                readmem(node_zones+OFFSET(zone_struct_zone_start_mapnr),
-	                        KVADDR, &zone_start_mapnr, sizeof(ulong),
-	                        "node_zones zone_start_mapnr", FAULT_ON_ERROR);
-	                readmem(node_zones+OFFSET(zone_struct_zone_mem_map),
-	                        KVADDR, &zone_mem_map, sizeof(ulong),
-	                        "node_zones zone_mem_map", FAULT_ON_ERROR);
-	
-	                fprintf(fp, "%s  %s  %s\n",
-	                	mkstring(buf1, VADDR_PRLEN,
-	                            CENTER|LONG_HEX,MKSTR(zone_mem_map)),
-	                	mkstring(buf2, strlen("START_PADDR"),
-	                            CENTER|LONG_HEX|RJUST,
-					MKSTR(zone_start_paddr)),
-	                	mkstring(buf3, strlen("START_MAPNR"),
-	                            CENTER|LONG_DEC|RJUST,
-					MKSTR(zone_start_mapnr)));
-	
-			sum += value;
+	if (si->flags & ADDRESS_SPECIFIED) {
+	        if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) {
+			error(INFO, 
+			   "address is not allocated in slab subsystem: %lx\n",
+				si->spec_addr);
+			return;
+		}
+		
+		if (si->reqname && (si->reqname != p1)) 
+			error(INFO, 
+			    "ignoring pre-selected %s cache for address: %lx\n",
+				si->reqname, si->spec_addr);
+		reqname = p1;
+	} else
+		reqname = si->reqname;
 
-			if (value)
-				found += dump_zone_free_area(node_zones+
-					OFFSET(zone_struct_free_area), 
-					vt->nr_free_areas, verbose);
+	do {
+		if ((si->flags & VERBOSE) && !si->reqname &&
+		    !(si->flags & ADDRESS_SPECIFIED))
+			fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr);
 
-			node_zones += SIZE(zone_struct);
+		if (vt->kmem_cache_namelen) {
+                        readmem(si->cache+OFFSET(kmem_cache_s_name), 
+				KVADDR, buf, vt->kmem_cache_namelen,
+                                "name array", FAULT_ON_ERROR);
+		} else {
+                	readmem(si->cache+OFFSET(kmem_cache_s_name), 
+				KVADDR, &name, sizeof(ulong),
+                        	"name", FAULT_ON_ERROR);
+                	if (!read_string(name, buf, BUFSIZE-1)) {
+				error(WARNING, 
+			      "cannot read kmem_cache_s.name string at %lx\n",
+					name);
+				sprintf(buf, "(unknown)");
+			}
 		}
-	}
 
-	hq_close();
+		if (reqname && !STREQ(reqname, buf)) 
+			goto next_cache;
 
-        if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)){
-                fi->retval = sum;
-                return;
-        }
+                if (ignore_cache(si, buf)) {
+                        fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf);
+                        goto next_cache;
+                }
 
-	fprintf(fp, "\nnr_free_pages: %ld  ", sum);
-	if (sum == found)
-		fprintf(fp, "(verified)\n");
-	else
-		fprintf(fp, "(found %ld)\n", found);
+		si->curname = buf;
 
-	if (!do_search)
-		return;
+	        readmem(si->cache+OFFSET(kmem_cache_s_objsize),
+	        	KVADDR, &tmp_val, sizeof(uint),
+	                "objsize", FAULT_ON_ERROR);
+		si->size = (ulong)tmp_val;
 
-        found = FALSE;
-        rewind(pc->tmpfile);
-        order = offset = 0;
-	last_node[0] = NULLCHAR;
-        last_zone[0] = NULLCHAR;
-        last_area[0] = NULLCHAR;
-        last_area_hdr[0] = NULLCHAR;
+		if (!si->size) {
+			if (STREQ(si->curname, "kmem_cache"))
+				si->size = SIZE(kmem_cache_s);
+			else {
+				error(INFO, "\"%s\" cache: objsize: %ld\n",
+					si->curname, si->size);
+				si->errors++;
+			}
+		}
 
+	        readmem(si->cache+OFFSET(kmem_cache_s_flags), 
+			KVADDR, &tmp_val, sizeof(uint),
+	                "kmem_cache_s flags", FAULT_ON_ERROR);
+		si->c_flags = (ulong)tmp_val;
 
-        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
-		if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem"))
-			continue;
+                readmem(si->cache+OFFSET(kmem_cache_s_gfporder),
+                        KVADDR, &tmp_val, sizeof(uint),
+                        "gfporder", FAULT_ON_ERROR);
+		si->order = (ulong)tmp_val;
 
-		if (STRNEQ(buf, "nr_free_pages:"))
-			continue;
+        	readmem(si->cache+OFFSET(kmem_cache_s_num),
+                	KVADDR, &tmp_val, sizeof(uint),
+                	"kmem_cache_s num", FAULT_ON_ERROR);
+		si->c_num = (ulong)tmp_val;
 
-		if (STRNEQ(buf, "NODE")) { 
-			fgets(buf, BUFSIZE, pc->tmpfile);
-			strcpy(last_node, strip_linefeeds(buf));
-			continue;
-		}
-		if (STRNEQ(buf, "ZONE")) {
-			fgets(buf, BUFSIZE, pc->tmpfile);
-			strcpy(last_zone, strip_linefeeds(buf));
-			continue;
+		if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+			do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si);
+		else
+			do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si);
+
+		if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) {
+			DUMP_KMEM_CACHE_INFO_V2();
+			if (CRASHDEBUG(3))
+				dump_struct("kmem_cache_s", si->cache, 0);
 		}
-		if (STRNEQ(buf, "AREA")) {
-                        strcpy(last_area_hdr, buf);
-                        fgets(buf, BUFSIZE, pc->tmpfile);
-                        strcpy(last_area, strip_linefeeds(buf));
-			p = strstr(buf, "k");
-			*p = NULLCHAR;
-			while (*p != ' ')
-				p--;
-			chunk_size = atol(p+1) * 1024;
-			if (chunk_size == PAGESIZE())
-				order = 0;
-			else
-				order++;
-                        continue;
-                }
 
-                if (CRASHDEBUG(0) &&
-                    !hexadecimal(strip_linefeeds(buf), 0))
-                        continue;
+		if (si->flags == GET_SLAB_PAGES) 
+			si->retval += (si->num_slabs * 
+				(si->slabsize/PAGESIZE()));
 
-                errflag = 0;
-                this_addr = htol(strip_linefeeds(buf),
-                        RETURN_ON_ERROR, &errflag);
-                if (errflag)
-                        continue;
+		if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) {
 
-                if (!page_to_phys(this_addr, &this_phys))
-                        continue;
+			if (!(vt->flags & PERCPU_KMALLOC_V2_NODES))
+				gather_cpudata_list_v2(si);
 
-                if ((searchphys >= this_phys) &&
-                    (searchphys < (this_phys+chunk_size))) {
-                        if (searchphys > this_phys)
-                                offset = (searchphys - this_phys)/PAGESIZE();
-                        found = TRUE;
-                        break;
-                }
+                        si->slab = (si->flags & ADDRESS_SPECIFIED) ?
+                        	vaddr_to_slab(si->spec_addr) : 0;
 
-	}
-	close_tmpfile();
+			if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+				do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si);
+			else 
+				do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si);
 
-	if (found) {
-		if (strlen(last_node)) 
-			fprintf(fp, "NODE\n%s\n", last_node); 
-                fprintf(fp, "%s  %s  START_PADDR  START_MAPNR\n",
-                        zone_hdr,
-                        mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP"));
-		fprintf(fp, "%s\n", last_zone);
-		fprintf(fp, last_area_hdr);
-		fprintf(fp, "%s\n", last_area);
-		fprintf(fp, "%lx  ", this_addr);
-                if (order) {
-                	switch (fi->memtype)
-                	{
-                	case KVADDR:
-                                fprintf(fp, "(%lx is ", (ulong)fi->spec_addr);
-                        	break;
-                	case PHYSADDR:
-                                fprintf(fp, "(%llx is %s", fi->spec_addr,
-                                    PAGEOFFSET(fi->spec_addr) ?  "in " : "");
-                        	break;
-			}
-                        fprintf(fp, "%s of %ld pages) ",
-                                ordinal(offset+1, buf), power(2, order));
-                }
+			if (si->found) {
+				fprintf(fp, kmem_cache_hdr);
+				DUMP_KMEM_CACHE_INFO_V2();
+				fprintf(fp, slab_hdr);
+        			gather_slab_cached_count(si);
+				DUMP_SLAB_INFO();
 
-		fi->retval = TRUE;
-                fprintf(fp, "\n");
-	}
-}
+				switch (si->found)
+				{
+				case KMEM_BUFCTL_ADDR:
+					fprintf(fp, "   %lx ", 
+						(ulong)si->spec_addr);
+					fprintf(fp,"(kmem_bufctl_t)\n");
+					break;
 
+				case KMEM_SLAB_ADDR:
+					fprintf(fp, "   %lx ", 
+						(ulong)si->spec_addr);
+					fprintf(fp, "(slab)\n");
+					break;
 
-/*
- *  Same as dump_free_pages_zones_v1(), but updated for numerous 2.6 zone 
- *  and free_area related data structure changes.
- */
-static void
-dump_free_pages_zones_v2(struct meminfo *fi)
-{
-	int i, n;
-	ulong node_zones;
-	ulong size;
-	long zone_size_offset;
-	long chunk_size;
-	int order, errflag, do_search;
-	ulong offset, verbose, value, sum, found; 
-	ulong this_addr;
-	physaddr_t this_phys, searchphys;
-        ulong zone_mem_map;
-        ulong zone_start_paddr;
-	ulong zone_start_pfn;
-        ulong zone_start_mapnr;
-	struct node_table *nt;
-	char buf[BUFSIZE], *p;
-	char buf1[BUFSIZE];
-	char buf2[BUFSIZE];
-	char buf3[BUFSIZE];
-	char last_node[BUFSIZE];
-	char last_zone[BUFSIZE];
-	char last_area[BUFSIZE];
-	char last_area_hdr[BUFSIZE];
+				case KMEM_ON_SLAB:
+					fprintf(fp, "   %lx ", 
+						(ulong)si->spec_addr);
+					fprintf(fp, "(unused part of slab)\n");
+					break;
+						
+				case KMEM_OBJECT_ADDR_FREE:
+                                        fprintf(fp, free_inuse_hdr);
+					fprintf(fp, "   %lx\n", 
+						si->container ? si->container :
+						(ulong)si->spec_addr);
+					break;
 
-       if (!(vt->flags & (NODES|ZONES)))
-		error(FATAL, 
-		    "dump_free_pages_zones_v2 called without (NODES|ZONES)\n");
+                                case KMEM_OBJECT_ADDR_INUSE:
+                                        fprintf(fp, free_inuse_hdr);
+                                        fprintf(fp, "  [%lx]\n", 
+						si->container ? si->container :
+						(ulong)si->spec_addr);
+                                        break;
 
-        if (fi->flags & ADDRESS_SPECIFIED) {
-                switch (fi->memtype)
-                {
-                case KVADDR:
-                        if (!page_to_phys(fi->spec_addr, &searchphys)) {
-                                if (!kvtop(NULL, fi->spec_addr, &searchphys, 0))
-                                        return;
-                        }
-                        break;
-                case PHYSADDR:
-                        searchphys = fi->spec_addr;
-                        break;
-                default:
-                        error(FATAL, 
-			    "dump_free_pages_zones_v2: no memtype specified\n");
-                }
-		do_search = TRUE;
-        } else {
-                searchphys = 0;
-		do_search = FALSE;
-	}
+                                case KMEM_OBJECT_ADDR_CACHED:
+                                        fprintf(fp, free_inuse_hdr);
+                                        fprintf(fp, 
+					    "   %lx  (cpu %d cache)\n", 
+						si->container ? si->container :
+						(ulong)si->spec_addr, si->cpu);
+                                        break;
+
+                                case KMEM_OBJECT_ADDR_SHARED:
+                                        fprintf(fp, free_inuse_hdr);
+                                        fprintf(fp,
+                                            "   %lx  (shared cache)\n",
+						si->container ? si->container :
+                                                (ulong)si->spec_addr);
+                                        break;
+                                }
+
+				break;
+			}
+		}
 
-        verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE;
+next_cache:
+                readmem(si->cache+OFFSET(kmem_cache_s_next), 
+		        KVADDR, &si->cache, sizeof(ulong),
+                        "kmem_cache_s next", FAULT_ON_ERROR);
 
-	if (VALID_MEMBER(zone_spanned_pages))
-		zone_size_offset =  OFFSET(zone_spanned_pages);
-	else
-		error(FATAL, "zone struct has no spanned_pages field\n");
+                if (si->cache != cache_end)
+			si->cache -= OFFSET(kmem_cache_s_next);
 
-	if (do_search)
-		open_tmpfile();
+	} while (si->cache != cache_end);
 
-	hq_open();
+        if ((si->flags & ADDRESS_SPECIFIED) && !si->found)
+		error(INFO, "%s: address not found in cache: %lx\n", 
+			reqname, si->spec_addr);
+ 
+	if (si->errors)
+		error(INFO, "%ld error%s encountered\n", 
+			si->errors, si->errors > 1 ? "s" : "");
 
-	for (n = sum = found = 0; n < vt->numnodes; n++) {
-                nt = &vt->node_table[n];
-		node_zones = nt->pgdat + OFFSET(pglist_data_node_zones);
+	FREEBUF(si->addrlist);
+	FREEBUF(si->kmem_bufctl);
+        for (i = 0; i < vt->kmem_max_cpus; i++)
+                FREEBUF(si->cpudata[i]);
+	FREEBUF(si->shared_array_cache);
 
-		for (i = 0; i < vt->nr_zones; i++) {
-	
-			if (fi->flags == GET_FREE_PAGES) {
-	                	readmem(node_zones+
-					OFFSET(zone_free_pages), 
-					KVADDR, &value, sizeof(ulong),
-	                        	"node_zones free_pages", 
-					FAULT_ON_ERROR);
-				sum += value;
-				node_zones += SIZE(zone);
-				continue;
-			}
-	
-	                if (fi->flags == GET_FREE_HIGHMEM_PAGES) {
-	                        if (i == ZONE_HIGHMEM) {
-	                                readmem(node_zones+
-						OFFSET(zone_free_pages),
-						KVADDR, &value, sizeof(ulong),
-	                                        "node_zones free_pages",
-	                                        FAULT_ON_ERROR);
-	                                sum += value;
-	                        }
-	                        node_zones += SIZE(zone);
-	                        continue;
-	                }
-	
-			if (fi->flags == GET_ZONE_SIZES) {
-	                	readmem(node_zones+zone_size_offset, 
-					KVADDR, &size, sizeof(ulong),
-	                        	"node_zones size", FAULT_ON_ERROR);
-	                        sum += size;
-	                        node_zones += SIZE(zone);
-	                        continue;
-			}
+}
 
-			if ((i == 0) && (vt->flags & NODES)) {
-				if (n) {
-					fprintf(fp, "\n");
-					pad_line(fp, 
-						VADDR_PRLEN > 8 ? 74 : 66, '-');
-					fprintf(fp, "\n");
-				}
-				fprintf(fp, "%sNODE\n %2d\n", 
-					n ? "\n" : "", nt->node_id);
-			}
 
-	                fprintf(fp, "%s%s  %s  START_PADDR  START_MAPNR\n",
-				i > 0 ? "\n" : "",
-	                        zone_hdr,
-	                        mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, 
-				    "MEM_MAP"));
-	
-			fprintf(fp, "%3d   ", i);
-	
-	        	readmem(node_zones+OFFSET(zone_name), KVADDR, 
-				&value, sizeof(void *), 
-				"node_zones name", FAULT_ON_ERROR);
-	                if (read_string(value, buf, BUFSIZE-1))
-	                	fprintf(fp, "%-9s ", buf);
-			else
-				fprintf(fp, "(unknown) ");
-	
-	        	readmem(node_zones+zone_size_offset, KVADDR, 
-				&size, sizeof(ulong), 
-				"node_zones size", FAULT_ON_ERROR);
-	                fprintf(fp, "%6ld  ", size);
-	
-	        	readmem(node_zones+OFFSET(zone_free_pages), 
-				KVADDR, &value, sizeof(ulong), 
-				"node_zones free_pages", FAULT_ON_ERROR);
-	
-	                fprintf(fp, "%6ld  ", value);
-	
-                        readmem(node_zones+OFFSET(zone_zone_mem_map),
-                                KVADDR, &zone_mem_map, sizeof(ulong),
-                                "node_zones zone_mem_map", FAULT_ON_ERROR);
+/*
+ *  Walk through the slab chain hanging off a kmem_cache_s structure,
+ *  gathering basic statistics.
+ *
+ *  TBD: Given a specified physical address, determine whether it's in this
+ *  slab chain, and whether it's in use or not.
+ */
 
-			readmem(node_zones+ OFFSET(zone_zone_start_pfn),
-                                KVADDR, &zone_start_pfn, sizeof(ulong),
-                                "node_zones zone_start_pfn", FAULT_ON_ERROR);
-                        zone_start_paddr = PTOB(zone_start_pfn);
+#define INSLAB(obj, si) \
+  ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == si->s_mem)
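+/*
+ *  INSLAB masks an object address down to a slab-size boundary and
+ *  compares the result with s_mem, the start of the slab's object memory.
+ *  For example, with a one-page slab (slabsize 0x1000) starting at s_mem
+ *  0xc4012000, an object at 0xc40123c0 masks down to 0xc4012000 and is
+ *  therefore considered part of the slab.
+ */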
 
-                        if (zone_mem_map) 
-                        	zone_start_mapnr = 
-					(zone_mem_map - nt->mem_map) / 
-						SIZE(page);
-                        else
-                                zone_start_mapnr = 0;
-	
-	                fprintf(fp, "%s  %s  %s\n",
-	                	mkstring(buf1, VADDR_PRLEN,
-	                            CENTER|LONG_HEX,MKSTR(zone_mem_map)),
-	                	mkstring(buf2, strlen("START_PADDR"),
-	                            CENTER|LONG_HEX|RJUST,
-					MKSTR(zone_start_paddr)),
-	                	mkstring(buf3, strlen("START_MAPNR"),
-	                            CENTER|LONG_DEC|RJUST,
-					MKSTR(zone_start_mapnr)));
-	
-			sum += value;
+static void
+do_slab_chain(int cmd, struct meminfo *si)
+{
+	ulong tmp, magic;
+	ulong kmem_slab_end;
+	char *kmem_slab_s_buf;
 
-			if (value)
-				found += dump_zone_free_area(node_zones+
-					OFFSET(zone_free_area), 
-					vt->nr_free_areas, verbose);
+	si->slabsize = (power(2, si->order) * PAGESIZE());
 
-			node_zones += SIZE(zone);
-		}
-	}
+	kmem_slab_end = si->cache + OFFSET(kmem_cache_s_c_offset);
 
-	hq_close();
+	switch (cmd)
+	{
+	case SLAB_GET_COUNTS:
+		si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp));
 
-        if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)){
-                fi->retval = sum;
-                return;
-        }
+		if (slab_data_saved(si))
+			return;
 
-	fprintf(fp, "\nnr_free_pages: %ld  ", sum);
-	if (sum == found)
-		fprintf(fp, "(verified)\n");
-	else
-		fprintf(fp, "(found %ld)\n", found);
+		si->num_slabs = si->inuse = 0;
 
-	if (!do_search)
-		return;
+		if (si->slab == kmem_slab_end)
+			return;
 
-        found = FALSE;
-        rewind(pc->tmpfile);
-        order = offset = 0;
-	last_node[0] = NULLCHAR;
-        last_zone[0] = NULLCHAR;
-        last_area[0] = NULLCHAR;
-        last_area_hdr[0] = NULLCHAR;
+		kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s));
 
+		do {
+			if (received_SIGINT()) {
+				FREEBUF(kmem_slab_s_buf);
+				restart(0);
+			}
 
-        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
-		if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem"))
-			continue;
+			readmem(si->slab, KVADDR, kmem_slab_s_buf,
+				SIZE(kmem_slab_s), "kmem_slab_s buffer",
+				FAULT_ON_ERROR);
 
-		if (STRNEQ(buf, "nr_free_pages:"))
-			continue;
+			magic = ULONG(kmem_slab_s_buf +
+				OFFSET(kmem_slab_s_s_magic));
 
-		if (STRNEQ(buf, "NODE")) { 
-			fgets(buf, BUFSIZE, pc->tmpfile);
-			strcpy(last_node, strip_linefeeds(buf));
-			continue;
-		}
-		if (STRNEQ(buf, "ZONE")) {
-			fgets(buf, BUFSIZE, pc->tmpfile);
-			strcpy(last_zone, strip_linefeeds(buf));
-			continue;
-		}
-		if (STRNEQ(buf, "AREA")) {
-                        strcpy(last_area_hdr, buf);
-                        fgets(buf, BUFSIZE, pc->tmpfile);
-                        strcpy(last_area, strip_linefeeds(buf));
-			p = strstr(buf, "k");
-			*p = NULLCHAR;
-			while (*p != ' ')
-				p--;
-			chunk_size = atol(p+1) * 1024;
-			if (chunk_size == PAGESIZE())
-				order = 0;
-			else
-				order++;
-                        continue;
-                }
+			if (magic == SLAB_MAGIC_ALLOC) {
+	
+				tmp = ULONG(kmem_slab_s_buf +
+					OFFSET(kmem_slab_s_s_inuse));
+	
+				si->inuse += tmp;
+				si->num_slabs++;
+			} else {
+				fprintf(fp, 
+			   	    "\"%s\" cache: invalid s_magic: %lx\n", 
+					si->curname, magic);
+				si->errors++;
+				FREEBUF(kmem_slab_s_buf);
+				return;
+			}
+	
+			si->slab = ULONG(kmem_slab_s_buf +
+				OFFSET(kmem_slab_s_s_nextp));
+	
+		} while (si->slab != kmem_slab_end);
+		
+		FREEBUF(kmem_slab_s_buf);
+		save_slab_data(si);
+		break;
 
-                if (CRASHDEBUG(0) &&
-                    !hexadecimal(strip_linefeeds(buf), 0)) 
-                        continue;
+	case SLAB_WALKTHROUGH:
+        	if (!si->slab)
+			si->slab = ULONG(si->cache_buf + 
+				OFFSET(kmem_cache_s_c_firstp));
 
-                errflag = 0;
-                this_addr = htol(strip_linefeeds(buf),
-                        RETURN_ON_ERROR, &errflag);
-                if (errflag)
-                        continue;
+		if (si->slab == kmem_slab_end)
+			return;
+
+		if (CRASHDEBUG(1)) {
+			fprintf(fp, "search cache: [%s] ", si->curname);
+			if (si->flags & ADDRESS_SPECIFIED) 
+				fprintf(fp, "for %llx", si->spec_addr);
+			fprintf(fp, "\n");
+		}
 
-                if (!page_to_phys(this_addr, &this_phys)) 
-                        continue;
+		si->slab_buf = kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s));
 
-                if ((searchphys >= this_phys) &&
-                    (searchphys < (this_phys+chunk_size))) {
-                        if (searchphys > this_phys)
-                                offset = (searchphys - this_phys)/PAGESIZE();
-                        found = TRUE;
-                        break;
-                }
+	        do {
+                        if (received_SIGINT()) {
+				FREEBUF(kmem_slab_s_buf);
+                                restart(0);
+			}
 
-	}
-	close_tmpfile();
+			readmem(si->slab, KVADDR, kmem_slab_s_buf,
+				SIZE(kmem_slab_s), "kmem_slab_s buffer",
+				FAULT_ON_ERROR);
 
-	if (found) {
-		if (strlen(last_node)) 
-			fprintf(fp, "NODE\n%s\n", last_node); 
-                fprintf(fp, "%s  %s  START_PADDR  START_MAPNR\n",
-                        zone_hdr,
-                        mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP"));
-		fprintf(fp, "%s\n", last_zone);
-		fprintf(fp, last_area_hdr);
-		fprintf(fp, "%s\n", last_area);
-		fprintf(fp, "%lx  ", this_addr);
-                if (order) {
-                	switch (fi->memtype)
-                	{
-                	case KVADDR:
-                                fprintf(fp, "(%lx is ", (ulong)fi->spec_addr);
-                        	break;
-                	case PHYSADDR:
-                                fprintf(fp, "(%llx is %s", fi->spec_addr,
-                                    PAGEOFFSET(fi->spec_addr) ?  "in " : "");
-                        	break;
+	                dump_slab(si);
+	
+	                if (si->found) {
+				FREEBUF(kmem_slab_s_buf);
+	                        return;
 			}
-                        fprintf(fp, "%s of %ld pages) ",
-                                ordinal(offset+1, buf), power(2, order));
-                }
+	
+			si->slab = ULONG(kmem_slab_s_buf +
+				OFFSET(kmem_slab_s_s_nextp));
+	
+	        } while (si->slab != kmem_slab_end);
 
-		fi->retval = TRUE;
-                fprintf(fp, "\n");
+		FREEBUF(kmem_slab_s_buf);
+		break;
 	}
 }
 
 
-static char *
-page_usage_hdr = "ZONE  NAME        FREE   ACTIVE  INACTIVE_DIRTY  INACTIVE_CLEAN  MIN/LOW/HIGH";
-
 /*
- *  Display info about the non-free pages in each zone.
+ *  do_slab_chain() adapted for newer percpu slab format.
  */
-static int
-dump_zone_page_usage(void)
-{
-	int i, n;
-	ulong value, node_zones;
-	struct node_table *nt;
-	ulong inactive_dirty_pages, inactive_clean_pages, active_pages; 
-	ulong free_pages, pages_min, pages_low, pages_high;
-	char namebuf[BUFSIZE];
-	char buf1[BUFSIZE];
-	char buf2[BUFSIZE];
-	char buf3[BUFSIZE];
 
-	if (!VALID_MEMBER(zone_struct_inactive_dirty_pages) ||
-	    !VALID_MEMBER(zone_struct_inactive_clean_pages) ||
-	    !VALID_MEMBER(zone_struct_active_pages) ||
-	    !VALID_MEMBER(zone_struct_pages_min) ||
-	    !VALID_MEMBER(zone_struct_pages_low) ||
-	    !VALID_MEMBER(zone_struct_pages_high))
-		return FALSE;
+#define SLAB_BASE(X) (PTOB(BTOP(X)))
 
-	fprintf(fp, "\n");
+#define INSLAB_PERCPU(obj, si) \
+  ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem))
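+/*
+ *  Unlike INSLAB, the percpu variant first rounds s_mem down to a page
+ *  boundary with SLAB_BASE, because slab colouring and on-slab management
+ *  data can place the first object past the start of the slab's pages.
+ */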
 
-        for (n = 0; n < vt->numnodes; n++) {
-                nt = &vt->node_table[n];
-                node_zones = nt->pgdat + OFFSET(pglist_data_node_zones);
-                
-		if ((i == 0) && (vt->flags & NODES)) {
-                	fprintf(fp, "%sNODE\n %2d\n",
-                        	n ? "\n" : "", nt->node_id);
-                }
-		fprintf(fp, "%s\n", page_usage_hdr);
+#define SLAB_CHAINS (3)
 
-                for (i = 0; i < vt->nr_zones; i++) {
-			readmem(node_zones+OFFSET(zone_struct_free_pages),
-                                KVADDR, &free_pages, sizeof(ulong),
-                                "node_zones free_pages", FAULT_ON_ERROR);
-		        readmem(node_zones+
-				OFFSET(zone_struct_inactive_dirty_pages),
-		                KVADDR, &inactive_dirty_pages, sizeof(ulong),
-		                "node_zones inactive_dirty_pages", 
-				FAULT_ON_ERROR);
-		        readmem(node_zones+
-				OFFSET(zone_struct_inactive_clean_pages),
-		                KVADDR, &inactive_clean_pages, sizeof(ulong),
-		                "node_zones inactive_clean_pages", 
-				FAULT_ON_ERROR);
-		        readmem(node_zones+OFFSET(zone_struct_active_pages),
-		                KVADDR, &active_pages, sizeof(ulong),
-		                "node_zones active_pages", FAULT_ON_ERROR);
-		        readmem(node_zones+OFFSET(zone_struct_pages_min),
-		                KVADDR, &pages_min, sizeof(ulong),
-		                "node_zones pages_min", FAULT_ON_ERROR);
-		        readmem(node_zones+OFFSET(zone_struct_pages_low),
-		                KVADDR, &pages_low, sizeof(ulong),
-		                "node_zones pages_low", FAULT_ON_ERROR);
-		        readmem(node_zones+OFFSET(zone_struct_pages_high),
-		                KVADDR, &pages_high, sizeof(ulong),
-		                "node_zones pages_high", FAULT_ON_ERROR);
+static char *slab_chain_name_v1[] = {"full", "partial", "free"};
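+/*
+ *  Older percpu caches may keep a single "slabs" list rather than separate
+ *  full, partial and free lists; do_slab_chain_percpu_v1() checks for the
+ *  kmem_cache_s.slabs member and handles either layout.
+ */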
 
-                        readmem(node_zones+OFFSET(zone_struct_name), KVADDR,
-                                &value, sizeof(void *),
-                                "node_zones name", FAULT_ON_ERROR);
-                        if (read_string(value, buf1, BUFSIZE-1))
-                                sprintf(namebuf, "%-8s", buf1);
-                        else
-                                sprintf(namebuf, "(unknown)");
+static void
+do_slab_chain_percpu_v1(long cmd, struct meminfo *si)
+{
+	int i, tmp, s;
+	int list_borked;
+	char *slab_s_buf;
+	ulong specified_slab;
+	ulong last;
+	ulong slab_chains[SLAB_CHAINS];
 
-		        sprintf(buf2, "%ld/%ld/%ld", 
-				pages_min, pages_low, pages_high);
-		        fprintf(fp, "%3d   %s %7ld  %7ld %15ld %15ld  %s\n",
-				i,
-				namebuf,
-		                free_pages,
-		                active_pages,
-		                inactive_dirty_pages,
-		                inactive_clean_pages,
-		                mkstring(buf3, strlen("MIN/LOW/HIGH"), 
-				CENTER, buf2));
+	list_borked = 0;
+	si->slabsize = (power(2, si->order) * PAGESIZE());
+	si->cpucached_slab = 0;
 
-			node_zones += SIZE(zone_struct);
-		}
+	if (VALID_MEMBER(kmem_cache_s_slabs)) {
+		slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs);
+		slab_chains[1] = 0;
+		slab_chains[2] = 0;
+	} else {
+		slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full);
+		slab_chains[1] = si->cache + OFFSET(kmem_cache_s_slabs_partial);
+		slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free);
 	}
 
-	return TRUE;
-}
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+		fprintf(fp, "full: %lx partial: %lx free: %lx ]\n",
+			slab_chains[0], slab_chains[1], slab_chains[2]);
+	}
 
+	switch (cmd)
+	{
+	case SLAB_GET_COUNTS:
+		si->flags |= SLAB_GET_COUNTS;
+		si->flags &= ~SLAB_WALKTHROUGH;
+		si->cpucached_cache = 0;
+        	si->num_slabs = si->inuse = 0;
+		gather_cpudata_list_v1(si); 
 
-/*
- *  Dump the num "order" contents of the zone_t free_area array.
- */
-char *free_area_hdr3 = "AREA    SIZE  FREE_AREA_STRUCT\n";
-char *free_area_hdr4 = "AREA    SIZE  FREE_AREA_STRUCT  BLOCKS  PAGES\n";
+		slab_s_buf = GETBUF(SIZE(slab_s));
 
-static int
-dump_zone_free_area(ulong free_area, int num, ulong verbose)
-{
-	int i;
-	long chunk_size;
-	int flen, total_free, cnt;
-	char buf[BUFSIZE];
-	ulong free_area_buf[3];
-	struct list_data list_data, *ld;
+		for (s = 0; s < SLAB_CHAINS; s++) {
 
-	if (VALID_STRUCT(free_area_struct)) {
-		if (SIZE(free_area_struct) != (3 * sizeof(ulong)))
-			error(FATAL, 
-			    "unrecognized free_area_struct size: %ld\n", 
-				SIZE(free_area_struct));
-	} else if (VALID_STRUCT(free_area)) {
-                if (SIZE(free_area) != (3 * sizeof(ulong)))
-                        error(FATAL,
-                            "unrecognized free_area struct size: %ld\n",
-                                SIZE(free_area));
-	} else error(FATAL, 
-		"neither free_area_struct or free_area structures exist\n");
+			if (!slab_chains[s])
+				continue;
 
-	ld = &list_data;
+	                if (!readmem(slab_chains[s],
+	                    KVADDR, &si->slab, sizeof(ulong),
+	                    "first slab", QUIET|RETURN_ON_ERROR)) {
+                		error(INFO, 
+				    "%s: %s list: bad slab pointer: %lx\n",
+                        		si->curname, slab_chain_name_v1[s],
+					slab_chains[s]);
+				list_borked = 1;
+				continue;
+			}
+	
+			if (slab_data_saved(si)) {
+				FREEBUF(slab_s_buf);
+				return;
+			}
+	
+			if (si->slab == slab_chains[s]) 
+				continue;
+	
+			last = slab_chains[s];
+
+			do {
+	                        if (received_SIGINT()) {
+					FREEBUF(slab_s_buf);
+	                                restart(0);
+				}
+
+				if (!verify_slab_v1(si, last, s)) {
+					list_borked = 1;
+					continue;
+				}
+				last = si->slab - OFFSET(slab_s_list);
+	
+		                readmem(si->slab, KVADDR, slab_s_buf, 
+					SIZE(slab_s), "slab_s buffer", 
+					FAULT_ON_ERROR);
+	
+				tmp = INT(slab_s_buf + OFFSET(slab_s_inuse));
+				si->inuse += tmp;
+	
+				if (ACTIVE())
+					gather_cpudata_list_v1(si); 
+
+				si->s_mem = ULONG(slab_s_buf + 
+					OFFSET(slab_s_s_mem));
+				gather_slab_cached_count(si);
+	
+				si->num_slabs++;
+		
+				si->slab = ULONG(slab_s_buf + 
+					OFFSET(slab_s_list));
+				si->slab -= OFFSET(slab_s_list);
 
-	if (!verbose)
-		fprintf(fp, free_area_hdr4);
+				/*
+				 *  Check for slab transition. (Tony Dziedzic)
+				 */
+				for (i = 0; i < SLAB_CHAINS; i++) {
+     					if ((i != s) && 
+					    (si->slab == slab_chains[i])) {
+       						error(NOTE, 
+	  	                      "%s: slab chain inconsistency: %s list\n",
+							si->curname,
+							slab_chain_name_v1[s]);
+       						list_borked = 1;
+     					}
+				}
+		
+			} while (si->slab != slab_chains[s] && !list_borked);
+		}
 
-	total_free = 0;
-	flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT"));
+		FREEBUF(slab_s_buf);
+		if (!list_borked)
+			save_slab_data(si);
+		break;
 
-	for (i = 0; i < num; i++, 
-	     free_area += SIZE_OPTION(free_area_struct, free_area)) {
-		if (verbose)
-			fprintf(fp, free_area_hdr3);
-		fprintf(fp, "%3d ", i);
-		chunk_size = power(2, i);
-		sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024);
-                fprintf(fp, " %7s  ", buf);
+	case SLAB_WALKTHROUGH:
+		specified_slab = si->slab;
+		si->flags |= SLAB_WALKTHROUGH;
+		si->flags &= ~SLAB_GET_COUNTS;
 
-                readmem(free_area, KVADDR, free_area_buf,
-                        sizeof(ulong) * 3, "free_area_struct", FAULT_ON_ERROR);
+		for (s = 0; s < SLAB_CHAINS; s++) {
+			if (!slab_chains[s])
+				continue;
 
-		fprintf(fp, "%s  ",
-			mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_area)));
+	        	if (!specified_slab) {
+	                	if (!readmem(slab_chains[s],
+	                            KVADDR, &si->slab, sizeof(ulong),
+	                            "slabs", QUIET|RETURN_ON_ERROR)) {
+                			error(INFO, 
+				         "%s: %s list: bad slab pointer: %lx\n",
+                        			si->curname, 
+						slab_chain_name_v1[s],
+						slab_chains[s]);
+					list_borked = 1;
+					continue;
+				}
+				last = slab_chains[s];
+			} else
+				last = 0;
+	
+			if (si->slab == slab_chains[s])
+				continue;
 
-		if (free_area_buf[0] == free_area) {
-			if (verbose)
+			if (CRASHDEBUG(1)) {
+				fprintf(fp, "search cache: [%s] ", si->curname);
+				if (si->flags & ADDRESS_SPECIFIED) 
+					fprintf(fp, "for %llx", si->spec_addr);
 				fprintf(fp, "\n");
-			else
-				fprintf(fp, "%6d %6d\n", 0, 0);
-			continue;
-		}
+			}
 	
-		if (verbose)
-			fprintf(fp, "\n");
-
-                BZERO(ld, sizeof(struct list_data));
-                ld->flags = verbose | RETURN_ON_DUPLICATE;
-                ld->start = free_area_buf[0];
-                ld->end = free_area;
-		if (VALID_MEMBER(page_list_next))
-			ld->list_head_offset = OFFSET(page_list);
-        	else if (VALID_MEMBER(page_lru))
-			ld->list_head_offset = OFFSET(page_lru)+
-				OFFSET(list_head_next);
-		else error(FATAL, 
-			"neither page.list or page.lru exist?\n");
-
-                cnt = do_list(ld);
-		if (cnt < 0) 
-			error(FATAL, 
-			    "corrupted free list from free_area_struct: %lx\n", 
-				free_area);
+		        do {
+	                        if (received_SIGINT())
+	                                restart(0);
 
-		if (!verbose)
-			fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size);
+				if (!verify_slab_v1(si, last, s)) {
+					list_borked = 1;
+					continue;
+				}
+				last = si->slab - OFFSET(slab_s_list);
+	
+		                dump_slab_percpu_v1(si);
+		
+		                if (si->found) {
+					return;
+				}
+		
+		                readmem(si->slab+OFFSET(slab_s_list),
+		                        KVADDR, &si->slab, sizeof(ulong),
+		                        "slab list", FAULT_ON_ERROR);
+		
+				si->slab -= OFFSET(slab_s_list);
+	
+		        } while (si->slab != slab_chains[s] && !list_borked);
+		}
 
-                total_free += (cnt * chunk_size);
+		break;
 	}
-
-	return total_free;
 }
 
 /*
- *  dump_kmeminfo displays basic memory use information typically shown 
- *  by /proc/meminfo, and then some...
+ *  Try to preclude any attempt to translate a bogus slab structure.
  */
 
-char *kmeminfo_hdr = "              PAGES        TOTAL      PERCENTAGE\n";
-
-static void
-dump_kmeminfo(void)
+static int
+verify_slab_v1(struct meminfo *si, ulong last, int s)
 {
-	ulong totalram_pages;
-	ulong freeram_pages;
-	ulong used_pages;
-	ulong shared_pages;
-	ulong buffer_pages;
-	ulong subtract_buffer_pages;
-	ulong totalswap_pages, totalused_pages;
-        ulong totalhigh_pages;
-        ulong freehighmem_pages;
-        ulong totallowmem_pages;
-        ulong freelowmem_pages;
-	ulong pct;
-	ulong value1, value2;
-	uint tmp;
-	struct meminfo meminfo;
-	struct gnu_request req;
-	long page_cache_size;
-        ulong get_totalram;
-        ulong get_buffers;
-        ulong get_slabs;
-        struct syment *sp_array[2];
-	char buf[BUFSIZE];
-
-
-	BZERO(&meminfo, sizeof(struct meminfo));
-	meminfo.flags = GET_ALL;
-	dump_mem_map(&meminfo);
-	get_totalram = meminfo.get_totalram;
-	shared_pages = meminfo.get_shared;
-	get_buffers = meminfo.get_buffers;
-	get_slabs = meminfo.get_slabs;
-
-	fprintf(fp, kmeminfo_hdr);
-	/*
-	 *  Get total RAM based upon how the various versions of si_meminfo()
-         *  have done it, latest to earliest:
-	 *
-         *    Prior to 2.3.36, count all mem_map pages minus the reserved ones.
-         *    From 2.3.36 onwards, use "totalram_pages" if set.
-	 */
-	if (symbol_exists("totalram_pages")) {  
-		totalram_pages = vt->totalram_pages ? 
-			vt->totalram_pages : get_totalram; 
-	} else 
-		totalram_pages = get_totalram;
-
-	fprintf(fp, "%10s  %7ld  %11s         ----\n", "TOTAL MEM", 
-		totalram_pages, pages_to_size(totalram_pages, buf));
-
-	/*
-	 *  Get free pages from dump_free_pages() or its associates.
-	 *  Used pages are a free-bee...
-	 */
-	meminfo.flags = GET_FREE_PAGES;
-	vt->dump_free_pages(&meminfo);
-	freeram_pages = meminfo.retval;
-        pct = (freeram_pages * 100)/totalram_pages;
-	fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
-		"FREE", freeram_pages, pages_to_size(freeram_pages, buf), pct);
-
-	used_pages = totalram_pages - freeram_pages;
-        pct = (used_pages * 100)/totalram_pages;
-        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
-		"USED", used_pages, pages_to_size(used_pages, buf), pct);
-
-	/*
-	 *  Get shared pages from dump_mem_map().  Note that this is done
-         *  differently than the kernel -- it just tallies the non-reserved
-         *  pages that have a count of greater than 1.
-	 */
-        pct = (shared_pages * 100)/totalram_pages;
-        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
-		"SHARED", shared_pages, pages_to_size(shared_pages, buf), pct);
-
-	subtract_buffer_pages = 0;
-	if (symbol_exists("buffermem_pages")) { 
-                get_symbol_data("buffermem_pages", sizeof(int), &tmp);
-		buffer_pages = (ulong)tmp;
-	} else if (symbol_exists("buffermem")) {
-                get_symbol_data("buffermem", sizeof(int), &tmp);
-		buffer_pages = BTOP(tmp);
-	} else if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) && 
-		symbol_exists("nr_blockdev_pages")) {
-		subtract_buffer_pages = buffer_pages = nr_blockdev_pages();
-	} else
-		buffer_pages = 0;
-
-        pct = (buffer_pages * 100)/totalram_pages;
-        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
-		"BUFFERS", buffer_pages, pages_to_size(buffer_pages, buf), pct);
+	char slab_s_buf[BUFSIZE];
+	struct kernel_list_head *list_head;
+	unsigned int inuse;
+	ulong s_mem;
+	char *list;
+	int errcnt;
 
-	if (CRASHDEBUG(1)) 
-        	error(NOTE, "pages with buffers: %ld\n", get_buffers);
+	list = slab_chain_name_v1[s];
 
-	/*
-	 *  page_cache_size has evolved from a long to an atomic_t to
-	 *  not existing at all.
-	 */
-	
-	if (symbol_exists("page_cache_size")) {
-		get_symbol_type("page_cache_size", NULL, &req);
-        	if (req.length == sizeof(int)) {
-                	get_symbol_data("page_cache_size", sizeof(int), &tmp);
-                	page_cache_size = (long)tmp;
-        	} else
-                	get_symbol_data("page_cache_size", sizeof(long),
-                        	&page_cache_size);
-	} else if (symbol_exists("nr_pagecache")) {
-               	get_symbol_data("nr_pagecache", sizeof(int), &tmp);
-               	page_cache_size = (long)tmp;
-	}
+	errcnt = 0;
 
-	page_cache_size -= subtract_buffer_pages;
+        if (!readmem(si->slab, KVADDR, slab_s_buf,
+            SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) {
+                error(INFO, "%s: %s list: bad slab pointer: %lx\n",
+                        si->curname, list, si->slab);
+		return FALSE;
+        }                        
 
-        pct = (page_cache_size * 100)/totalram_pages;
-        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
-		"CACHED", page_cache_size, 
-		pages_to_size(page_cache_size, buf), pct);
+        list_head = (struct kernel_list_head *)
+		(slab_s_buf + OFFSET(slab_s_list));
 
-	/*
- 	 *  Although /proc/meminfo doesn't show it, show how much memory
-	 *  the slabs take up.
-	 */
+	if (!IS_KVADDR((ulong)list_head->next) || 
+	    !accessible((ulong)list_head->next)) {
+                error(INFO, "%s: %s list: slab: %lx  bad next pointer: %lx\n",
+                        si->curname, list, si->slab,
+			(ulong)list_head->next);
+		errcnt++;
+	}
 
-        pct = (get_slabs * 100)/totalram_pages;
-	fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n",
-		"SLAB", get_slabs, pages_to_size(get_slabs, buf), pct);
+	if (last && (last != (ulong)list_head->prev)) {
+                error(INFO, "%s: %s list: slab: %lx  bad prev pointer: %lx\n",
+                        si->curname, list, si->slab,
+                        (ulong)list_head->prev);
+		errcnt++;
+	}
 
-        if (symbol_exists("totalhigh_pages")) {
-	        switch (get_syment_array("totalhigh_pages", sp_array, 2))
-	        {
-	        case 1:
-	                get_symbol_data("totalhigh_pages", sizeof(ulong),
-	                        &totalhigh_pages);
-	                break;
-	        case 2:
-	                if (!(readmem(sp_array[0]->value, KVADDR,
-	                    &value1, sizeof(ulong),
-	                    "totalhigh_pages #1", RETURN_ON_ERROR)))
-	                        break;
-	                if (!(readmem(sp_array[1]->value, KVADDR,
-	                    &value2, sizeof(ulong),
-	                    "totalhigh_pages #2", RETURN_ON_ERROR)))
-	                        break;
-	                totalhigh_pages = MAX(value1, value2);
-	                break;
-	        }
+	inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse));
+	if (inuse > si->c_num) {
+                error(INFO, "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
+                        si->curname, list, si->slab, inuse);
+		errcnt++;
+	}
 
-		pct = totalhigh_pages ?
-			(totalhigh_pages * 100)/totalram_pages : 0;
-                fprintf(fp, "\n%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
-			"TOTAL HIGH", totalhigh_pages, 
-			pages_to_size(totalhigh_pages, buf), pct);
+	if (!last)
+		goto no_inuse_check_v1;
 
-		meminfo.flags = GET_FREE_HIGHMEM_PAGES;
-                vt->dump_free_pages(&meminfo);
-		freehighmem_pages = meminfo.retval;
-        	pct = freehighmem_pages ?  
-			(freehighmem_pages * 100)/totalhigh_pages : 0;
-                fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL HIGH\n", 
-			"FREE HIGH", freehighmem_pages, 
-			pages_to_size(freehighmem_pages, buf), pct);
+	switch (s) 
+	{
+	case 0: /* full -- but can be one singular list */
+                if (VALID_MEMBER(kmem_cache_s_slabs_full) && 
+		    (inuse != si->c_num)) {
+                        error(INFO,
+                            "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
+                                si->curname, list, si->slab, inuse);
+                        errcnt++;
+                }
+		break;
 
-                totallowmem_pages = totalram_pages - totalhigh_pages;
-		pct = (totallowmem_pages * 100)/totalram_pages;
-                fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL MEM\n", 
-			"TOTAL LOW", totallowmem_pages, 
-			pages_to_size(totallowmem_pages, buf), pct);
+	case 1: /* partial */
+		if ((inuse == 0) || (inuse == si->c_num)) {
+                	error(INFO, 
+		 	    "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
+                        	si->curname,  list, si->slab, inuse);
+			errcnt++;
+		}
+		break;
 
-                freelowmem_pages = freeram_pages - freehighmem_pages;
-        	pct = (freelowmem_pages * 100)/totallowmem_pages;
-                fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL LOW\n", 
-			"FREE LOW", freelowmem_pages, 
-			pages_to_size(freelowmem_pages, buf), pct);
-        }
+	case 2: /* free */
+		if (inuse > 0) {
+                	error(INFO, 
+		 	    "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
+                        	si->curname, list, si->slab, inuse);
+			errcnt++;
+		}
+		break;
+	}
 
-        /*
-         *  get swap data from dump_swap_info().
-         */
-	fprintf(fp, "\n");
-        if (dump_swap_info(RETURN_ON_ERROR, &totalswap_pages, 
-	    &totalused_pages)) {
-	        fprintf(fp, "%10s  %7ld  %11s         ----\n", 
-			"TOTAL SWAP", totalswap_pages, 
-			pages_to_size(totalswap_pages, buf));
-	        pct = totalswap_pages ? (totalused_pages * 100) /
-			totalswap_pages : 100;
-	        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL SWAP\n",
-	                "SWAP USED", totalused_pages,
-	                pages_to_size(totalused_pages, buf), pct);
-	        pct = totalswap_pages ? ((totalswap_pages - totalused_pages) *
-			100) / totalswap_pages : 0;
-	        fprintf(fp, "%10s  %7ld  %11s  %3ld%% of TOTAL SWAP\n", 
-			"SWAP FREE",
-	                totalswap_pages - totalused_pages,
-	                pages_to_size(totalswap_pages - totalused_pages, buf), 
-			pct);
-	} else
-		error(INFO, "swap_info[%ld].swap_map at %lx is unaccessible\n",
-			totalused_pages, totalswap_pages);
+no_inuse_check_v1:
+	s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem));
+	if (!IS_KVADDR(s_mem) || !accessible(s_mem)) {
+                error(INFO, "%s: %s list: slab: %lx  bad s_mem pointer: %lx\n",
+                        si->curname, list, si->slab, s_mem);
+		errcnt++;
+	}
 
-	dump_zone_page_usage();
+	return(errcnt ? FALSE : TRUE);
 }
 
 /*
- *  Emulate 2.6 nr_blockdev_pages() function.
+ *  Updated for 2.6 slab substructure.
  */
-static ulong
-nr_blockdev_pages(void)
-{
-        struct list_data list_data, *ld;
-	ulong *bdevlist;
-	int i, bdevcnt;
-	ulong inode, address_space;
-	ulong nrpages;
-	char *block_device_buf, *inode_buf, *address_space_buf;
 
-	block_device_buf = GETBUF(SIZE(block_device));
-	inode_buf = GETBUF(SIZE(inode));
-	address_space_buf = GETBUF(SIZE(address_space));
+static char *slab_chain_name_v2[] = {"partial", "full", "free"};
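+/*
+ *  The 2.6 lists hang off kmem_list3 and are walked in partial, full,
+ *  free order, matching the slab_chains[] setup below.
+ */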
 
-        ld = &list_data;
-        BZERO(ld, sizeof(struct list_data));
+static void
+do_slab_chain_percpu_v2(long cmd, struct meminfo *si)
+{
+	int i, tmp, s;
+	int list_borked;
+	char *slab_buf;
+	ulong specified_slab;
+	ulong last;
+	ulong slab_chains[SLAB_CHAINS];
 
-	get_symbol_data("all_bdevs", sizeof(void *), &ld->start);
-	ld->end = symbol_value("all_bdevs");
-        ld->list_head_offset = OFFSET(block_device_bd_list);
+	list_borked = 0;
+	si->slabsize = (power(2, si->order) * PAGESIZE());
+	si->cpucached_slab = 0;
 
-        hq_open();
-        bdevcnt = do_list(ld);
-        bdevlist = (ulong *)GETBUF(bdevcnt * sizeof(ulong));
-        bdevcnt = retrieve_list(bdevlist, bdevcnt);
-        hq_close();
+	slab_chains[0] = si->cache + OFFSET(kmem_cache_s_lists) +
+		OFFSET(kmem_list3_slabs_partial);
+	slab_chains[1] = si->cache + OFFSET(kmem_cache_s_lists) +
+                OFFSET(kmem_list3_slabs_full);
+        slab_chains[2] = si->cache + OFFSET(kmem_cache_s_lists) +
+                OFFSET(kmem_list3_slabs_free);
 
-	/*
-	 *  go through the block_device list, emulating:
-	 *
-	 *      ret += bdev->bd_inode->i_mapping->nrpages;
-	 */
-	for (i = nrpages = 0; i < bdevcnt; i++) {
-                readmem(bdevlist[i], KVADDR, block_device_buf, 
-			SIZE(block_device), "block_device buffer", 
-			FAULT_ON_ERROR);
-		inode = ULONG(block_device_buf + OFFSET(block_device_bd_inode));
-                readmem(inode, KVADDR, inode_buf, SIZE(inode), "inode buffer", 
-			FAULT_ON_ERROR);
-		address_space = ULONG(inode_buf + OFFSET(inode_i_mapping));
-                readmem(address_space, KVADDR, address_space_buf, 
-			SIZE(address_space), "address_space buffer", 
-			FAULT_ON_ERROR);
-		nrpages += ULONG(address_space_buf + 
-			OFFSET(address_space_nrpages));
-	}
+        if (CRASHDEBUG(1)) {
+                fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+                fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
+                        slab_chains[0], slab_chains[1], slab_chains[2]);
+        }
 
-	FREEBUF(bdevlist);
-	FREEBUF(block_device_buf);
-	FREEBUF(inode_buf);
-	FREEBUF(address_space_buf);
+	switch (cmd)
+	{
+	case SLAB_GET_COUNTS:
+		si->flags |= SLAB_GET_COUNTS;
+		si->flags &= ~SLAB_WALKTHROUGH;
+		si->cpucached_cache = 0;
+        	si->num_slabs = si->inuse = 0;
+		gather_cpudata_list_v2(si); 
 
-	return nrpages;
-} 
+		slab_buf = GETBUF(SIZE(slab));
 
-/*
- *  dump_vmlist() displays information from the vmlist.
- */
+		for (s = 0; s < SLAB_CHAINS; s++) {
+			if (!slab_chains[s])
+				continue;
 
-static void
-dump_vmlist(struct meminfo *vi)
-{
-	char buf[BUFSIZE];
-	char buf1[BUFSIZE];
-	char buf2[BUFSIZE];
-	ulong vmlist;
-	ulong addr, size, next, pcheck; 
-	physaddr_t paddr;
+	                if (!readmem(slab_chains[s],
+	                    KVADDR, &si->slab, sizeof(ulong),
+	                    "first slab", QUIET|RETURN_ON_ERROR)) {
+                                error(INFO, 
+				    "%s: %s list: bad slab pointer: %lx\n",
+                                        si->curname,
+					slab_chain_name_v2[s],
+                                        slab_chains[s]);
+				list_borked = 1;
+				continue;
+			}
+	
+			if (slab_data_saved(si)) {
+				FREEBUF(slab_buf);
+				return;
+			}
+	
+			if (si->slab == slab_chains[s]) 
+				continue;
+	
+			last = slab_chains[s];
+
+			do {
+	                        if (received_SIGINT()) {
+					FREEBUF(slab_buf);
+	                                restart(0);
+				}
 
-	get_symbol_data("vmlist", sizeof(void *), &vmlist);
-	next = vmlist;
+				if (!verify_slab_v2(si, last, s)) {
+					list_borked = 1;
+					continue;
+				}
+				last = si->slab - OFFSET(slab_list);
+	
+		                readmem(si->slab, KVADDR, slab_buf, 
+					SIZE(slab), "slab buffer", 
+					FAULT_ON_ERROR);
+	
+				tmp = INT(slab_buf + OFFSET(slab_inuse));
+				si->inuse += tmp;
+	
+				if (ACTIVE())
+					gather_cpudata_list_v2(si); 
 
-	while (next) {
-		if ((next == vmlist) && 
-		    !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC))) {
-			fprintf(fp, "%s  ", 
-			    mkstring(buf, MAX(strlen("VM_STRUCT"), VADDR_PRLEN),
-			    	CENTER|LJUST, "VM_STRUCT"));
-			fprintf(fp, "%s    SIZE\n",
-			    mkstring(buf, (VADDR_PRLEN * 2) + strlen(" - "),
-				CENTER|LJUST, "ADDRESS RANGE"));
+				si->s_mem = ULONG(slab_buf + 
+					OFFSET(slab_s_mem));
+				gather_slab_cached_count(si);
+	
+				si->num_slabs++;
+		
+				si->slab = ULONG(slab_buf + 
+					OFFSET(slab_list));
+				si->slab -= OFFSET(slab_list);
+
+				/*
+				 *  Check for slab transition. (Tony Dziedzic)
+				 */
+				for (i = 0; i < SLAB_CHAINS; i++) {
+     					if ((i != s) && 
+					    (si->slab == slab_chains[i])) {
+       						error(NOTE, 
+	  	                      "%s: slab chain inconsistency: %s list\n",
+							si->curname,
+							slab_chain_name_v2[s]);
+       						list_borked = 1;
+     					}
+				}
+		
+			} while (si->slab != slab_chains[s] && !list_borked);
 		}
 
-                readmem(next+OFFSET(vm_struct_addr), KVADDR, 
-			&addr, sizeof(void *),
-                        "vmlist addr", FAULT_ON_ERROR);
-                readmem(next+OFFSET(vm_struct_size), KVADDR, 
-			&size, sizeof(ulong),
-                        "vmlist size", FAULT_ON_ERROR);
+		FREEBUF(slab_buf);
+		if (!list_borked)
+			save_slab_data(si);
+		break;
 
-		if (!(vi->flags & ADDRESS_SPECIFIED) || 
-		    ((vi->memtype == KVADDR) &&
-		    ((vi->spec_addr >= addr) && (vi->spec_addr < (addr+size)))))
-			fprintf(fp, "%s%s  %s - %s  %6ld\n",
-				mkstring(buf,VADDR_PRLEN, LONG_HEX|CENTER|LJUST,
-				MKSTR(next)), space(MINSPACE-1),
-				mkstring(buf1, VADDR_PRLEN, LONG_HEX|RJUST,
-				MKSTR(addr)),
-				mkstring(buf2, VADDR_PRLEN, LONG_HEX|LJUST,
-				MKSTR(addr+size)),
-				size);
+	case SLAB_WALKTHROUGH:
+		specified_slab = si->slab;
+		si->flags |= SLAB_WALKTHROUGH;
+		si->flags &= ~SLAB_GET_COUNTS;
 
-		if ((vi->flags & ADDRESS_SPECIFIED) && 
-		     (vi->memtype == PHYSADDR)) {
-			for (pcheck = addr; pcheck < (addr+size); 
-			     pcheck += PAGESIZE()) {
-				if (!kvtop(NULL, pcheck, &paddr, 0))
+		for (s = 0; s < SLAB_CHAINS; s++) {
+			if (!slab_chains[s])
+				continue;
+
+	        	if (!specified_slab) {
+	                	if (!readmem(slab_chains[s],
+	                            KVADDR, &si->slab, sizeof(ulong),
+	                            "slabs", QUIET|RETURN_ON_ERROR)) {
+                                        error(INFO,
+                                         "%s: %s list: bad slab pointer: %lx\n",
+                                                si->curname,
+						slab_chain_name_v2[s],
+                                                slab_chains[s]);
+					list_borked = 1;
 					continue;
-		    		if ((vi->spec_addr >= paddr) && 
-				    (vi->spec_addr < (paddr+PAGESIZE()))) {
-					if (vi->flags & GET_PHYS_TO_VMALLOC) {
-						vi->retval = pcheck +
-						    PAGEOFFSET(paddr);
-						return;
-				        } else
-						fprintf(fp,
-						"%s%s  %s - %s  %6ld\n",
-						mkstring(buf, VADDR_PRLEN,
-						LONG_HEX|CENTER|LJUST,
-						MKSTR(next)), space(MINSPACE-1),
-						mkstring(buf1, VADDR_PRLEN,
-						LONG_HEX|RJUST, MKSTR(addr)),
-						mkstring(buf2, VADDR_PRLEN,
-						LONG_HEX|LJUST,
-						MKSTR(addr+size)), size);
-					break;
 				}
+				last = slab_chains[s];
+			} else
+				last = 0;
+			
+			if (si->slab == slab_chains[s])
+				continue;
+	
+			if (CRASHDEBUG(1)) {
+				fprintf(fp, "search cache: [%s] ", si->curname);
+				if (si->flags & ADDRESS_SPECIFIED) 
+					fprintf(fp, "for %llx", si->spec_addr);
+				fprintf(fp, "\n");
 			}
+	
+		        do {
+	                        if (received_SIGINT())
+	                                restart(0);
+	
+                                if (!verify_slab_v2(si, last, s)) {
+                                        list_borked = 1;
+                                        continue;
+                                }
+                                last = si->slab - OFFSET(slab_list);
 
+		                dump_slab_percpu_v2(si);
+		
+		                if (si->found) {
+					return;
+				}
+		
+		                readmem(si->slab+OFFSET(slab_list),
+		                        KVADDR, &si->slab, sizeof(ulong),
+		                        "slab list", FAULT_ON_ERROR);
+		
+				si->slab -= OFFSET(slab_list);
+	
+		        } while (si->slab != slab_chains[s] && !list_borked);
 		}
 
-                readmem(next+OFFSET(vm_struct_next), 
-			KVADDR, &next, sizeof(void *),
-                        "vmlist next", FAULT_ON_ERROR);
+		break;
 	}
-
-	if (vi->flags & GET_HIGHEST)
-		vi->retval = addr+size;
 }
 
-/*
- *  dump_page_lists() displays information from the active_list,
- *  inactive_dirty_list and inactive_clean_list from each zone.
- */
-static int
-dump_page_lists(struct meminfo *mi)
-{
-	int i, c, n, retval;
-        ulong node_zones, pgdat;
-	struct node_table *nt;
-	struct list_data list_data, *ld;
-	char buf[BUFSIZE];
-	ulong value;
-	ulong inactive_clean_pages, inactive_clean_list;
-	int nr_active_pages, nr_inactive_pages;
-	int nr_inactive_dirty_pages;
-
-	ld = &list_data;
 
-	retval = FALSE;
-	nr_active_pages = nr_inactive_dirty_pages = -1;
+/*
+ *  Added to traverse the per-node (kmem_list3) nodelists of a kmem_cache.
+ */
 
-	BZERO(ld, sizeof(struct list_data));
-	ld->list_head_offset = OFFSET(page_lru);
-	if (mi->flags & ADDRESS_SPECIFIED)
-		ld->searchfor = mi->spec_addr;
-	else if (mi->flags & VERBOSE)
-		ld->flags |= VERBOSE;
-	
-	if (mi->flags & GET_ACTIVE_LIST) {
-		if (!symbol_exists("active_list"))
-			error(FATAL, 
-			    "active_list does not exist in this kernel\n");
+static void
+do_slab_chain_percpu_v2_nodes(long cmd, struct meminfo *si)
+{
+	int i, tmp, s, node;
+	int list_borked;
+	char *slab_buf;
+	ulong specified_slab;
+	ulong last;
+	ulong slab_chains[SLAB_CHAINS];
+	ulong *start_address;
+	int index;
 
-		if (symbol_exists("nr_active_pages"))
-			get_symbol_data("nr_active_pages", sizeof(int), 
-				&nr_active_pages);
-		else
-			error(FATAL, 
-			    "nr_active_pages does not exist in this kernel\n");
+	list_borked = 0;
+	si->slabsize = (power(2, si->order) * PAGESIZE());
+	si->cpucached_slab = 0;
+	start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);
 
-		ld->end = symbol_value("active_list");
-                readmem(ld->end, KVADDR, &ld->start, sizeof(void *),
-                	"LIST_HEAD contents", FAULT_ON_ERROR);
-		
-		if (mi->flags & VERBOSE)
-			fprintf(fp, "active_list:\n");
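+	/*
+	 *  The cache's per-node list array holds one kmem_list3 pointer per
+	 *  memory node; read it all up front so that each node's slab lists
+	 *  can be walked below.
+	 */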
+	if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), KVADDR, 
+            &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes, 
+            "array nodelist array", RETURN_ON_ERROR)) 
+                    error(INFO, "cannot read kmem_cache nodelists array"); 
 
-                if (ld->start == ld->end) {
-                       c = 0;
-                       ld->searchfor = 0;
-                       if (mi->flags & VERBOSE)
-                               fprintf(fp, "(empty)\n");
-                } else {
-                	hq_open();
-                	c = do_list(ld);
-                	hq_close();
-		}
+	switch (cmd)
+	{
+	case SLAB_GET_COUNTS:
+		si->flags |= SLAB_GET_COUNTS;
+		si->flags &= ~SLAB_WALKTHROUGH;
+		si->cpucached_cache = 0;
+        	si->num_slabs = si->inuse = 0;
+		slab_buf = GETBUF(SIZE(slab));
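+		/*
+		 *  Walk each per-node list array slot, stopping at the first
+		 *  NULL entry; with NODES_ONLINE set, offline-node slots are
+		 *  skipped and the walk ends once no online nodes remain.
+		 */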
+		for (index=0; (index < vt->kmem_cache_len_nodes) && start_address[index]; index++)
+		{ 
+			if (vt->flags & NODES_ONLINE) {
+				node = next_online_node(index);
+				if (node < 0)
+					break;
+				if (node != index)
+					continue;
+			}
 
-		if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) {
-			fprintf(fp, "%lx\n", ld->searchfor);
-			retval = TRUE;
-                } else {
-                        fprintf(fp, "%snr_active_pages: %d ", 
-				mi->flags & VERBOSE ? "\n" : "",
-                                nr_active_pages);
-                        if (c != nr_active_pages)
-                                fprintf(fp, "(found %d)\n", c);
-                        else
-                                fprintf(fp, "(verified)\n");
-		}
-	}
+			slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial);
+			slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full);
+		        slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free);
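+			/*
+			 *  slab_chains[] now holds this node's partial, full
+			 *  and free slab list heads; each list is walked below
+			 *  until it wraps back around to its list head.
+			 */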
+			
+			gather_cpudata_list_v2_nodes(si, index); 
+	
+		        if (CRASHDEBUG(1)) {
+                		fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+	                	fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
+                        		slab_chains[0], slab_chains[1], slab_chains[2]);
+			}
 
-	if (mi->flags & GET_INACTIVE_LIST) {
-		if (!symbol_exists("inactive_list"))
-			error(FATAL, 
-			    "inactive_list does not exist in this kernel\n");
+			for (s = 0; s < SLAB_CHAINS; s++) {
+				if (!slab_chains[s])
+					continue;
+	
+		                if (!readmem(slab_chains[s],
+	        	            KVADDR, &si->slab, sizeof(ulong),
+	                	    "first slab", QUIET|RETURN_ON_ERROR)) {
+	                                error(INFO, 
+					    "%s: %s list: bad slab pointer: %lx\n",
+                	                        si->curname,
+						slab_chain_name_v2[s],
+                                	        slab_chains[s]);
+					list_borked = 1;
+					continue;
+				}
+	
+				if (slab_data_saved(si)) {
+					FREEBUF(slab_buf);
+					FREEBUF(start_address);
+					return;
+				}
+			
+				if (si->slab == slab_chains[s]) 
+					continue;
+	
+				last = slab_chains[s];
 
-		if (symbol_exists("nr_inactive_pages"))
-			get_symbol_data("nr_inactive_pages", sizeof(int), 
-				&nr_inactive_pages);
-		else
-			error(FATAL, 
-			    "nr_active_pages does not exist in this kernel\n");
+				do {
+	        	                if (received_SIGINT()) {
+						FREEBUF(slab_buf);
+						FREEBUF(start_address);
+	                        	        restart(0);
+					}
 
-		ld->end = symbol_value("inactive_list");
-                readmem(ld->end, KVADDR, &ld->start, sizeof(void *),
-                	"LIST_HEAD contents", FAULT_ON_ERROR);
+					if (!verify_slab_v2(si, last, s)) {
+						list_borked = 1;
+						continue;
+					}
+					last = si->slab - OFFSET(slab_list);
 		
-		if (mi->flags & VERBOSE)
-			fprintf(fp, "inactive_list:\n");
+		        	        readmem(si->slab, KVADDR, slab_buf, 
+						SIZE(slab), "slab buffer", 
+						FAULT_ON_ERROR);
+		
+					tmp = INT(slab_buf + OFFSET(slab_inuse));
+					si->inuse += tmp;
+	
+					if (ACTIVE())
+						gather_cpudata_list_v2_nodes(si, index); 
 
-                if (ld->start == ld->end) {
-                       c = 0;
-                       ld->searchfor = 0;
-                       if (mi->flags & VERBOSE)
-                               fprintf(fp, "(empty)\n");
-                } else {
-                	hq_open();
-                	c = do_list(ld);
-                	hq_close();
-		}
+					si->s_mem = ULONG(slab_buf + 
+						OFFSET(slab_s_mem));
+					gather_slab_cached_count(si);
+	
+					si->num_slabs++;
+		
+					si->slab = ULONG(slab_buf + 
+						OFFSET(slab_list));
+					si->slab -= OFFSET(slab_list);
 
-		if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) {
-			fprintf(fp, "%lx\n", ld->searchfor);
-			retval = TRUE;
-                } else {
-                        fprintf(fp, "%snr_inactive_pages: %d ", 
-				mi->flags & VERBOSE ? "\n" : "",
-                                nr_inactive_pages);
-                        if (c != nr_inactive_pages)
-                                fprintf(fp, "(found %d)\n", c);
-                        else
-                                fprintf(fp, "(verified)\n");
+				/*
+				 *  Check for slab transition. (Tony Dziedzic)
+				 */
+					for (i = 0; i < SLAB_CHAINS; i++) {
+     						if ((i != s) && 
+						    (si->slab == slab_chains[i])) {
+       							error(NOTE, 
+		  	                      "%s: slab chain inconsistency: %s list\n",
+								si->curname,
+								slab_chain_name_v2[s]);
+       							list_borked = 1;
+     						}
+					}
+			
+				} while (si->slab != slab_chains[s] && !list_borked);
+			}
 		}
-	}
 
-        if (mi->flags & GET_INACTIVE_DIRTY) {
-		if (!symbol_exists("inactive_dirty_list"))
-			error(FATAL, 
-		        "inactive_dirty_list does not exist in this kernel\n");
+		if (!list_borked)
+			save_slab_data(si);
+		break;
 
-                if (symbol_exists("nr_inactive_dirty_pages"))
-                        get_symbol_data("nr_inactive_dirty_pages", sizeof(int), 
-                                &nr_inactive_dirty_pages);
-		else
-			error(FATAL,
-                     "nr_inactive_dirty_pages does not exist in this kernel\n");
+	case SLAB_WALKTHROUGH:
+		specified_slab = si->slab;     
+		si->flags |= SLAB_WALKTHROUGH;
+		si->flags &= ~SLAB_GET_COUNTS;
+		slab_buf = GETBUF(SIZE(slab));
+		for (index=0; (index < vt->kmem_cache_len_nodes) && start_address[index]; index++)
+		{ 
+			if (vt->flags & NODES_ONLINE) {
+				node = next_online_node(index);
+				if (node < 0)
+					break;
+				if (node != index)
+					continue;
+			}
 
-		ld->end = symbol_value("inactive_dirty_list");
-                readmem(ld->end, KVADDR, &ld->start, sizeof(void *),
-                	"LIST_HEAD contents", FAULT_ON_ERROR);
+			slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial);
+			slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full);
+		        slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free);
+	
+			gather_cpudata_list_v2_nodes(si, index);
+ 
+		        if (CRASHDEBUG(1)) {
+                		fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+	                	fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
+                        		slab_chains[0], slab_chains[1], slab_chains[2]);
+			}
 
-		if (mi->flags & VERBOSE)
-			fprintf(fp, "%sinactive_dirty_list:\n",
-				mi->flags & GET_ACTIVE_LIST ? "\n" : "");
+			for (s = 0; s < SLAB_CHAINS; s++) {
+				if (!slab_chains[s])
+					continue;
 
-                if (ld->start == ld->end) {
-                       c = 0;
-                       ld->searchfor = 0;
-                       if (mi->flags & VERBOSE)
-                               fprintf(fp, "(empty)\n");
-                } else {
-			hq_open();
-        		c = do_list(ld);
-        		hq_close();
-		}
+	        	if (!specified_slab) {
+	                	if (!readmem(slab_chains[s],
+	       	                    KVADDR, &si->slab, sizeof(ulong),
+	               	            "slabs", QUIET|RETURN_ON_ERROR)) {
+                               	        error(INFO,
+	                                        "%s: %s list: bad slab pointer: %lx\n",
+                                                si->curname,
+						slab_chain_name_v2[s],
+                       	                        slab_chains[s]);
+						list_borked = 1;
+						continue;
+					}
+					last = slab_chains[s];
+				} else
+					last = 0;
+			
+				if (si->slab == slab_chains[s])
+					continue;
+				
+				readmem(si->slab, KVADDR, slab_buf, 
+						SIZE(slab), "slab buffer", 
+						FAULT_ON_ERROR);
+		
+				si->s_mem = ULONG(slab_buf + 
+						OFFSET(slab_s_mem));
 
-                if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { 
-                        fprintf(fp, "%lx\n", ld->searchfor);
-			retval = TRUE;
-		} else {
-			fprintf(fp, "%snr_inactive_dirty_pages: %d ", 
-				mi->flags & VERBOSE ? "\n" : "",
-				nr_inactive_dirty_pages);
-        		if (c != nr_inactive_dirty_pages)
-                		fprintf(fp, "(found %d)\n", c);
-        		else
-                		fprintf(fp, "(verified)\n");
+				if (CRASHDEBUG(1)) {
+					fprintf(fp, "search cache: [%s] ", si->curname);
+					if (si->flags & ADDRESS_SPECIFIED) 
+						fprintf(fp, "for %llx", si->spec_addr);
+					fprintf(fp, "\n");
+				}
+	
+			        do {
+		                        if (received_SIGINT())
+					{
+						FREEBUF(start_address);
+						FREEBUF(slab_buf);
+	        	                        restart(0);
+					}
+	
+                        	        if (!verify_slab_v2(si, last, s)) {
+                                	        list_borked = 1;
+                                        	continue;
+	                                }
+        	                        last = si->slab - OFFSET(slab_list);
+	
+			                dump_slab_percpu_v2(si);
+					
+					if (si->found) {
+						FREEBUF(start_address);
+						FREEBUF(slab_buf);
+						return;
+					}
+		
+			                readmem(si->slab+OFFSET(slab_list),
+			                        KVADDR, &si->slab, sizeof(ulong),
+			                        "slab list", FAULT_ON_ERROR);
+			
+					si->slab -= OFFSET(slab_list);
+	
+			        } while (si->slab != slab_chains[s] && !list_borked);
+			}
 		}
-        }
 
-        if (mi->flags & GET_INACTIVE_CLEAN) {
-		if (INVALID_MEMBER(zone_struct_inactive_clean_list))
-			error(FATAL, 
-		        "inactive_clean_list(s) do not exist in this kernel\n");
-
-        	get_symbol_data("pgdat_list", sizeof(void *), &pgdat);
-
-                if ((mi->flags & VERBOSE) && 
-		    (mi->flags & (GET_ACTIVE_LIST|GET_INACTIVE_DIRTY)))
-			fprintf(fp, "\n");
-
-        	for (n = 0; pgdat; n++) {
-                	nt = &vt->node_table[n];
-
-                	node_zones = nt->pgdat + OFFSET(pglist_data_node_zones);
+		break;
+	}
+	FREEBUF(slab_buf);
+	FREEBUF(start_address);
+}
 
-                	for (i = 0; i < vt->nr_zones; i++) {
-                        	readmem(node_zones+OFFSET(zone_struct_name), 
-					KVADDR, &value, sizeof(void *),
-                                	"zone_struct name", FAULT_ON_ERROR);
-                        	if (!read_string(value, buf, BUFSIZE-1))
-                                	sprintf(buf, "(unknown) ");
+/*
+ *  Try to preclude any attempt to translate a bogus slab structure.
+ */
+static int
+verify_slab_v2(struct meminfo *si, ulong last, int s)
+{
+	char slab_buf[BUFSIZE];
+	struct kernel_list_head *list_head;
+	unsigned int inuse;
+	ulong s_mem;
+	char *list;
+	int errcnt;
 
-                		if (mi->flags & VERBOSE) {
-					if (vt->numnodes > 1)
-                        			fprintf(fp, "NODE %d ", n);
-                        		fprintf(fp, 
-				            "\"%s\" inactive_clean_list:\n", 
-						buf);
-				}
+	list = slab_chain_name_v2[s];
 
-				readmem(node_zones +
-				    OFFSET(zone_struct_inactive_clean_pages),
-                                    KVADDR, &inactive_clean_pages, 
-				    sizeof(ulong), "inactive_clean_pages", 
-				    FAULT_ON_ERROR);
+	errcnt = 0;
 
-                                readmem(node_zones +
-                                    OFFSET(zone_struct_inactive_clean_list),
-                                    KVADDR, &inactive_clean_list, 
-                                    sizeof(ulong), "inactive_clean_list", 
-                                    FAULT_ON_ERROR);
+        if (!readmem(si->slab, KVADDR, slab_buf,
+            SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) {
+                error(INFO, "%s: %s list: bad slab pointer: %lx\n",
+                        si->curname, list, si->slab);
+		return FALSE;
+        }                        
 
-				ld->start = inactive_clean_list;
-				ld->end = node_zones +
-                                    OFFSET(zone_struct_inactive_clean_list);
-        			if (mi->flags & ADDRESS_SPECIFIED)
-                			ld->searchfor = mi->spec_addr;
+        list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list));
+	if (!IS_KVADDR((ulong)list_head->next) || 
+	    !accessible((ulong)list_head->next)) {
+                error(INFO, "%s: %s list: slab: %lx  bad next pointer: %lx\n",
+                        si->curname, list, si->slab,
+			(ulong)list_head->next);
+		errcnt++;
+	}
 
-				if (ld->start == ld->end) {
-					c = 0;
-					ld->searchfor = 0;
-					if (mi->flags & VERBOSE)
-						fprintf(fp, "(empty)\n");
-				} else {
-                			hq_open();
-                			c = do_list(ld);
-                			hq_close();
-				}
+	if (last && (last != (ulong)list_head->prev)) {
+                error(INFO, "%s: %s list: slab: %lx  bad prev pointer: %lx\n",
+                        si->curname, list, si->slab,
+                        (ulong)list_head->prev);
+		errcnt++;
+	}
 
-		                if ((mi->flags & ADDRESS_SPECIFIED) && 
-				    ld->searchfor) {
-		                        fprintf(fp, "%lx\n", ld->searchfor);
-		                        retval = TRUE;
-		                } else {
-					if (vt->numnodes > 1)
-						fprintf(fp, "NODE %d ", n);
-					fprintf(fp, "\"%s\" ", buf);
-		                        fprintf(fp, 
-					    "inactive_clean_pages: %ld ",
-		                                inactive_clean_pages);
-		                        if (c != inactive_clean_pages)
-		                                fprintf(fp, "(found %d)\n", c);
-		                        else
-		                                fprintf(fp, "(verified)\n");
-		                }
+	inuse = UINT(slab_buf + OFFSET(slab_inuse));
+	if (inuse > si->c_num) {
+                error(INFO, "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
+                        si->curname, list, si->slab, inuse);
+		errcnt++;
+	}
 
-				node_zones += SIZE(zone_struct);
-			}
+	if (!last)
+		goto no_inuse_check_v2;
 
-                	readmem(pgdat + OFFSET_OPTION(pglist_data_node_next,
-				pglist_data_pgdat_next), KVADDR,
-                        	&pgdat, sizeof(void *), "pglist_data node_next",
-                        	FAULT_ON_ERROR);
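+	/*
+	 *  Chain-specific sanity checks: a partial-list slab should be
+	 *  neither empty nor full, a full-list slab should have every
+	 *  object in use, and a free-list slab should have none.
+	 */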
+	switch (s) 
+	{
+	case 0: /* partial */
+                if ((inuse == 0) || (inuse == si->c_num)) {
+                	error(INFO, 
+		 	    "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
+                        	si->curname, list, si->slab, inuse);
+			errcnt++;
 		}
-        }
+		break;
 
-	return retval;
-}
+	case 1: /* full */
+		if (inuse != si->c_num) {
+                	error(INFO, 
+		 	    "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
+                        	si->curname, list, si->slab, inuse);
+			errcnt++;
+		}
+		break;
+
+	case 2: /* free */
+		if (inuse > 0) {
+                	error(INFO, 
+		 	    "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
+                        	si->curname, list, si->slab, inuse);
+			errcnt++;
+		}
+		break;
+	}
 
+no_inuse_check_v2:
+	s_mem = ULONG(slab_buf + OFFSET(slab_s_mem));
+	if (!IS_KVADDR(s_mem) || !accessible(s_mem)) {
+                error(INFO, "%s: %s list: slab: %lx  bad s_mem pointer: %lx\n",
+                        si->curname, list, si->slab, s_mem);
+		errcnt++;
+	}
 
+	return(errcnt ? FALSE : TRUE);
+}
 
 /*
- *  Check whether an address is a kmem_cache_t address, and if so, return
- *  a pointer to the static buffer containing its name string.  Otherwise
- *  return NULL on failure.
+ *  If it's a dumpfile, save the essential slab data to avoid re-reading 
+ *  the whole slab chain more than once.  This may seem like overkill, but
+ *  if the problem is a memory leak, or just the over-use of the buffer_head
+ *  cache, it's painful to wait each time subsequent kmem -s or -i commands
+ *  simply need the basic slab counts.
  */
+struct slab_data {
+	ulong cache_addr;
+	int num_slabs;
+	int inuse;
+	ulong cpucached_cache;
+};
 
-#define PERCPU_NOT_SUPPORTED "per-cpu slab format not supported yet\n"
+#define NO_SLAB_DATA ((void *)(-1))
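+/*
+ *  NO_SLAB_DATA does double duty: it marks vt->slab_data itself as
+ *  unusable when the table cannot be allocated, and (cast to ulong) it
+ *  marks unused cache_addr slots within the table.
+ */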
 
-static char * 
-is_kmem_cache_addr(ulong vaddr, char *kbuf)
+static void 
+save_slab_data(struct meminfo *si)
 {
-        ulong cache, cache_cache, name;
-	long next_offset, name_offset;
-	char *cache_buf;
+	int i;
 
-	if (vt->flags & KMEM_CACHE_UNAVAIL) {
-		error(INFO, "kmem cache slab subsystem not available\n");
-		return NULL;
+	if (si->flags & SLAB_DATA_NOSAVE) {
+		si->flags &= ~SLAB_DATA_NOSAVE;
+		return;
 	}
 
-        name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
-                OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name);
-        next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
-                OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);
+	if (ACTIVE())
+		return;
 
-        cache = cache_cache = symbol_value("cache_cache");
+	if (vt->slab_data == NO_SLAB_DATA)
+		return;
 
-	cache_buf = GETBUF(SIZE(kmem_cache_s));
+	if (!vt->slab_data) {
+        	if (!(vt->slab_data = (struct slab_data *)
+            	    malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) {
+                	error(INFO, "cannot malloc slab_data table");
+			vt->slab_data = NO_SLAB_DATA;
+			return;
+		}
+		for (i = 0; i < vt->kmem_cache_count; i++) {
+			vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA;
+			vt->slab_data[i].num_slabs = 0;
+			vt->slab_data[i].inuse = 0;
+			vt->slab_data[i].cpucached_cache = 0;
+		}
+	}
 
-        do {
-	        readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s),
-	        	"kmem_cache_s buffer", FAULT_ON_ERROR);
+	for (i = 0; i < vt->kmem_cache_count; i++) {
+		if (vt->slab_data[i].cache_addr == si->cache) 
+			break;
 
-		if (cache == vaddr) {
-	                if (vt->kmem_cache_namelen) {
-				BCOPY(cache_buf+name_offset, kbuf, 
-					vt->kmem_cache_namelen);
-	                } else {
-				name = ULONG(cache_buf + name_offset);
-	                        if (!read_string(name, kbuf, BUFSIZE-1)) {
-					if (vt->flags & 
-					  (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
-	                                	error(FATAL,
-	                      "cannot read kmem_cache_s.name string at %lx\n",
-	                                        	name);
-					else
-	                                	error(FATAL,
-	                      "cannot read kmem_cache_s.c_name string at %lx\n",
-	                                        	name);
-				}
-	                }
-			FREEBUF(cache_buf);
-			return kbuf;
+		if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) {
+			vt->slab_data[i].cache_addr = si->cache; 
+			vt->slab_data[i].num_slabs = si->num_slabs; 
+			vt->slab_data[i].inuse = si->inuse; 
+			vt->slab_data[i].cpucached_cache = si->cpucached_cache;
+			break;
 		}
+	}
+}
 
-		cache = ULONG(cache_buf + next_offset);
+static int 
+slab_data_saved(struct meminfo *si)
+{
+	int i;
 
-		if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
-			cache -= next_offset;
+	if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) 
+		return FALSE;
 
-        } while (cache != cache_cache);
+	for (i = 0; i < vt->kmem_cache_count; i++) {
+		if (vt->slab_data[i].cache_addr == si->cache) {
+			si->inuse = vt->slab_data[i].inuse;
+			si->num_slabs = vt->slab_data[i].num_slabs;
+			si->cpucached_cache = vt->slab_data[i].cpucached_cache;
+			return TRUE;
+		}
+	}
 
-	FREEBUF(cache_buf);
-	return NULL;
+	return FALSE;
 }
 
-/*
- *  Note same functionality as above, but instead it just
- *  dumps all slab cache names and their addresses.
- */
 static void
-kmem_cache_list(void)
+dump_saved_slab_data(void)
 {
-        ulong cache, cache_cache, name;
-	long next_offset, name_offset;
-	char *cache_buf;
-	char buf[BUFSIZE];
+	int i;
 
-	if (vt->flags & KMEM_CACHE_UNAVAIL) {
-		error(INFO, "kmem cache slab subsystem not available\n");
+	if (!vt->slab_data || (vt->slab_data == NO_SLAB_DATA))
 		return;
-	}
-
-        name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
-                OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name);
-        next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
-                OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);
 
-        cache = cache_cache = symbol_value("cache_cache");
+	for (i = 0; i < vt->kmem_cache_count; i++) {
+		if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA)
+			break;
 
-	cache_buf = GETBUF(SIZE(kmem_cache_s));
+		fprintf(fp, 
+             "     cache: %lx inuse: %5d num_slabs: %3d cpucached_cache: %ld\n",
+			vt->slab_data[i].cache_addr,
+			vt->slab_data[i].inuse,
+			vt->slab_data[i].num_slabs,
+			vt->slab_data[i].cpucached_cache);
+	}
+}
 
-        do {
-	        readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s),
-	        	"kmem_cache_s buffer", FAULT_ON_ERROR);
+/*
+ *  Dump the contents of a kmem slab.
+ */
 
-	        if (vt->kmem_cache_namelen) {
-			BCOPY(cache_buf+name_offset, buf, 
-				vt->kmem_cache_namelen);
-	        } else {
-			name = ULONG(cache_buf + name_offset);
-	                if (!read_string(name, buf, BUFSIZE-1)) {
-				if (vt->flags & 
-				    (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
-	                               	error(FATAL,
-	                      "cannot read kmem_cache_s.name string at %lx\n",
-	                                       	name);
-				else
-	                               	error(FATAL,
-	                      "cannot read kmem_cache_s.c_name string at %lx\n",
-	                                       	name);
-			}
-	        }
+static void
+dump_slab(struct meminfo *si)
+{
+	uint16_t s_offset;
 
-		fprintf(fp, "%lx %s\n", cache, buf);
+	si->s_mem = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_mem));
+	si->s_mem = PTOB(BTOP(si->s_mem));
 
-		cache = ULONG(cache_buf + next_offset);
+        if (si->flags & ADDRESS_SPECIFIED)  {
+                if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) &&
+                    (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))) {
+                	si->found = KMEM_SLAB_ADDR;
+                        return;
+                }
+		if (INSLAB(si->spec_addr, si))
+			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
+		else
+			return;
+        }
 
-		if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
-			cache -= next_offset;
+	si->s_freep = VOID_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_freep));
+	si->s_inuse = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_inuse));
+	si->s_index = ULONG_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_index));
+	s_offset = USHORT(si->slab_buf + OFFSET(kmem_slab_s_s_offset));
 
-        } while (cache != cache_cache);
+	if (!(si->flags & ADDRESS_SPECIFIED)) {
+		fprintf(fp, slab_hdr);
+		DUMP_SLAB_INFO();
+	}
 
-	FREEBUF(cache_buf);
+	dump_slab_objects(si);
 }
 
 /*
- *  Translate an address to its physical page number, verify that the
- *  page in fact belongs to the slab subsystem, and if so, return the 
- *  name of the cache to which it belongs.
+ *  dump_slab() adapted for newer percpu slab format.
  */
-static char *
-vaddr_to_kmem_cache(ulong vaddr, char *buf)
+
+static void
+dump_slab_percpu_v1(struct meminfo *si)
 {
-	physaddr_t paddr;
-	ulong page;
-	ulong cache;
+	int tmp;
+
+        readmem(si->slab+OFFSET(slab_s_s_mem),
+                KVADDR, &si->s_mem, sizeof(ulong),
+                "s_mem", FAULT_ON_ERROR);
+
+	/*
+	 * Include the array of kmem_bufctl_t's appended to slab.
+	 */
+	tmp = SIZE(slab_s) + (SIZE(kmem_bufctl_t) * si->c_num);
+
+        if (si->flags & ADDRESS_SPECIFIED)  {
+                if (INSLAB_PERCPU(si->slab, si) && 
+		    (si->spec_addr >= si->slab) &&
+                    (si->spec_addr < (si->slab+tmp))) {
+			if (si->spec_addr >= (si->slab + SIZE(slab_s)))
+				si->found = KMEM_BUFCTL_ADDR;
+			else
+                		si->found = KMEM_SLAB_ADDR;
+                } else if (INSLAB_PERCPU(si->spec_addr, si))
+			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
+		else
+			return;
+        }
 
-        if (!kvtop(NULL, vaddr, &paddr, 0)) {
-		error(WARNING, 
-		    "cannot make virtual-to-physical translation: %lx\n", 
-			vaddr);
-		return NULL;
-	}
+        readmem(si->slab+OFFSET(slab_s_inuse),
+                KVADDR, &tmp, sizeof(int),
+                "inuse", FAULT_ON_ERROR);
+	si->s_inuse = tmp;
 
-	if (!phys_to_page(paddr, &page)) {
-		error(WARNING, "cannot find mem_map page for address: %lx\n", 
-			vaddr);
-		return NULL;
-	}
+        readmem(si->slab+OFFSET(slab_s_free),
+                KVADDR, &si->free, SIZE(kmem_bufctl_t),
+                "kmem_bufctl_t", FAULT_ON_ERROR);
 
-	if (VALID_MEMBER(page_next))
-                readmem(page+OFFSET(page_next),
-                        KVADDR, &cache, sizeof(void *),
-                        "page.next", FAULT_ON_ERROR);
-	else if (VALID_MEMBER(page_list_next))
-                readmem(page+OFFSET(page_list_next),
-                        KVADDR, &cache, sizeof(void *),
-                        "page.list.next", FAULT_ON_ERROR);
-	else if (VALID_MEMBER(page_lru))
-                readmem(page+OFFSET(page_lru)+OFFSET(list_head_next),
-                        KVADDR, &cache, sizeof(void *),
-                        "page.lru.next", FAULT_ON_ERROR);
-	else
-		error(FATAL, "cannot determine slab cache from page struct\n");
+	gather_slab_free_list_percpu(si);
+	gather_slab_cached_count(si);
 
-	return(is_kmem_cache_addr(cache, buf)); 
+	if (!(si->flags & ADDRESS_SPECIFIED)) {
+		fprintf(fp, slab_hdr);
+		DUMP_SLAB_INFO();
+	}
+
+	dump_slab_objects_percpu(si);
 }
 
+
 /*
- *  Translate an address to its physical page number, verify that the
- *  page in fact belongs to the slab subsystem, and if so, return the
- *  address of the slab to which it belongs.
+ *  Updated for 2.6 slab substructure.
  */
-static ulong
-vaddr_to_slab(ulong vaddr)
+static void
+dump_slab_percpu_v2(struct meminfo *si)
 {
-        physaddr_t paddr;
-        ulong page;
-        ulong slab;
+	int tmp;
 
-        if (!kvtop(NULL, vaddr, &paddr, 0)) {
-                error(WARNING,
-                    "cannot make virtual-to-physical translation: %lx\n",
-                        vaddr);
-                return 0;
-        }
+        readmem(si->slab+OFFSET(slab_s_mem),
+                KVADDR, &si->s_mem, sizeof(ulong),
+                "s_mem", FAULT_ON_ERROR);
 
-        if (!phys_to_page(paddr, &page)) {
-                error(WARNING, "cannot find mem_map page for address: %lx\n",
-                        vaddr);
-                return 0;
+	/*
+	 * Include the array of kmem_bufctl_t's appended to slab.
+	 */
+	tmp = SIZE(slab) + (SIZE(kmem_bufctl_t) * si->c_num);
+
+        if (si->flags & ADDRESS_SPECIFIED)  {
+                if (INSLAB_PERCPU(si->slab, si) && 
+		    (si->spec_addr >= si->slab) &&
+                    (si->spec_addr < (si->slab+tmp))) {
+			if (si->spec_addr >= (si->slab + SIZE(slab)))
+				si->found = KMEM_BUFCTL_ADDR;
+			else
+                		si->found = KMEM_SLAB_ADDR;
+                } else if (INSLAB_PERCPU(si->spec_addr, si))
+			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
+		else
+			return;
         }
 
-	slab = 0;
+        readmem(si->slab+OFFSET(slab_inuse),
+                KVADDR, &tmp, sizeof(int),
+                "inuse", FAULT_ON_ERROR);
+	si->s_inuse = tmp;
 
-        if (VALID_MEMBER(page_prev))
-                readmem(page+OFFSET(page_prev),
-                        KVADDR, &slab, sizeof(void *),
-                        "page.prev", FAULT_ON_ERROR);
-        else if (VALID_MEMBER(page_list_prev))
-                readmem(page+OFFSET(page_list_prev),
-                        KVADDR, &slab, sizeof(void *),
-                        "page.list.prev", FAULT_ON_ERROR);
-	else if (VALID_MEMBER(page_lru))
-                readmem(page+OFFSET(page_lru)+OFFSET(list_head_prev),
-                        KVADDR, &slab, sizeof(void *),
-                        "page.lru.prev", FAULT_ON_ERROR);
-        else
-                error(FATAL, "unknown definition of struct page?\n");
+        readmem(si->slab+OFFSET(slab_free),
+                KVADDR, &si->free, SIZE(kmem_bufctl_t),
+                "kmem_bufctl_t", FAULT_ON_ERROR);
 
-	return slab;
+	gather_slab_free_list_percpu(si);
+	gather_slab_cached_count(si);
+
+	if (!(si->flags & ADDRESS_SPECIFIED)) {
+		fprintf(fp, slab_hdr);
+		DUMP_SLAB_INFO();
+	}
+
+	dump_slab_objects_percpu(si);
 }
 
 
+
 /*
- *  Initialize any data required for scouring the kmalloc subsystem more
- *  efficiently.
+ *  Gather the free objects in a slab into the si->addrlist, checking for
+ *  specified addresses that are in-slab kmem_bufctls, and performing error checks
+ *  along the way.  Object address checks are deferred to dump_slab_objects().
  */
-char slab_hdr[BUFSIZE] = { 0 };
-char kmem_cache_hdr[BUFSIZE] = { 0 };
-char free_inuse_hdr[BUFSIZE] = { 0 };
+
+#define INOBJECT(addr, obj) ((addr >= obj) && (addr < (obj+si->size)))
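+/* NB: INOBJECT() implicitly uses the enclosing function's "si" meminfo pointer. */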
 
 static void
-kmem_cache_init(void)
+gather_slab_free_list(struct meminfo *si)
 {
-	ulong cache, cache_end, max_cnum, max_limit, max_cpus, tmp, tmp2;
-	long cache_count, num_offset, next_offset;
-	char *cache_buf;
-
-	if (vt->flags & KMEM_CACHE_UNAVAIL)
-		return;
-
-	if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT))
-		return; 
-
-        if (!strlen(slab_hdr)) 
-                sprintf(slab_hdr, 
-		    "SLAB%sMEMORY%sTOTAL  ALLOCATED  FREE\n",
-                        space(VADDR_PRLEN > 8 ? 14 : 6),
-                        space(VADDR_PRLEN > 8 ? 12 : 4));
-
-	if (!strlen(kmem_cache_hdr)) 
-		sprintf(kmem_cache_hdr,
-     "CACHE%sNAME                 OBJSIZE  ALLOCATED     TOTAL  SLABS  SSIZE\n",
-			space(VADDR_PRLEN > 8 ? 12 : 4));
-
-	if (!strlen(free_inuse_hdr)) 
-		sprintf(free_inuse_hdr, "FREE / [ALLOCATED]\n");
+	ulong *next, obj;
+	ulong expected, cnt;
 
-	num_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
-		OFFSET(kmem_cache_s_num) : OFFSET(kmem_cache_s_c_num);
-	next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
-		OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);
-        max_cnum = max_limit = max_cpus = cache_count = 0;
+	BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1));
 
-	/*
-	 *  Pre-2.6 versions used the "cache_cache" as the head of the
-	 *  slab chain list.  2.6 uses the "cache_chain" list_head.
-	 */
-        if (vt->flags & PERCPU_KMALLOC_V2) {
-                get_symbol_data("cache_chain", sizeof(ulong), &cache);
-		cache -= next_offset;
-                cache_end = symbol_value("cache_chain");
-        } else
-                cache = cache_end = symbol_value("cache_cache");
+	if (!si->s_freep)
+		return;
 
-	cache_buf = GETBUF(SIZE(kmem_cache_s));
+	cnt = 0;
+	expected = si->c_num - si->s_inuse;
 
-        do {
-		cache_count++;
+	next = si->s_freep; 
+	do {
 
-                if (!readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s),
-                        "kmem_cache_s buffer", RETURN_ON_ERROR)) {
-			vt->flags |= KMEM_CACHE_UNAVAIL;
+		if (cnt == si->c_num) {
 			error(INFO, 
-		          "unable to initialize kmem slab cache subsystem\n\n");
+		     "\"%s\" cache: too many objects found in slab free list\n",
+				si->curname);
+			si->errors++;
 			return;
 		}
 
-		tmp = (ulong)(UINT(cache_buf + num_offset));
-
-                if (tmp > max_cnum)
-                        max_cnum = tmp;
-
-		if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit)
-			max_limit = tmp;
+		/*
+                 *  Off-slab kmem_bufctls are contained in arrays of object 
+		 *  pointers that point to:
+	         *    1. next kmem_bufctl (or NULL) if the object is free.
+	         *    2. the object if the object is in use.
+                 *
+	         *  On-slab kmem_bufctls reside just after the object itself,
+	         *  and point to:
+	         *    1. next kmem_bufctl (or NULL) if object is free.
+	         *    2. the containing slab if the object is in use.
+		 */
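+		/*
+		 *  For off-slab bufctls the pointer difference
+		 *  (next - si->s_index) is the object's index within the
+		 *  slab, so the object address is s_mem plus that index
+		 *  times c_offset.
+		 */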
 
-		if (tmp2 > max_cpus)
-			max_cpus = tmp2;
+	        if (si->c_flags & SLAB_CFLGS_BUFCTL) 
+                	obj = si->s_mem + ((next - si->s_index) * si->c_offset);
+		else 
+			obj = (ulong)next - si->c_offset;
 
-		cache = ULONG(cache_buf + next_offset);
+		si->addrlist[cnt] = obj; 
 
-		switch (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) 
-		{
-		case PERCPU_KMALLOC_V1:
-			cache -= next_offset;
-			break;
-		case PERCPU_KMALLOC_V2:
-			if (cache != cache_end)
-				cache -= next_offset;
-			break;
+		if (si->flags & ADDRESS_SPECIFIED) {
+			if (INSLAB(next, si) && 
+		            (si->spec_addr >= (ulong)next) &&
+			    (si->spec_addr < (ulong)(next + 1))) {
+				si->found = KMEM_BUFCTL_ADDR;
+				return;
+			}
 		}
 
-        } while (cache != cache_end);
-
-	FREEBUF(cache_buf);
+		cnt++;
 
-	vt->kmem_max_c_num = max_cnum;
-	vt->kmem_max_limit = max_limit;
-	vt->kmem_max_cpus = max_cpus;
-	vt->kmem_cache_count = cache_count;
+		if (!INSLAB(obj, si)) {
+			error(INFO, 
+		       "\"%s\" cache: address not contained within slab: %lx\n",
+				si->curname, obj);
+			si->errors++;
+		}
 
-	if (CRASHDEBUG(2)) {
-		fprintf(fp, "kmem_cache_init:\n");
-		fprintf(fp, "  kmem_max_c_num: %ld\n", vt->kmem_max_c_num);
-		fprintf(fp, "  kmem_max_limit: %ld\n", vt->kmem_max_limit);
-		fprintf(fp, "  kmem_max_cpus: %ld\n", vt->kmem_max_cpus);
-		fprintf(fp, "  kmem_cache_count: %ld\n", vt->kmem_cache_count);
-	}
+        	readmem((ulong)next, KVADDR, &next, sizeof(void *),
+                	"s_freep chain entry", FAULT_ON_ERROR);
+	} while (next); 
 
-	if (!(vt->flags & KMEM_CACHE_INIT)) {
-		if (vt->flags & PERCPU_KMALLOC_V1)
-			ARRAY_LENGTH_INIT(vt->kmem_cache_namelen,
-				kmem_cache_s_name, "kmem_cache_s.name", 
-				NULL, sizeof(char));
-		else if (vt->flags & PERCPU_KMALLOC_V2)
-			vt->kmem_cache_namelen = 0;
-		else
-			ARRAY_LENGTH_INIT(vt->kmem_cache_namelen,
-				kmem_cache_s_c_name, "kmem_cache_s.c_name", 
-				NULL, 0);
+	if (cnt != expected) {
+		error(INFO, 
+	       "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n",
+			si->curname, expected, cnt); 
+		si->errors++;
 	}
-
-	vt->flags |= KMEM_CACHE_INIT;
 }
 
+
 /*
- *  Determine the largest cpudata limit for a given cache.
+ *  gather_slab_free_list() adapted for newer percpu slab format.
  */
-static ulong
-max_cpudata_limit(ulong cache, ulong *cpus)
+
+#define BUFCTL_END 0xffffFFFF
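+/*
+ *  The slab's kmem_bufctl array forms an index-linked free list: entry i
+ *  holds the index of the next free object, and the chain, anchored at
+ *  slab.free, is terminated by BUFCTL_END.  For example (hypothetical
+ *  values), with free = 4, kmem_bufctl[4] = 1 and kmem_bufctl[1] =
+ *  BUFCTL_END, the free objects sit at s_mem + 4*size and s_mem + 1*size.
+ */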
+
+static void
+gather_slab_free_list_percpu(struct meminfo *si)
 {
 	int i;
-	ulong cpudata[NR_CPUS];
-	int limit; 
-	ulong max_limit;
-
-	if (vt->flags & PERCPU_KMALLOC_V2)
-		goto kmem_cache_s_array;
+	ulong obj;
+	ulong expected, cnt;
+	int free_index;
+	ulong kmembp;
+	short *kbp;
 
-        if (INVALID_MEMBER(kmem_cache_s_cpudata)) {
-                *cpus = 0;
-                return 0;
-        }
+	BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1));
 
-	readmem(cache+OFFSET(kmem_cache_s_cpudata),
-        	KVADDR, &cpudata[0], 
-		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata),
-                "cpudata array", FAULT_ON_ERROR);
+	if (CRASHDEBUG(1)) 
+		fprintf(fp, "slab: %lx si->s_inuse: %ld si->c_num: %ld\n", 
+			si->slab, si->s_inuse, si->c_num);
 
-	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && 
-	     cpudata[i]; i++) {
-		readmem(cpudata[i]+OFFSET(cpucache_s_limit),
-        		KVADDR, &limit, sizeof(int),
-                	"cpucache limit", FAULT_ON_ERROR);
-		if (limit > max_limit)
-			max_limit = limit;
-	}
+	if (si->s_inuse == si->c_num )
+		return;
 
-	*cpus = i;
+	kmembp = si->slab + SIZE_OPTION(slab_s, slab);
+        readmem((ulong)kmembp, KVADDR, si->kmem_bufctl, 
+		SIZE(kmem_bufctl_t) * si->c_num,
+                "kmem_bufctl array", FAULT_ON_ERROR);
 
-	return max_limit;
+	if (CRASHDEBUG(1)) {
+		for (i = 0; (SIZE(kmem_bufctl_t) == sizeof(int)) && 
+		     (i < si->c_num); i++) 
+			fprintf(fp, "%d ", si->kmem_bufctl[i]);
 
-kmem_cache_s_array:
+		for (kbp = (short *)&si->kmem_bufctl[0], i = 0; 
+		     (SIZE(kmem_bufctl_t) == sizeof(short)) && (i < si->c_num);
+		     i++) 
+			fprintf(fp, "%d ", *(kbp + i));
 
-	readmem(cache+OFFSET(kmem_cache_s_array),
-        	KVADDR, &cpudata[0], 
-		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
-                "array cache array", FAULT_ON_ERROR);
+		fprintf(fp, "\n");
+	}
 
-	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && 
-	     cpudata[i]; i++) {
-                readmem(cpudata[i]+OFFSET(array_cache_limit),
-                        KVADDR, &limit, sizeof(int),
-                        "array cache limit", FAULT_ON_ERROR);
-                if (limit > max_limit)
-                        max_limit = limit;
-        }
+	cnt = 0;
+	expected = si->c_num - si->s_inuse;
 
-	*cpus = i;
-	return max_limit;
-}
+	if (SIZE(kmem_bufctl_t) == sizeof(int)) {
+		for (free_index = si->free; free_index != BUFCTL_END;
+		     free_index = si->kmem_bufctl[free_index]) {
+	
+	                if (cnt == si->c_num) {
+	                        error(INFO,
+                     "\"%s\" cache: too many objects found in slab free list\n",
+	                                si->curname);
+	                        si->errors++;
+	                        return;
+	                }
+	
+			obj = si->s_mem + (free_index*si->size);
+			si->addrlist[cnt] = obj; 
+			cnt++;
+		}
+	} else if (SIZE(kmem_bufctl_t) == sizeof(short)) {
+		kbp = (short *)&si->kmem_bufctl[0];
 
-/*
- *  Determine whether the current slab cache is contained in
- *  the comma-separated list from a "kmem -I list1,list2 ..."
- *  command entry.
- */
-static int
-ignore_cache(struct meminfo *si, char *name)
-{
-	int i, argc;
-	char *p1;
-	char *arglist[MAXARGS];
-	char buf[BUFSIZE];
+                for (free_index = si->free; free_index != BUFCTL_END;
+                     free_index = (int)*(kbp + free_index)) {
 
-	if (!si->ignore)
-		return FALSE;
+                        if (cnt == si->c_num) {
+                                error(INFO,
+                     "\"%s\" cache: too many objects found in slab free list\n",
+                                        si->curname);
+                                si->errors++;
+                                return;
+                        }
 
-	strcpy(buf, si->ignore);
+                        obj = si->s_mem + (free_index*si->size);
+                        si->addrlist[cnt] = obj;
+                        cnt++;
+                }
+	} else 
+		error(FATAL, 
+                "size of kmem_bufctl_t (%d) not sizeof(int) or sizeof(short)\n",
+			SIZE(kmem_bufctl_t));
 
-	p1 = buf;
-	while (*p1) {
-		if (*p1 == ',')
-			*p1 = ' ';
-		p1++;
+	if (cnt != expected) {
+		error(INFO, 
+	       "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n",
+			si->curname, expected, cnt); 
+		si->errors++;
 	}
+}
 
-	argc = parse_line(buf, arglist);
 
-	for (i = 0; i < argc; i++) {
-		if (STREQ(name, arglist[i]))
-			return TRUE;
-	}
 
-	return FALSE;
-}
+/*
+ *  Dump the FREE, [ALLOCATED] and <CACHED> objects of a slab.
+ */  
 
+#define DUMP_SLAB_OBJECT() \
+        for (j = on_free_list = 0; j < si->c_num; j++) {	\
+                if (obj == si->addrlist[j]) {			\
+                        on_free_list = TRUE;			\
+                        break;					\
+                }						\
+        }							\
+								\
+        if (on_free_list) {					\
+                if (!(si->flags & ADDRESS_SPECIFIED))		\
+                        fprintf(fp, "   %lx\n", obj);		\
+                if (si->flags & ADDRESS_SPECIFIED) {		\
+                        if (INOBJECT(si->spec_addr, obj)) {	\
+                                si->found =			\
+                                    KMEM_OBJECT_ADDR_FREE;	\
+				si->container = obj;		\
+                                return;				\
+                        }					\
+                }						\
+        } else {						\
+                if (!(si->flags & ADDRESS_SPECIFIED))		\
+                        fprintf(fp, "  [%lx]\n", obj);		\
+                cnt++;						\
+                if (si->flags & ADDRESS_SPECIFIED) {		\
+                        if (INOBJECT(si->spec_addr, obj)) {	\
+                                si->found =			\
+                                    KMEM_OBJECT_ADDR_INUSE;	\
+				si->container = obj;		\
+                                return;				\
+                        }					\
+                }						\
+        }
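+/*
+ *  DUMP_SLAB_OBJECT() classifies "obj" as free or allocated by searching
+ *  si->addrlist, prints it accordingly, and bumps the in-use count for
+ *  allocated objects.  When an address was specified and falls within the
+ *  object, the embedded "return" exits the function expanding the macro.
+ */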
 
-/*
- *  dump_kmem_cache() displays basic information about kmalloc() slabs.
- *  At this point, only kmem_cache_s structure data for each slab is dumped.
- *
- *  TBD: Given a specified physical address, and determine which slab it came
- *  from, and whether it's in use or not.
- */
+static void
+dump_slab_objects(struct meminfo *si)
+{
+	int i, j;
+	ulong *next;
+	int on_free_list; 
+	ulong cnt, expected;
+	ulong bufctl, obj;
 
-#define SLAB_C_MAGIC            0x4F17A36DUL
-#define SLAB_MAGIC_ALLOC        0xA5C32F2BUL    /* slab is alive */
-#define SLAB_MAGIC_DESTROYED    0xB2F23C5AUL    /* slab has been destroyed */
+	gather_slab_free_list(si);
 
-#define SLAB_CFLGS_BUFCTL       0x020000UL      /* bufctls in own cache */
+	if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB))
+		return;
 
-#define KMEM_SLAB_ADDR          (1)
-#define KMEM_BUFCTL_ADDR        (2)
-#define KMEM_OBJECT_ADDR_FREE   (3)
-#define KMEM_OBJECT_ADDR_INUSE  (4)
-#define KMEM_OBJECT_ADDR_CACHED (5)
-#define KMEM_ON_SLAB            (6)
+        cnt = 0;
+        expected = si->s_inuse;
+	si->container = 0;
 
-#define DUMP_KMEM_CACHE_INFO_V1() \
-      {  \
-	char b1[BUFSIZE]; \
-	fprintf(fp, "%s %-18s  %8ld  ", \
-		mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache)), \
-        	buf, si->size); \
-        fprintf(fp, "%9ld  %8ld  %5ld   %3ldk\n", \
-		vt->flags & PERCPU_KMALLOC_V1 ? \
-		si->inuse - si->cpucached_cache : \
-                si->inuse, si->num_slabs * si->c_num, \
-                si->num_slabs, si->slabsize/1024); \
-      }
+        if (CRASHDEBUG(1))
+                for (i = 0; i < si->c_num; i++) {
+                        fprintf(fp, "si->addrlist[%d]: %lx\n", 
+				i, si->addrlist[i]);
+                }
 
-#define DUMP_KMEM_CACHE_INFO_V2()  dump_kmem_cache_info_v2(si) 
+        if (!(si->flags & ADDRESS_SPECIFIED)) 
+		fprintf(fp, free_inuse_hdr);
 
-static void
-dump_kmem_cache_info_v2(struct meminfo *si)
-{
-	char b1[BUFSIZE];
-	char b2[BUFSIZE];
-	int namelen, sizelen, spacelen;
+        /* For on-slab bufctls, c_offset is the distance between the start of
+         * an obj and its related bufctl.  For off-slab bufctls, c_offset is
+         * the distance between objs in the slab.
+         */
 
-	fprintf(fp, "%s ",
-		mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache))); 
+        if (si->c_flags & SLAB_CFLGS_BUFCTL) {
+		for (i = 0, next = si->s_index; i < si->c_num; i++, next++) {
+                	obj = si->s_mem + 
+				((next - si->s_index) * si->c_offset);
+			DUMP_SLAB_OBJECT();
+		}
+	} else {
+		/*
+		 *  Get the "real" s_mem, i.e., without the offset stripped off.
+		 *  It contains the address of the first object.
+		 */
+        	readmem(si->slab+OFFSET(kmem_slab_s_s_mem),
+                	KVADDR, &obj, sizeof(ulong),
+                	"s_mem", FAULT_ON_ERROR);
 
-	namelen = strlen(si->curname);
-	sprintf(b2, "%ld", si->size);
-	sizelen = strlen(b2);
-	spacelen = 0;
+		for (i = 0; i < si->c_num; i++) {
+			DUMP_SLAB_OBJECT();
+
+                	if (si->flags & ADDRESS_SPECIFIED) {
+				bufctl = obj + si->c_offset;
+
+                        	if ((si->spec_addr >= bufctl) &&
+                                    (si->spec_addr < 
+				    (bufctl + SIZE(kmem_bufctl_t)))) {
+                                	si->found = KMEM_BUFCTL_ADDR;
+                                	return;
+                        	}
+                	}
 
-	if (namelen++ > 18) {
-		spacelen = 29 - namelen - sizelen;
-		fprintf(fp, "%s%s%ld  ", si->curname,
-			space(spacelen <= 0 ? 1 : spacelen), si->size); 
-		if (spacelen > 0)
-			spacelen = 1;
-		sprintf(b1, "%c%dld  ", '%', 9 + spacelen - 1);
-	} else {
-		fprintf(fp, "%-18s  %8ld  ", si->curname, si->size); 
-		sprintf(b1, "%c%dld  ", '%', 9);
+			obj += (si->c_offset + SIZE(kmem_bufctl_t));
+		}
 	}
 
-        fprintf(fp, b1, vt->flags & (PERCPU_KMALLOC_V2) ?
-                si->inuse - si->cpucached_cache : si->inuse); 
+        if (cnt != expected) {
+                error(INFO,
+              "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n",
+                        si->curname, expected, cnt);
+                si->errors++;
+        }
 
-        fprintf(fp, "%8ld  %5ld   %3ldk\n",  
-		si->num_slabs * si->c_num, 
-                si->num_slabs, si->slabsize/1024); 
 }
 
-#define DUMP_SLAB_INFO() \
-      { \
-        char b1[BUFSIZE], b2[BUFSIZE]; \
-        ulong allocated, freeobjs; \
-        if (vt->flags & PERCPU_KMALLOC_V1) { \
-                allocated = si->s_inuse - si->cpucached_slab; \
-                freeobjs = si->c_num - allocated - si->cpucached_slab; \
-        } else { \
-                allocated = si->s_inuse; \
-                freeobjs = si->c_num - si->s_inuse; \
-        } \
-        fprintf(fp, "%s  %s  %5ld  %9ld  %4ld\n", \
-                mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \
-                mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->s_mem)), \
-                si->c_num, allocated, \
-                vt->flags & PERCPU_KMALLOC_V1 ? freeobjs + si->cpucached_slab :\
-                freeobjs); \
-      }
+
+/*
+ *  dump_slab_objects() adapted for newer percpu slab format.
+ */
 
 static void
-dump_kmem_cache(struct meminfo *si)
+dump_slab_objects_percpu(struct meminfo *si)
 {
-	char buf[BUFSIZE];
-	char kbuf[BUFSIZE];
-	char *reqname;
-	ulong cache_cache;
-	ulong name, magic;
-	int cnt;
-	char *p1;
-
-	if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) 
-		error(FATAL, 
-		    "dump_kmem_cache called with PERCPU_KMALLOC_V[12] set\n");
-
-	si->found = si->retval = 0;
-	reqname = NULL;
+	int i, j;
+	int on_free_list, on_cpudata_list, on_shared_list; 
+	ulong cnt, expected;
+	ulong obj;
 
-	if ((!(si->flags & VERBOSE) || si->reqname) &&
-	     !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES)))
-		fprintf(fp, kmem_cache_hdr);
+	if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB))
+		return;
 
-	si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong));
-	cnt = 0;
-	si->cache = cache_cache = symbol_value("cache_cache");
+        cnt = 0;
+        expected = si->s_inuse;
+	si->container = 0;
 
-	if (si->flags & ADDRESS_SPECIFIED) {
-	        if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) {
-			error(INFO, 
-			   "address is not allocated in slab subsystem: %lx\n",
-				si->spec_addr);
-			return;
-		}
-		
-		if (si->reqname && (si->reqname != p1)) 
-			error(INFO, 
-			    "ignoring pre-selected %s cache for address: %lx\n",
-				si->reqname, si->spec_addr, si->reqname);
+        if (CRASHDEBUG(1))
+                for (i = 0; i < si->c_num; i++) {
+                        fprintf(fp, "si->addrlist[%d]: %lx\n", 
+				i, si->addrlist[i]);
+                }
 
-		reqname = p1;
-	} else
-		reqname = si->reqname;
+        if (!(si->flags & ADDRESS_SPECIFIED)) 
+		fprintf(fp, free_inuse_hdr);
 
-	si->cache_buf = GETBUF(SIZE(kmem_cache_s));
+	for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) {
+		on_free_list = FALSE;
+		on_cpudata_list = FALSE;
+		on_shared_list = FALSE;
 
-	do {
-		if ((si->flags & VERBOSE) && !si->reqname &&
-		    !(si->flags & ADDRESS_SPECIFIED))
-			fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr);
+	        for (j = 0; j < si->c_num; j++) {        
+	                if (obj == si->addrlist[j]) {                   
+	                        on_free_list = TRUE;                    
+	                        break;                                  
+	                }                                               
+	        }                                                       
 
-                readmem(si->cache, KVADDR, si->cache_buf, SIZE(kmem_cache_s),
-                	"kmem_cache_s buffer", FAULT_ON_ERROR);
+		on_cpudata_list = check_cpudata_list(si, obj);
+		on_shared_list = check_shared_list(si, obj);
 
-		if (vt->kmem_cache_namelen) {
-			BCOPY(si->cache_buf + OFFSET(kmem_cache_s_c_name),
-				buf, vt->kmem_cache_namelen);
-		} else {
-			name = ULONG(si->cache_buf + 
-				OFFSET(kmem_cache_s_c_name));
-                	if (!read_string(name, buf, BUFSIZE-1))
-				error(FATAL, 
-			      "cannot read kmem_cache_s.c_name string at %lx\n",
-					name);
+		if (on_free_list && on_cpudata_list) {
+			error(INFO, 
+		    "\"%s\" cache: object %lx on both free and cpu %d lists\n",
+				si->curname, obj, si->cpu);
+			si->errors++;
 		}
-
-		if (reqname && !STREQ(reqname, buf)) 
-			goto next_cache;
-
-		if (ignore_cache(si, buf)) {
-			fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf);
-			goto next_cache;
+		if (on_free_list && on_shared_list) {
+			error(INFO, 
+		    "\"%s\" cache: object %lx on both free and shared lists\n",
+				si->curname, obj);
+			si->errors++;
+		}
+		if (on_cpudata_list && on_shared_list) {
+			error(INFO, 
+		    "\"%s\" cache: object %lx on both cpu %d and shared lists\n",
+				si->curname, obj, si->cpu);
+			si->errors++;
 		}
+	                                                               
+	        if (on_free_list) {                                     
+	                if (!(si->flags & ADDRESS_SPECIFIED))           
+	                        fprintf(fp, "   %lx\n", obj);           
+	                if (si->flags & ADDRESS_SPECIFIED) {            
+	                        if (INOBJECT(si->spec_addr, obj)) {     
+	                                si->found =                     
+	                                    KMEM_OBJECT_ADDR_FREE;      
+					si->container = obj;
+	                                return;                         
+	                        }                                       
+	                }                                               
+		} else if (on_cpudata_list) {
+                        if (!(si->flags & ADDRESS_SPECIFIED))
+                                fprintf(fp, "   %lx  (cpu %d cache)\n", obj,
+					si->cpu);
+                        cnt++;    
+                        if (si->flags & ADDRESS_SPECIFIED) {
+                                if (INOBJECT(si->spec_addr, obj)) {
+                                        si->found =
+                                            KMEM_OBJECT_ADDR_CACHED;
+					si->container = obj;
+                                        return;
+                                } 
+                        }
+		} else if (on_shared_list) {
+                        if (!(si->flags & ADDRESS_SPECIFIED))
+                                fprintf(fp, "   %lx  (shared cache)\n", obj);
+			cnt++;
+                        if (si->flags & ADDRESS_SPECIFIED) {
+                                if (INOBJECT(si->spec_addr, obj)) {
+                                        si->found =
+                                            KMEM_OBJECT_ADDR_SHARED;
+					si->container = obj;
+                                        return;
+                                } 
+			}
+	        } else {                                                
+	                if (!(si->flags & ADDRESS_SPECIFIED))           
+	                        fprintf(fp, "  [%lx]\n", obj);          
+	                cnt++;                                          
+	                if (si->flags & ADDRESS_SPECIFIED) {            
+	                        if (INOBJECT(si->spec_addr, obj)) {     
+	                                si->found =                     
+	                                    KMEM_OBJECT_ADDR_INUSE;     
+					si->container = obj;
+	                                return;                         
+	                        }                                       
+	                }                                               
+	        }
+	}
 
-		si->curname = buf;
+        if (cnt != expected) {
+                error(INFO,
+              "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n",
+                        si->curname, expected, cnt);
+                si->errors++;
+        }
+}
 
-		if (CRASHDEBUG(1))
-			fprintf(fp, "cache: %lx %s\n", si->cache, si->curname);
-		console("cache: %lx %s\n", si->cache, si->curname);
+/*
+ *  Determine how many of the "inuse" slab objects are actually cached
+ *  in the kmem_cache_s header.  Set the per-slab count and update the 
+ *  cumulative per-cache count.  With the addition of the shared list
+ *  check, the terms "cpucached_cache" and "cpucached_slab" are somewhat
+ *  misleading, but both count objects that are cached in the
+ *  kmem_cache_s header, just not necessarily per-cpu.
+ */
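+/*
+ *  An object found on both a per-cpu list and the shared list makes the
+ *  cached-object counts suspect, so SLAB_DATA_NOSAVE is set to keep
+ *  save_slab_data() from caching this cache's counts.
+ */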
 
-		magic = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_magic));
+static void
+gather_slab_cached_count(struct meminfo *si)
+{
+	int i;
+	ulong obj;
+	int in_cpudata, in_shared;
 
-		if (magic == SLAB_C_MAGIC) {
+	si->cpucached_slab = 0;
 
-			si->size = ULONG(si->cache_buf + 
-				OFFSET(kmem_cache_s_c_org_size));
-			if (!si->size) {
-				if (STREQ(si->curname, "kmem_cache"))
-					si->size = SIZE(kmem_cache_s);
-				else {
-					error(INFO, 
-					    "\"%s\" cache: c_org_size: %ld\n",
-						si->curname, si->size);
-					si->errors++;
-				}
+        for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) {
+		in_cpudata = in_shared = 0;
+		if (check_cpudata_list(si, obj)) {
+			in_cpudata = TRUE;
+			si->cpucached_slab++;
+			if (si->flags & SLAB_GET_COUNTS) {
+				si->cpucached_cache++;
 			}
-			si->c_flags = ULONG(si->cache_buf +
-				OFFSET(kmem_cache_s_c_flags));
-			si->c_offset = ULONG(si->cache_buf + 
-				OFFSET(kmem_cache_s_c_offset));
-			si->order = ULONG(si->cache_buf + 
-				OFFSET(kmem_cache_s_c_gfporder));
-			si->c_num = ULONG(si->cache_buf +
-				OFFSET(kmem_cache_s_c_num));
-
-			do_slab_chain(SLAB_GET_COUNTS, si);
-
-			if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) 
-				DUMP_KMEM_CACHE_INFO_V1();
-
-			if (si->flags == GET_SLAB_PAGES) 
-				si->retval += (si->num_slabs * 
-				    	(si->slabsize/PAGESIZE()));
+		}
+                if (check_shared_list(si, obj)) {
+			in_shared = TRUE;
+			if (!in_cpudata) {
+                        	si->cpucached_slab++;
+                        	if (si->flags & SLAB_GET_COUNTS) {
+                                	si->cpucached_cache++;
+                        	}
+			}
+                }
+		if (in_cpudata && in_shared) {
+			si->flags |= SLAB_DATA_NOSAVE;
+			if (!(si->flags & VERBOSE))
+				error(INFO, 
+		    "\"%s\" cache: object %lx on both cpu %d and shared lists\n",
+				si->curname, obj, si->cpu);
+		}
+	}
+}
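
The counting logic above reduces to: walk every object address in the slab and bump the cached count when the object shows up on either the per-cpu list or the shared list, taking care not to count an object twice. A minimal standalone sketch of that tally, using plain NULL-terminated arrays in place of the crash meminfo bookkeeping (all names and addresses below are invented for illustration):

#include <stdio.h>

/* hypothetical stand-ins for si->cpudata[] and si->shared_array_cache */
static unsigned long cpu0_cache[]   = { 0xdead1000, 0xdead1100, 0 };
static unsigned long shared_cache[] = { 0xdead1100, 0xdead1200, 0 };

static int on_list(unsigned long *list, unsigned long obj)
{
        int i;

        for (i = 0; list[i]; i++)
                if (list[i] == obj)
                        return 1;
        return 0;
}

int main(void)
{
        unsigned long s_mem = 0xdead1000, objsize = 0x100, obj;
        int i, c_num = 4, cached = 0;

        /* walk the slab's objects exactly as gather_slab_cached_count() does */
        for (i = 0, obj = s_mem; i < c_num; i++, obj += objsize) {
                if (on_list(cpu0_cache, obj))
                        cached++;                  /* cpu-cached            */
                else if (on_list(shared_cache, obj))
                        cached++;                  /* shared, counted once  */
        }

        printf("cached objects in slab: %d\n", cached);   /* prints 3 */
        return 0;
}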
 
-			if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) {
-				si->slab = (si->flags & ADDRESS_SPECIFIED) ?
-					vaddr_to_slab(si->spec_addr) : 0;
-			
-				do_slab_chain(SLAB_WALKTHROUGH, si);
+/*
+ *  Populate the percpu object list for a given slab.
+ */
 
-				if (si->found) {
-					fprintf(fp, kmem_cache_hdr);
-					DUMP_KMEM_CACHE_INFO_V1();
-					fprintf(fp, slab_hdr);
-					DUMP_SLAB_INFO();
+static void
+gather_cpudata_list_v1(struct meminfo *si)
+{
+        int i, j;
+	int avail;
+        ulong cpudata[NR_CPUS];
 
-					switch (si->found)
-					{
-					case KMEM_BUFCTL_ADDR:
-						fprintf(fp, "   %lx ", 
-							(ulong)si->spec_addr);
-						fprintf(fp, 
-						   "(ON-SLAB kmem_bufctl_t)\n");
-						break;
+        if (INVALID_MEMBER(kmem_cache_s_cpudata))
+                return;
 
-					case KMEM_SLAB_ADDR:
-						fprintf(fp, "   %lx ", 
-							(ulong)si->spec_addr);
-						fprintf(fp,
-					            "(ON-SLAB kmem_slab_t)\n");
-						break;
+        readmem(si->cache+OFFSET(kmem_cache_s_cpudata),
+                KVADDR, &cpudata[0], 
+		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata),
+                "cpudata array", FAULT_ON_ERROR);
 
-					case KMEM_ON_SLAB:
-						fprintf(fp, "   %lx ", 
-							(ulong)si->spec_addr);
-						fprintf(fp, 
-						    "(unused part of slab)\n");
-						break;
-						
-					case KMEM_OBJECT_ADDR_FREE:
-                                                fprintf(fp, free_inuse_hdr);
-						fprintf(fp, "   %lx\n", 
-                                                        (ulong)si->spec_addr);
-						break;
+        for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && 
+	     cpudata[i]; i++) {
+		BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit);
 
-                                        case KMEM_OBJECT_ADDR_INUSE:
-                                                fprintf(fp, free_inuse_hdr);
-                                                fprintf(fp, "  [%lx]\n",
-                                                        (ulong)si->spec_addr);
-                                                break;
-					}
+                readmem(cpudata[i]+OFFSET(cpucache_s_avail),
+                        KVADDR, &avail, sizeof(int),
+                        "cpucache avail", FAULT_ON_ERROR);
 
-					break;
-				}
-			}
+		if (!avail) 
+			continue;
 
-		} else {
-			error(INFO, "\"%s\" cache: invalid c_magic: %lx\n", 
-				si->curname, magic);
+		if (avail > vt->kmem_max_limit) {
+			error(INFO, 
+	  	  "\"%s\" cache: cpucache_s.avail %d greater than limit %ld\n",
+				si->curname, avail, vt->kmem_max_limit);
 			si->errors++;
 		}
 
-next_cache:
-		si->cache = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_nextp));
-
-	} while (si->cache != cache_cache);
-
-	FREEBUF(si->cache_buf);
+		if (CRASHDEBUG(2))
+			fprintf(fp, "%s: cpu[%d] avail: %d\n", 
+				si->curname, i, avail);
 
-        if ((si->flags & ADDRESS_SPECIFIED) && !si->found)
-		error(INFO, "%s: address not found in cache: %lx\n", 
-			reqname, si->spec_addr);
- 
-	if (si->errors)
-		error(INFO, "%ld error%s encountered\n", 
-			si->errors, si->errors > 1 ? "s" : "");
+                readmem(cpudata[i]+SIZE(cpucache_s),
+                        KVADDR, si->cpudata[i],
+			sizeof(void *) * avail,
+                        "cpucache avail", FAULT_ON_ERROR);
 
-	FREEBUF(si->addrlist);
+		if (CRASHDEBUG(2))
+			for (j = 0; j < avail; j++)
+				fprintf(fp, "  %lx\n", si->cpudata[i][j]);
+        }
 }
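
The two readmem() calls above mirror the layout being parsed: a cpucache_s header whose avail field says how many object pointers immediately follow it, which is why the second read starts at cpudata[i]+SIZE(cpucache_s). A self-contained sketch of that header-then-trailing-array read, using an in-memory buffer instead of readmem() (the stub struct and values are illustrative, not the kernel definitions):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* illustrative stand-in for the cpucache_s header; the real structure is
   read with readmem() plus OFFSET()/SIZE() lookups */
struct cpucache_stub {
        unsigned int avail;
        unsigned int limit;
        /* object pointers follow the header in the real layout */
};

int main(void)
{
        unsigned char remote[sizeof(struct cpucache_stub) + 3 * sizeof(unsigned long)];
        struct cpucache_stub hdr = { 3, 16 };
        unsigned long objs[3] = { 0xc0de1000, 0xc0de2000, 0xc0de3000 };
        unsigned long gathered[16];
        unsigned int i, avail;

        /* fake "dump memory": header first, then the trailing object pointers */
        memcpy(remote, &hdr, sizeof(hdr));
        memcpy(remote + sizeof(hdr), objs, sizeof(objs));

        /* step 1: read the avail count out of the header */
        memcpy(&avail, remote + offsetof(struct cpucache_stub, avail), sizeof(avail));

        /* step 2: read 'avail' pointers starting right past the header */
        memcpy(gathered, remote + sizeof(struct cpucache_stub),
               sizeof(unsigned long) * avail);

        for (i = 0; i < avail; i++)
                printf("  %lx\n", gathered[i]);
        return 0;
}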
 
 /*
- *  dump_kmem_cache() adapted for newer percpu slab format.
+ *  Updated for the 2.6 slab percpu data structure; this also gathers
+ *  the shared array_cache list.
  */
-
 static void
-dump_kmem_cache_percpu_v1(struct meminfo *si)
+gather_cpudata_list_v2(struct meminfo *si)
 {
-	int i;
-	char buf[BUFSIZE];
-	char kbuf[BUFSIZE];
-	char *reqname;
-	ulong cache_cache;
-	ulong name;
-	int cnt;
-	uint tmp_val;  /* Used as temporary variable to read sizeof(int) and 
-			assigned to ulong variable. We are doing this to mask
-			the endian issue */
-	char *p1;
-
-        if (!(vt->flags & PERCPU_KMALLOC_V1)) 
-                error(FATAL, 
-                   "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V1\n");
+        int i, j;
+	int avail;
+        ulong cpudata[NR_CPUS];
+	ulong shared;
 
-	si->found = si->retval = 0;
-	reqname = NULL;
+        readmem(si->cache+OFFSET(kmem_cache_s_array),
+                KVADDR, &cpudata[0], 
+		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
+                "array_cache array", FAULT_ON_ERROR);
 
-	if ((!(si->flags & VERBOSE) || si->reqname) &&
-	     !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES)))
-		fprintf(fp, kmem_cache_hdr);
+        for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && 
+	     cpudata[i]; i++) {
+		BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit);
 
-	si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong));
-	si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int));
-	for (i = 0; i < vt->kmem_max_cpus; i++) 
-		si->cpudata[i] = (ulong *)
-			GETBUF(vt->kmem_max_limit * sizeof(ulong)); 
+                readmem(cpudata[i]+OFFSET(array_cache_avail),
+                        KVADDR, &avail, sizeof(int),
+                        "array cache avail", FAULT_ON_ERROR);
 
-	cnt = 0;
-	si->cache = cache_cache = symbol_value("cache_cache");
+		if (!avail) 
+			continue;
 
-	if (si->flags & ADDRESS_SPECIFIED) {
-	        if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) {
+		if (avail > vt->kmem_max_limit) {
 			error(INFO, 
-			   "address is not allocated in slab subsystem: %lx\n",
-				si->spec_addr);
-			return;
+	  	  "\"%s\" cache: array_cache.avail %d greater than limit %ld\n",
+				si->curname, avail, vt->kmem_max_limit);
+			si->errors++;
 		}
-		
-		if (si->reqname && (si->reqname != p1)) 
-			error(INFO, 
-			    "ignoring pre-selected %s cache for address: %lx\n",
-				si->reqname, si->spec_addr, si->reqname);
-		reqname = p1;
-	} else
-		reqname = si->reqname;
 
-	do {
-		if ((si->flags & VERBOSE) && !si->reqname &&
-		    !(si->flags & ADDRESS_SPECIFIED))
-			fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr);
+		if (CRASHDEBUG(2))
+			fprintf(fp, "%s: cpu[%d] avail: %d\n", 
+				si->curname, i, avail);
 
-		if (vt->kmem_cache_namelen) {
-                        readmem(si->cache+OFFSET(kmem_cache_s_name), 
-				KVADDR, buf, vt->kmem_cache_namelen,
-                                "name array", FAULT_ON_ERROR);
-		} else {
-                	readmem(si->cache+OFFSET(kmem_cache_s_name), 
-				KVADDR, &name, sizeof(ulong),
-                        	"name", FAULT_ON_ERROR);
-                	if (!read_string(name, buf, BUFSIZE-1))
-				error(FATAL, 
-			      "cannot read kmem_cache_s.name string at %lx\n",
-					name);
-		}
+                readmem(cpudata[i]+SIZE(array_cache),
+                        KVADDR, si->cpudata[i],
+			sizeof(void *) * avail,
+                        "array_cache avail", FAULT_ON_ERROR);
 
-		if (reqname && !STREQ(reqname, buf)) 
-			goto next_cache;
+		if (CRASHDEBUG(2))
+			for (j = 0; j < avail; j++)
+				fprintf(fp, "  %lx (cpu %d)\n", si->cpudata[i][j], i);
+        }
 
-                if (ignore_cache(si, buf)) {
-                        fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf);
-                        goto next_cache;
-                }
+        /*
+         *  If the shared list contains anything, gather those objects as well.
+         */
+	BZERO(si->shared_array_cache, sizeof(ulong) * vt->kmem_max_limit);
 
-		si->curname = buf;
+        if (!VALID_MEMBER(kmem_list3_shared) ||
+            !VALID_MEMBER(kmem_cache_s_lists) ||
+            !readmem(si->cache+OFFSET(kmem_cache_s_lists)+
+       	    OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *),
+	    "kmem_list3 shared", RETURN_ON_ERROR|QUIET) ||
+	    !readmem(shared+OFFSET(array_cache_avail),
+            KVADDR, &avail, sizeof(int), "shared array_cache avail",
+            RETURN_ON_ERROR|QUIET) || !avail)
+		return;
 
-	        readmem(si->cache+OFFSET(kmem_cache_s_objsize),
-	        	KVADDR, &tmp_val, sizeof(uint),
-	                "objsize", FAULT_ON_ERROR);
-		si->size = (ulong)tmp_val;
+	if (avail > vt->kmem_max_limit) {
+		error(INFO, 
+  	  "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n",
+			si->curname, avail, vt->kmem_max_limit);
+		si->errors++;
+		return;
+	}
 
-		if (!si->size) {
-			if (STREQ(si->curname, "kmem_cache"))
-				si->size = SIZE(kmem_cache_s);
-			else {
-				error(INFO, "\"%s\" cache: objsize: %ld\n",
-					si->curname, si->size);
-				si->errors++;
-			}
-		}
+	if (CRASHDEBUG(2))
+		fprintf(fp, "%s: shared avail: %d\n", 
+			si->curname, avail);
 
-	        readmem(si->cache+OFFSET(kmem_cache_s_flags), 
-			KVADDR, &tmp_val, sizeof(uint),
-	                "kmem_cache_s flags", FAULT_ON_ERROR);
-		si->c_flags = (ulong)tmp_val;
+        readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache,
+        	sizeof(void *) * avail, "shared array_cache avail", 
+		FAULT_ON_ERROR);
 
-                readmem(si->cache+OFFSET(kmem_cache_s_gfporder),
-                        KVADDR, &tmp_val, sizeof(uint),
-                        "gfporder", FAULT_ON_ERROR);
-		si->order = (ulong)tmp_val;
+        if (CRASHDEBUG(2))
+        	for (j = 0; j < avail; j++)
+                	fprintf(fp, "  %lx (shared list)\n", si->shared_array_cache[j]);
+}
 
-        	readmem(si->cache+OFFSET(kmem_cache_s_num),
-                	KVADDR, &tmp_val, sizeof(uint),
-                	"kmem_cache_s num", FAULT_ON_ERROR);
-		si->c_num = (ulong)tmp_val;
 
-		do_slab_chain_percpu_v1(SLAB_GET_COUNTS, si);
 
-		if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) {
-			DUMP_KMEM_CACHE_INFO_V1();
-			if (CRASHDEBUG(3))
-				dump_struct("kmem_cache_s", si->cache, 0);
+/*
+ *  Updated gather_cpudata_list_v2() for the per-node kmem_list3 lists in kmem_cache.
+ */
+static void
+gather_cpudata_list_v2_nodes(struct meminfo *si, int index)
+{
+        int i, j;
+	int avail;
+        ulong cpudata[NR_CPUS];
+	ulong shared;
+	ulong *start_address;
+
+	start_address = (ulong *) GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);
+        readmem(si->cache+OFFSET(kmem_cache_s_array),
+                KVADDR, &cpudata[0], 
+		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
+                "array_cache array", FAULT_ON_ERROR);
+
+        for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && 
+	     (cpudata[i]) && !(index); i++) {
+		BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit);
+
+                readmem(cpudata[i]+OFFSET(array_cache_avail),
+                        KVADDR, &avail, sizeof(int),
+                        "array cache avail", FAULT_ON_ERROR);
+
+		if (!avail) 
+			continue;
+
+		if (avail > vt->kmem_max_limit) {
+			error(INFO, 
+	  	  "\"%s\" cache: array_cache.avail %d greater than limit %ld\n",
+				si->curname, avail, vt->kmem_max_limit);
+			si->errors++;
 		}
 
-		if (si->flags == GET_SLAB_PAGES) 
-			si->retval += (si->num_slabs * 
-				(si->slabsize/PAGESIZE()));
+		if (CRASHDEBUG(2))
+			fprintf(fp, "%s: cpu[%d] avail: %d\n", 
+				si->curname, i, avail);
+		
+                readmem(cpudata[i]+SIZE(array_cache),
+                        KVADDR, si->cpudata[i],
+			sizeof(void *) * avail,
+                        "array_cache avail", FAULT_ON_ERROR);
 
-		if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) {
+		if (CRASHDEBUG(2))
+			for (j = 0; j < avail; j++)
+				fprintf(fp, "  %lx (cpu %d)\n", si->cpudata[i][j], i);
+        }
 
-			gather_cpudata_list_v1(si);
+        /*
+         *  If the shared list contains anything, gather those objects as well.
+         */
+	if (!index) {
+		BZERO(si->shared_array_cache, sizeof(ulong) * 
+			vt->kmem_max_limit * vt->kmem_cache_len_nodes);
+		si->current_cache_index = 0;
+	}
 
-                        si->slab = (si->flags & ADDRESS_SPECIFIED) ?
-                        	vaddr_to_slab(si->spec_addr) : 0;
+	if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), KVADDR, &start_address[0], 
+	    sizeof(ulong) * vt->kmem_cache_len_nodes , "array nodelist array", 
+	    RETURN_ON_ERROR) ||  
+	    !readmem(start_address[index] + OFFSET(kmem_list3_shared), KVADDR, &shared,
+	     sizeof(void *), "kmem_list3 shared", RETURN_ON_ERROR|QUIET) || !shared ||
+	    !readmem(shared + OFFSET(array_cache_avail), KVADDR, &avail, sizeof(int), 
+	    "shared array_cache avail", RETURN_ON_ERROR|QUIET) || !avail) {
+		FREEBUF(start_address);
+		return;
+	}
 
-			do_slab_chain_percpu_v1(SLAB_WALKTHROUGH, si);
+	if (avail > vt->kmem_max_limit) {
+		error(INFO, 
+  	  "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n",
+			si->curname, avail, vt->kmem_max_limit);
+		si->errors++;
+		FREEBUF(start_address);
+		return;
+	}
 
-			if (si->found) {
-				fprintf(fp, kmem_cache_hdr);
-				DUMP_KMEM_CACHE_INFO_V1();
-				fprintf(fp, slab_hdr);
-        			gather_slab_cached_count(si);
-				DUMP_SLAB_INFO();
+	if (CRASHDEBUG(2))
+		fprintf(fp, "%s: shared avail: %d\n", 
+			si->curname, avail);
 
-				switch (si->found)
-				{
-				case KMEM_BUFCTL_ADDR:
-					fprintf(fp, "   %lx ", 
-						(ulong)si->spec_addr);
-					fprintf(fp,"(kmem_bufctl_t)\n");
-					break;
+        readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache + si->current_cache_index,
+        	sizeof(void *) * avail, "shared array_cache avail", 
+		FAULT_ON_ERROR);
 
-				case KMEM_SLAB_ADDR:
-					fprintf(fp, "   %lx ", 
-						(ulong)si->spec_addr);
-					fprintf(fp, "(slab_s)\n");
-					break;
+	if ((si->current_cache_index + avail) > 
+	    (vt->kmem_max_limit * vt->kmem_cache_len_nodes)) {
+		error(INFO, 
+  	  "\"%s\" cache: total shared array_cache.avail %d greater than total limit %ld\n",
+			si->curname, 
+			si->current_cache_index + avail, 
+			vt->kmem_max_limit * vt->kmem_cache_len_nodes);
+		si->errors++;
+		FREEBUF(start_address);
+		return;
+	}
 
-				case KMEM_ON_SLAB:
-					fprintf(fp, "   %lx ", 
-						(ulong)si->spec_addr);
-					fprintf(fp, "(unused part of slab)\n");
-					break;
-						
-				case KMEM_OBJECT_ADDR_FREE:
-                                        fprintf(fp, free_inuse_hdr);
-					fprintf(fp, "   %lx\n", 
-						(ulong)si->spec_addr);
-					break;
+        if (CRASHDEBUG(2))
+        	for (j = si->current_cache_index; j < (si->current_cache_index + avail); j++)
+                	fprintf(fp, "  %lx (shared list)\n", si->shared_array_cache[j]);
+	
+	si->current_cache_index += avail;
+	FREEBUF(start_address);
+}
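
The per-node variant flattens each node's shared array_cache into one array, advancing si->current_cache_index as it goes and bailing out rather than overrunning the kmem_max_limit * kmem_cache_len_nodes bound. A minimal sketch of that accumulation pattern (limits, node counts and addresses are made up):

#include <stdio.h>
#include <string.h>

#define MAX_LIMIT  4          /* stand-in for vt->kmem_max_limit       */
#define NR_NODES   2          /* stand-in for vt->kmem_cache_len_nodes */

int main(void)
{
        unsigned long flat[MAX_LIMIT * NR_NODES];
        unsigned long node0[] = { 0xa000, 0xa100 };
        unsigned long node1[] = { 0xb000, 0xb100, 0xb200 };
        unsigned long *per_node[NR_NODES] = { node0, node1 };
        int per_node_avail[NR_NODES] = { 2, 3 };
        int index, avail, current_cache_index = 0, i;

        memset(flat, 0, sizeof(flat));

        for (index = 0; index < NR_NODES; index++) {
                avail = per_node_avail[index];

                /* refuse to overrun the flattened array, as the real code does */
                if (current_cache_index + avail > MAX_LIMIT * NR_NODES)
                        break;

                memcpy(flat + current_cache_index, per_node[index],
                       sizeof(unsigned long) * avail);
                current_cache_index += avail;
        }

        for (i = 0; i < current_cache_index; i++)
                printf("  %lx (shared list)\n", flat[i]);
        return 0;
}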
 
-                                case KMEM_OBJECT_ADDR_INUSE:
-                                        fprintf(fp, free_inuse_hdr);
-                                        fprintf(fp, "  [%lx]\n", 
-						(ulong)si->spec_addr);
-                                        break;
+/*
+ *  Check whether a given address is contained in the previously-gathered
+ *  percpu object cache.
+ */
 
-                                case KMEM_OBJECT_ADDR_CACHED:
-                                        fprintf(fp, free_inuse_hdr);
-                                        fprintf(fp, 
-					    "   %lx  (cpu %d cache)\n", 
-						(ulong)si->spec_addr, si->cpu);
-                                        break;
-				}
+static int
+check_cpudata_list(struct meminfo *si, ulong obj)
+{
+        int i, j;
 
-				break;
+        for (i = 0; i < vt->kmem_max_cpus; i++) {
+                for (j = 0; si->cpudata[i][j]; j++)
+			if (si->cpudata[i][j] == obj) {
+				si->cpu = i;
+				return TRUE;
 			}
-		}
+	}
 
-next_cache:
-                readmem(si->cache+OFFSET(kmem_cache_s_next), 
-		        KVADDR, &si->cache, sizeof(ulong),
-                        "kmem_cache_s next", FAULT_ON_ERROR);
+	return FALSE;
+}
 
-		si->cache -= OFFSET(kmem_cache_s_next);
+/*
+ *  Check whether a given address is contained in the previously-gathered
+ *  shared object cache.
+ */
 
-	} while (si->cache != cache_cache);
+static int
+check_shared_list(struct meminfo *si, ulong obj)
+{
+	int i;
 
-        if ((si->flags & ADDRESS_SPECIFIED) && !si->found)
-		error(INFO, "%s: address not found in cache: %lx\n", 
-			reqname, si->spec_addr);
- 
-	if (si->errors)
-		error(INFO, "%ld error%s encountered\n", 
-			si->errors, si->errors > 1 ? "s" : "");
+	if (INVALID_MEMBER(kmem_list3_shared) ||
+	    !si->shared_array_cache)
+		return FALSE;
 
-	FREEBUF(si->addrlist);
-	FREEBUF(si->kmem_bufctl);
-        for (i = 0; i < vt->kmem_max_cpus; i++)
-                FREEBUF(si->cpudata[i]);
+        for (i = 0; si->shared_array_cache[i]; i++) {
+		if (si->shared_array_cache[i] == obj)
+			return TRUE;
+	}
 
+        return FALSE;
 }
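
Both lookup helpers are plain linear scans over NULL-terminated address arrays; check_cpudata_list() additionally records which cpu owned the hit in si->cpu. A compact standalone version of the same scan (cpu count and addresses are invented):

#include <stdio.h>

#define MAX_CPUS 2

/* NULL-terminated per-cpu object lists, as gathered earlier */
static unsigned long cpudata[MAX_CPUS][4] = {
        { 0x1000, 0x1100, 0 },          /* cpu 0 cache */
        { 0x2000, 0 },                  /* cpu 1 cache */
};

/* return the matching cpu, or -1 if the object is not cpu-cached */
static int find_cpu_cached(unsigned long obj)
{
        int i, j;

        for (i = 0; i < MAX_CPUS; i++)
                for (j = 0; cpudata[i][j]; j++)
                        if (cpudata[i][j] == obj)
                                return i;
        return -1;
}

int main(void)
{
        printf("0x2000 -> cpu %d\n", find_cpu_cached(0x2000));   /* cpu 1      */
        printf("0x3000 -> cpu %d\n", find_cpu_cached(0x3000));   /* -1, no hit */
        return 0;
}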
 
-
 /*
- *  Updated for 2.6 slab substructure. 
+ *  Search the various memory subsystems for instances of this address.
+ *  Start with the most specific areas, ending up with at least the 
+ *  mem_map page data.
  */
 static void
-dump_kmem_cache_percpu_v2(struct meminfo *si)
+kmem_search(struct meminfo *mi)
 {
-	int i;
+	struct syment *sp;
+	struct meminfo tmp_meminfo;
 	char buf[BUFSIZE];
-	char kbuf[BUFSIZE];
-	char *reqname;
-	ulong cache_end;
-	ulong name;
-	int cnt;
-	uint tmp_val; /* Used as temporary variable to read sizeof(int) and
-			assigned to ulong variable. We are doing this to mask
-			the endian issue */
-	char *p1;
-
-        if (!(vt->flags & PERCPU_KMALLOC_V2)) 
-                error(FATAL, 
-                   "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V2\n");
+	ulong vaddr, orig_flags;
+	physaddr_t paddr;
+	ulong offset;
+	ulong task;
+	struct task_context *tc;
 
-	si->found = si->retval = 0;
-	reqname = NULL;
+	pc->curcmd_flags &= ~HEADER_PRINTED;
 
-	if ((!(si->flags & VERBOSE) || si->reqname) &&
-	     !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES)))
-		fprintf(fp, kmem_cache_hdr);
+	switch (mi->memtype)
+	{
+	case KVADDR:
+		vaddr = mi->spec_addr;
+		break;
 
-	si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong));
-	si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int));
-	for (i = 0; i < vt->kmem_max_cpus; i++) 
-		si->cpudata[i] = (ulong *)
-			GETBUF(vt->kmem_max_limit * sizeof(ulong)); 
+	case PHYSADDR:
+		vaddr = mi->spec_addr < VTOP(vt->high_memory) ?
+			PTOV(mi->spec_addr) : BADADDR;
+		break;
+	}
 
-	cnt = 0;
+	orig_flags = mi->flags;
+	mi->retval = 0;
 
-        get_symbol_data("cache_chain", sizeof(ulong), &si->cache);
-        si->cache -= OFFSET(kmem_cache_s_next);
-        cache_end = symbol_value("cache_chain");
+	/*
+	 *  Check first for a possible symbolic display of the virtual
+	 *  address associated with mi->spec_addr or PTOV(mi->spec_addr).
+	 */
+	if (((vaddr >= kt->stext) && (vaddr <= kt->end)) ||
+	    IS_MODULE_VADDR(mi->spec_addr)) {
+		if ((sp = value_search(vaddr, &offset))) {
+			show_symbol(sp, offset, SHOW_LINENUM | SHOW_RADIX());
+			fprintf(fp, "\n");
+		}
+	}
 
-	if (si->flags & ADDRESS_SPECIFIED) {
-	        if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) {
-			error(INFO, 
-			   "address is not allocated in slab subsystem: %lx\n",
-				si->spec_addr);
-			return;
+	/*
+	 *  Check for a valid mapped address.
+	 */
+	if ((mi->memtype == KVADDR) && IS_VMALLOC_ADDR(mi->spec_addr)) {
+		if (kvtop(NULL, mi->spec_addr, &paddr, 0)) {
+			mi->flags = orig_flags | VMLIST_VERIFY;
+			dump_vmlist(mi);
+			if (mi->retval) {
+				mi->flags = orig_flags;
+				dump_vmlist(mi);
+				fprintf(fp, "\n");
+				mi->spec_addr = paddr;
+				mi->memtype = PHYSADDR;
+				goto mem_map;
+			}
 		}
-		
-		if (si->reqname && (si->reqname != p1)) 
-			error(INFO, 
-			    "ignoring pre-selected %s cache for address: %lx\n",
-				si->reqname, si->spec_addr, si->reqname);
-		reqname = p1;
-	} else
-		reqname = si->reqname;
+	}
 
-	do {
-		if ((si->flags & VERBOSE) && !si->reqname &&
-		    !(si->flags & ADDRESS_SPECIFIED))
-			fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr);
+	/*
+	 *  If the address is physical, check whether it's in vmalloc space.
+	 */
+	if (mi->memtype == PHYSADDR) {
+		mi->flags = orig_flags;
+		mi->flags |= GET_PHYS_TO_VMALLOC;
+		mi->retval = 0;
+        	dump_vmlist(mi);
+		mi->flags &= ~GET_PHYS_TO_VMALLOC;
 
-		if (vt->kmem_cache_namelen) {
-                        readmem(si->cache+OFFSET(kmem_cache_s_name), 
-				KVADDR, buf, vt->kmem_cache_namelen,
-                                "name array", FAULT_ON_ERROR);
-		} else {
-                	readmem(si->cache+OFFSET(kmem_cache_s_name), 
-				KVADDR, &name, sizeof(ulong),
-                        	"name", FAULT_ON_ERROR);
-                	if (!read_string(name, buf, BUFSIZE-1))
-				error(FATAL, 
-			      "cannot read kmem_cache_s.name string at %lx\n",
-					name);
+		if (mi->retval) {
+			if ((sp = value_search(mi->retval, &offset))) {
+                        	show_symbol(sp, offset, 
+					SHOW_LINENUM | SHOW_RADIX());
+                        	fprintf(fp, "\n");
+                	}
+        		dump_vmlist(mi);
+			fprintf(fp, "\n");
+			goto mem_map;
 		}
+	}
+
+	/*
+         *  Check whether the containing page belongs to the slab subsystem.
+	 */
+	mi->flags = orig_flags;
+	mi->retval = 0;
+	if ((vaddr != BADADDR) && vaddr_to_kmem_cache(vaddr, buf, VERBOSE)) {
+		BZERO(&tmp_meminfo, sizeof(struct meminfo));
+		tmp_meminfo.spec_addr = vaddr;
+		tmp_meminfo.memtype = KVADDR;
+		tmp_meminfo.flags = mi->flags;
+		vt->dump_kmem_cache(&tmp_meminfo);
+		fprintf(fp, "\n");
+	}
+	if ((vaddr != BADADDR) && is_slab_page(mi, buf)) {
+		BZERO(&tmp_meminfo, sizeof(struct meminfo));
+		tmp_meminfo.spec_addr = vaddr;
+		tmp_meminfo.memtype = KVADDR;
+		tmp_meminfo.flags = mi->flags;
+		vt->dump_kmem_cache(&tmp_meminfo);
+		fprintf(fp, "\n");
+	}
+
+	/*
+	 *  Check free list.
+	 */
+	mi->flags = orig_flags;
+	mi->retval = 0;
+	vt->dump_free_pages(mi);
+	if (mi->retval)
+		fprintf(fp, "\n");
 
-		if (reqname && !STREQ(reqname, buf)) 
-			goto next_cache;
+	if (vt->page_hash_table) {
+		/*
+		 *  Check the page cache.
+		 */
+		mi->flags = orig_flags;
+		mi->retval = 0;
+		dump_page_hash_table(mi);
+		if (mi->retval)
+			fprintf(fp, "\n");
+	}
 
-                if (ignore_cache(si, buf)) {
-                        fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf);
-                        goto next_cache;
-                }
+	/*
+	 *  Check whether it's a current task or stack address.
+	 */
+	if ((mi->memtype == KVADDR) && (task = vaddr_in_task_struct(vaddr)) &&
+	    (tc = task_to_context(task))) {
+		show_context(tc);
+		fprintf(fp, "\n");
+	} else if ((mi->memtype == KVADDR) && (task = stkptr_to_task(vaddr)) &&
+	    (tc = task_to_context(task))) {
+		show_context(tc);
+		fprintf(fp, "\n");
+	}
 
-		si->curname = buf;
+mem_map:
+	mi->flags = orig_flags;
+	pc->curcmd_flags &= ~HEADER_PRINTED;
+	if (vaddr != BADADDR)
+		dump_mem_map(mi);
+	else
+		mi->retval = FALSE;
 
-	        readmem(si->cache+OFFSET(kmem_cache_s_objsize),
-	        	KVADDR, &tmp_val, sizeof(uint),
-	                "objsize", FAULT_ON_ERROR);
-		si->size = (ulong)tmp_val;
+	if (!mi->retval)
+		fprintf(fp, "%llx: %s address not found in mem map\n", 
+			mi->spec_addr, memtype_string(mi->memtype, 0));
+}
 
-		if (!si->size) {
-			if (STREQ(si->curname, "kmem_cache"))
-				si->size = SIZE(kmem_cache_s);
-			else {
-				error(INFO, "\"%s\" cache: objsize: %ld\n",
-					si->curname, si->size);
-				si->errors++;
-			}
-		}
+/*
+ *  Determine whether an address is a page pointer from the mem_map[] array.
+ *  If the caller requests it, return the associated physical address.
+ */
+int
+is_page_ptr(ulong addr, physaddr_t *phys)
+{
+	int n;
+        ulong ppstart, ppend;
+	struct node_table *nt;
+	ulong pgnum, node_size;
+	ulong nr, sec_addr;
+	ulong nr_mem_sections;
+	ulong coded_mem_map, mem_map, end_mem_map;
+	physaddr_t section_paddr;
+
+	if (IS_SPARSEMEM()) {
+		nr_mem_sections = NR_MEM_SECTIONS();
+	        for (nr = 0; nr <= nr_mem_sections ; nr++) {
+	                if ((sec_addr = valid_section_nr(nr))) {
+	                        coded_mem_map = section_mem_map_addr(sec_addr);
+	                        mem_map = sparse_decode_mem_map(coded_mem_map, nr);
+				end_mem_map = mem_map + (PAGES_PER_SECTION() * SIZE(page));
+
+				if ((addr >= mem_map) && (addr < end_mem_map)) { 
+	        			if ((addr - mem_map) % SIZE(page))
+						return FALSE;
+					if (phys) {
+						section_paddr = PTOB(section_nr_to_pfn(nr));
+						pgnum = (addr - mem_map) / SIZE(page);
+						*phys = section_paddr + ((physaddr_t)pgnum * PAGESIZE());
+					} 
+					return TRUE;
+				}
+	                }
+	        }
+		return FALSE;
+	}
 
-	        readmem(si->cache+OFFSET(kmem_cache_s_flags), 
-			KVADDR, &tmp_val, sizeof(uint),
-	                "kmem_cache_s flags", FAULT_ON_ERROR);
-		si->c_flags = (ulong)tmp_val;
+	for (n = 0; n < vt->numnodes; n++) {
+		nt = &vt->node_table[n];
+                if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
+	        	node_size = vt->max_mapnr;
+		else
+	        	node_size = nt->size;
 
-                readmem(si->cache+OFFSET(kmem_cache_s_gfporder),
-                        KVADDR, &tmp_val, sizeof(uint),
-                        "gfporder", FAULT_ON_ERROR);
-		si->order = (ulong)tmp_val;
+        	ppstart = nt->mem_map;
+		ppend = ppstart + (node_size * SIZE(page));
 
-        	readmem(si->cache+OFFSET(kmem_cache_s_num),
-                	KVADDR, &tmp_val, sizeof(uint),
-                	"kmem_cache_s num", FAULT_ON_ERROR);
-		si->c_num = (ulong)tmp_val;
+		if ((addr < ppstart) || (addr >= ppend))
+                	continue;
 
-		do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si);
+		/*
+		 *  We're in the mem_map range -- but is it a page pointer?
+		 */
+	        if ((addr - ppstart) % SIZE(page))
+			return FALSE;
 
-		if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) {
-			DUMP_KMEM_CACHE_INFO_V2();
-			if (CRASHDEBUG(3))
-				dump_struct("kmem_cache_s", si->cache, 0);
+		if (phys) {
+			pgnum = (addr - nt->mem_map) / SIZE(page);
+			*phys = ((physaddr_t)pgnum * PAGESIZE()) + nt->start_paddr;
 		}
 
-		if (si->flags == GET_SLAB_PAGES) 
-			si->retval += (si->num_slabs * 
-				(si->slabsize/PAGESIZE()));
-
-		if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) {
-
-			gather_cpudata_list_v2(si);
+		return TRUE;
+	}
 
-                        si->slab = (si->flags & ADDRESS_SPECIFIED) ?
-                        	vaddr_to_slab(si->spec_addr) : 0;
+	return FALSE;
 
-			do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si);
+#ifdef PRE_NODES
+        ppstart = vt->mem_map;
+	ppend = ppstart + (vt->total_pages * vt->page_struct_len);
 
-			if (si->found) {
-				fprintf(fp, kmem_cache_hdr);
-				DUMP_KMEM_CACHE_INFO_V2();
-				fprintf(fp, slab_hdr);
-        			gather_slab_cached_count(si);
-				DUMP_SLAB_INFO();
+	if ((addr < ppstart) || (addr >= ppend)) 
+		return FALSE;
 
-				switch (si->found)
-				{
-				case KMEM_BUFCTL_ADDR:
-					fprintf(fp, "   %lx ", 
-						(ulong)si->spec_addr);
-					fprintf(fp,"(kmem_bufctl_t)\n");
-					break;
+	if ((addr - ppstart) % vt->page_struct_len)
+		return FALSE;
 
-				case KMEM_SLAB_ADDR:
-					fprintf(fp, "   %lx ", 
-						(ulong)si->spec_addr);
-					fprintf(fp, "(slab)\n");
-					break;
+	return TRUE;
+#endif
+}
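
The non-sparsemem branch above is straightforward pointer arithmetic: the candidate must fall inside a node's mem_map[] span, land exactly on a struct page boundary, and its index within the array then yields the physical address. A standalone sketch of that arithmetic, with assumed values for the struct page size, node base and page size:

#include <stdio.h>

#define PAGE_STRUCT_SIZE  64UL            /* assumed sizeof(struct page) */
#define PAGESIZE          4096UL          /* assumed page size           */

int main(void)
{
        unsigned long mem_map     = 0xffff880000000000UL;  /* node's mem_map   */
        unsigned long node_pages  = 1024;                  /* pages in node    */
        unsigned long start_paddr = 0x100000UL;            /* node's base phys */
        unsigned long addr        = mem_map + 5 * PAGE_STRUCT_SIZE;
        unsigned long end         = mem_map + node_pages * PAGE_STRUCT_SIZE;

        if (addr < mem_map || addr >= end || (addr - mem_map) % PAGE_STRUCT_SIZE) {
                printf("%lx is not a page pointer\n", addr);
                return 1;
        }

        /* page index within the node, then scale to a physical address */
        unsigned long pgnum = (addr - mem_map) / PAGE_STRUCT_SIZE;
        unsigned long phys  = start_paddr + pgnum * PAGESIZE;

        printf("page %lx -> page number %lu -> phys %lx\n", addr, pgnum, phys);
        return 0;
}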
 
-				case KMEM_ON_SLAB:
-					fprintf(fp, "   %lx ", 
-						(ulong)si->spec_addr);
-					fprintf(fp, "(unused part of slab)\n");
-					break;
-						
-				case KMEM_OBJECT_ADDR_FREE:
-                                        fprintf(fp, free_inuse_hdr);
-					fprintf(fp, "   %lx\n", 
-						(ulong)si->spec_addr);
-					break;
+/*
+ *  Return the physical address associated with this page pointer.
+ */
+static int 
+page_to_phys(ulong pp, physaddr_t *phys)
+{
+	return(is_page_ptr(pp, phys));
+}
 
-                                case KMEM_OBJECT_ADDR_INUSE:
-                                        fprintf(fp, free_inuse_hdr);
-                                        fprintf(fp, "  [%lx]\n", 
-						(ulong)si->spec_addr);
-                                        break;
 
-                                case KMEM_OBJECT_ADDR_CACHED:
-                                        fprintf(fp, free_inuse_hdr);
-                                        fprintf(fp, 
-					    "   %lx  (cpu %d cache)\n", 
-						(ulong)si->spec_addr, si->cpu);
-                                        break;
-				}
+/*
+ *  Return the page pointer associated with this physical address.
+ */
+static int 
+phys_to_page(physaddr_t phys, ulong *pp)
+{
+	int n;
+        ulong pgnum;
+        struct node_table *nt;
+	physaddr_t pstart, pend;
+	ulong node_size;
 
-				break;
-			}
+	if (IS_SPARSEMEM()) {
+		ulong map;
+		map = pfn_to_map(phys >> PAGESHIFT());
+		if (map) {
+			*pp = map;
+			return TRUE;
 		}
+		return FALSE;
+	}
 
-next_cache:
-                readmem(si->cache+OFFSET(kmem_cache_s_next), 
-		        KVADDR, &si->cache, sizeof(ulong),
-                        "kmem_cache_s next", FAULT_ON_ERROR);
+        for (n = 0; n < vt->numnodes; n++) {
+                nt = &vt->node_table[n];
+                if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
+                        node_size = vt->max_mapnr;
+                else
+                        node_size = nt->size;
 
-                if (si->cache != cache_end)
-			si->cache -= OFFSET(kmem_cache_s_next);
+                pstart = nt->start_paddr;
+                pend = pstart + ((ulonglong)node_size * PAGESIZE());
 
-	} while (si->cache != cache_end);
+                if ((phys < pstart) || (phys >= pend))
+                        continue;
+                /*
+                 *  We're in the physical range -- calculate the page.
+                 */
+		pgnum = BTOP(phys - pstart);
+		*pp = nt->mem_map + (pgnum * SIZE(page));
 
-        if ((si->flags & ADDRESS_SPECIFIED) && !si->found)
-		error(INFO, "%s: address not found in cache: %lx\n", 
-			reqname, si->spec_addr);
- 
-	if (si->errors)
-		error(INFO, "%ld error%s encountered\n", 
-			si->errors, si->errors > 1 ? "s" : "");
+                return TRUE;
+        }
 
-	FREEBUF(si->addrlist);
-	FREEBUF(si->kmem_bufctl);
-        for (i = 0; i < vt->kmem_max_cpus; i++)
-                FREEBUF(si->cpudata[i]);
+	return FALSE;
 
+#ifdef PRE_NODES
+	if (phys >= (vt->total_pages * PAGESIZE()))
+		return FALSE;
+
+	pgnum = PTOB(BTOP(phys)) / PAGESIZE();
+	*pp = vt->mem_map + (pgnum * vt->page_struct_len);
+	
+	return TRUE;
+#endif
 }
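
phys_to_page() is the inverse mapping: the physical offset into the node is converted to a page number, which then indexes the node's mem_map[] to produce the struct page address. The same arithmetic in isolation (struct page size, node base and page size are again assumed values):

#include <stdio.h>

#define PAGE_STRUCT_SIZE  64UL            /* assumed sizeof(struct page) */
#define PAGESIZE          4096UL          /* assumed page size           */

int main(void)
{
        unsigned long mem_map     = 0xffff880000000000UL;  /* node's mem_map   */
        unsigned long start_paddr = 0x100000UL;            /* node's base phys */
        unsigned long node_pages  = 1024;
        unsigned long phys        = 0x105000UL;            /* address to map   */
        unsigned long pend        = start_paddr + node_pages * PAGESIZE;

        if (phys < start_paddr || phys >= pend) {
                printf("%lx not covered by this node\n", phys);
                return 1;
        }

        /* byte offset to page number, then index the node's mem_map[] */
        unsigned long pgnum = (phys - start_paddr) / PAGESIZE;
        unsigned long pp    = mem_map + pgnum * PAGE_STRUCT_SIZE;

        printf("phys %lx -> page struct at %lx\n", phys, pp);
        return 0;
}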
 
 
 /*
- *  Walk through the slab chain hanging off a kmem_cache_s structure,
- *  gathering basic statistics.
- *
- *  TBD: Given a specified physical address, determine whether it's in this
- *  slab chain, and whether it's in use or not.
+ *  Try to read a string of non-NUL characters from a memory location,
+ *  returning the number of characters read.
  */
+int
+read_string(ulong kvaddr, char *buf, int maxlen)
+{
+	char strbuf[MIN_PAGE_SIZE];
+        ulong kp;
+	char *bufptr;
+	long cnt, size;
 
-#define INSLAB(obj, si) \
-  ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == si->s_mem)
+        BZERO(buf, maxlen);
+	BZERO(strbuf, MIN_PAGE_SIZE);
 
-static void
-do_slab_chain(int cmd, struct meminfo *si)
-{
-	ulong tmp, magic;
-	ulong kmem_slab_end;
-	char *kmem_slab_s_buf;
+	kp = kvaddr;
+	bufptr = strbuf;
+	size = maxlen;
 
-	si->slabsize = (power(2, si->order) * PAGESIZE());
+	while (size > 0) {
+        	cnt = MIN_PAGE_SIZE - (kp & (MIN_PAGE_SIZE-1)); 
+ 
+        	if (cnt > size)
+                        cnt = size;
 
-	kmem_slab_end = si->cache + OFFSET(kmem_cache_s_c_offset);
+                if (!readmem(kp, KVADDR, bufptr, cnt,
+                    "readstring characters", QUIET|RETURN_ON_ERROR))
+                        break;
 
-	switch (cmd)
-	{
-	case SLAB_GET_COUNTS:
-		si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp));
+		if (count_buffer_chars(bufptr, NULLCHAR, cnt))
+			break;
 
-		if (slab_data_saved(si))
-			return;
+                kp += cnt;
+                bufptr += cnt;
+                size -= cnt;
+	}
 
-		si->num_slabs = si->inuse = 0;
+	strcpy(buf, strbuf);
+	return (strlen(buf));
+}
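
read_string() deliberately never lets a single readmem() cross a page boundary: each chunk is capped at MIN_PAGE_SIZE - (kp & (MIN_PAGE_SIZE-1)) bytes, and the loop stops as soon as a chunk contains a NUL. A standalone sketch of that chunking that reads from a local buffer instead of a dump file (the tiny 16-byte "page" just makes the chunk boundaries visible):

#include <stdio.h>
#include <string.h>

#define MIN_PAGE_SIZE 16UL     /* tiny "page" so the chunking is visible */

static const char backing[48] = "crash utility string read example";

/* pretend readmem(): copy from the backing store at a fake kvaddr */
static int fake_readmem(unsigned long kp, char *buf, unsigned long cnt)
{
        if (kp + cnt > sizeof(backing))
                return 0;
        memcpy(buf, backing + kp, cnt);
        return 1;
}

int main(void)
{
        char out[64];
        unsigned long kp = 6;              /* start reading at "utility ..." */
        char *bufptr = out;
        long size = sizeof(out) - 1;

        memset(out, 0, sizeof(out));

        while (size > 0) {
                /* bytes left before the next page boundary */
                long cnt = MIN_PAGE_SIZE - (kp & (MIN_PAGE_SIZE - 1));

                if (cnt > size)
                        cnt = size;
                if (!fake_readmem(kp, bufptr, cnt))
                        break;
                if (memchr(bufptr, '\0', cnt))      /* hit the terminator */
                        break;

                kp += cnt;
                bufptr += cnt;
                size -= cnt;
        }

        printf("read %zu chars: \"%s\"\n", strlen(out), out);   /* 27 chars */
        return 0;
}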
 
-		if (si->slab == kmem_slab_end)
-			return;
+/*
+ *  "help -v" output
+ */
+void
+dump_vm_table(int verbose)
+{
+	int i;
+	struct node_table *nt;
+	int others;
+	ulong *up;
 
-		kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s));
+	others = 0;
+	fprintf(fp, "              flags: %lx  %s(", 
+		vt->flags, count_bits_long(vt->flags) > 4 ? "\n " : "");
+	if (vt->flags & NODES)
+		fprintf(fp, "%sNODES", others++ ? "|" : "");
+	if (vt->flags & NODES_ONLINE)
+		fprintf(fp, "%sNODES_ONLINE", others++ ? "|" : "");
+	if (vt->flags & ZONES)
+		fprintf(fp, "%sZONES", others++ ? "|" : "");
+	if (vt->flags & PERCPU_KMALLOC_V1)
+		fprintf(fp, "%sPERCPU_KMALLOC_V1", others++ ? "|" : "");
+	if (vt->flags & PERCPU_KMALLOC_V2)
+		fprintf(fp, "%sPERCPU_KMALLOC_V2", others++ ? "|" : "");
+	if (vt->flags & COMMON_VADDR)
+		fprintf(fp, "%sCOMMON_VADDR", others++ ? "|" : "");
+	if (vt->flags & KMEM_CACHE_INIT)
+		fprintf(fp, "%sKMEM_CACHE_INIT", others++ ? "|" : "");
+	if (vt->flags & V_MEM_MAP)
+		fprintf(fp, "%sV_MEM_MAP", others++ ? "|" : "");
+	if (vt->flags & KMEM_CACHE_UNAVAIL)
+		fprintf(fp, "%sKMEM_CACHE_UNAVAIL", others++ ? "|" : "");
+	if (vt->flags & DISCONTIGMEM)
+		fprintf(fp, "%sDISCONTIGMEM", others++ ? "|" : "");
+	if (vt->flags & FLATMEM)
+		fprintf(fp, "%sFLATMEM", others++ ? "|" : "");
+	if (vt->flags & SPARSEMEM)
+		fprintf(fp, "%sSPARSEMEM", others++ ? "|" : "");
+	if (vt->flags & SPARSEMEM_EX)
+		fprintf(fp, "%sSPARSEMEM_EX", others++ ? "|" : "");
+	if (vt->flags & KMEM_CACHE_DELAY)
+		fprintf(fp, "%sKMEM_CACHE_DELAY", others++ ? "|" : "");
+	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+		fprintf(fp, "%sPERCPU_KMALLOC_V2_NODES", others++ ? "|" : "");
+	if (vt->flags & VM_STAT)
+		fprintf(fp, "%sVM_STAT", others++ ? "|" : "");
+	if (vt->flags & KMALLOC_SLUB)
+		fprintf(fp, "%sKMALLOC_SLUB", others++ ? "|" : "");
+	if (vt->flags & CONFIG_NUMA)
+		fprintf(fp, "%sCONFIG_NUMA", others++ ? "|" : "");
+	if (vt->flags & VM_EVENT)
+		fprintf(fp, "%sVM_EVENT", others++ ? "|" : "");
 
-		do {
-			if (received_SIGINT()) {
-				FREEBUF(kmem_slab_s_buf);
-				restart(0);
-			}
+	fprintf(fp, ")\n");
+	if (vt->kernel_pgd[0] == vt->kernel_pgd[1])
+       		fprintf(fp, "     kernel_pgd[NR_CPUS]: %lx ...\n", 
+			vt->kernel_pgd[0]);
+	else {
+       		fprintf(fp, "     kernel_pgd[NR_CPUS]: ");
+		for (i = 0; i < NR_CPUS; i++) {
+			if ((i % 4) == 0)
+				fprintf(fp, "\n     ");
+			fprintf(fp, "%lx ", vt->kernel_pgd[i]);
+		}
+		fprintf(fp, "\n");
+	}
+        fprintf(fp, "        high_memory: %lx\n", vt->high_memory);
+        fprintf(fp, "      vmalloc_start: %lx\n", vt->vmalloc_start);
+        fprintf(fp, "            mem_map: %lx\n", vt->mem_map);
+        fprintf(fp, "        total_pages: %ld\n", vt->total_pages);
+        fprintf(fp, "          max_mapnr: %ld\n", vt->max_mapnr);
+        fprintf(fp, "     totalram_pages: %ld\n", vt->totalram_pages);
+        fprintf(fp, "    totalhigh_pages: %ld\n", vt->totalhigh_pages);
+        fprintf(fp, "      num_physpages: %ld\n", vt->num_physpages);
+	fprintf(fp, "    page_hash_table: %lx\n", vt->page_hash_table);
+	fprintf(fp, "page_hash_table_len: %d\n", vt->page_hash_table_len);
+	fprintf(fp, "     kmem_max_c_num: %ld\n", vt->kmem_max_c_num);
+	fprintf(fp, "     kmem_max_limit: %ld\n", vt->kmem_max_limit);
+	fprintf(fp, "      kmem_max_cpus: %ld\n", vt->kmem_max_cpus);
+	fprintf(fp, "   kmem_cache_count: %ld\n", vt->kmem_cache_count);
+	fprintf(fp, " kmem_cache_namelen: %d\n", vt->kmem_cache_namelen);
+	fprintf(fp, "kmem_cache_nodelist_len: %ld\n", vt->kmem_cache_len_nodes);
+	fprintf(fp, "        PG_reserved: %lx\n", vt->PG_reserved);
+	fprintf(fp, "            PG_slab: %ld (%lx)\n", vt->PG_slab, 
+		(ulong)1 << vt->PG_slab);
+	fprintf(fp, "  PG_head_tail_mask: %lx\n", vt->PG_head_tail_mask);
+	fprintf(fp, "        paddr_prlen: %d\n", vt->paddr_prlen);
+	fprintf(fp, "           numnodes: %d\n", vt->numnodes);
+	fprintf(fp, "           nr_zones: %d\n", vt->nr_zones);
+	fprintf(fp, "      nr_free_areas: %d\n", vt->nr_free_areas);
+	for (i = 0; i < vt->numnodes; i++) {
+		nt = &vt->node_table[i];
+		fprintf(fp, "      node_table[%d]: \n", i);
+		fprintf(fp, "                   id: %d\n", nt->node_id);
+		fprintf(fp, "                pgdat: %lx\n", nt->pgdat);
+		fprintf(fp, "                 size: %ld\n", nt->size);
+		fprintf(fp, "              present: %ld\n", nt->present);
+		fprintf(fp, "              mem_map: %lx\n", nt->mem_map);
+		fprintf(fp, "          start_paddr: %llx\n", nt->start_paddr);
+		fprintf(fp, "          start_mapnr: %ld\n", nt->start_mapnr);
+	}
 
-			readmem(si->slab, KVADDR, kmem_slab_s_buf,
-				SIZE(kmem_slab_s), "kmem_slab_s buffer",
-				FAULT_ON_ERROR);
+	fprintf(fp, "    dump_free_pages: ");
+	if (vt->dump_free_pages == dump_free_pages)
+		fprintf(fp, "dump_free_pages()\n");
+	else if (vt->dump_free_pages == dump_free_pages_zones_v1)
+		fprintf(fp, "dump_free_pages_zones_v1()\n");
+	else if (vt->dump_free_pages == dump_free_pages_zones_v2)
+		fprintf(fp, "dump_free_pages_zones_v2()\n");
+	else if (vt->dump_free_pages == dump_multidimensional_free_pages)
+		fprintf(fp, "dump_multidimensional_free_pages()\n");
+	else
+		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_free_pages);
 
-			magic = ULONG(kmem_slab_s_buf +
-				OFFSET(kmem_slab_s_s_magic));
+	fprintf(fp, "    dump_kmem_cache: ");
+	if (vt->dump_kmem_cache == dump_kmem_cache)
+		fprintf(fp, "dump_kmem_cache()\n");
+	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v1)
+		fprintf(fp, "dump_kmem_cache_percpu_v1()\n");
+	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v2)
+		fprintf(fp, "dump_kmem_cache_percpu_v2()\n");
+	else if (vt->dump_kmem_cache == dump_kmem_cache_slub)
+		fprintf(fp, "dump_kmem_cache_slub()\n");
+	else
+		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_kmem_cache);
+	fprintf(fp, "          slab_data: %lx\n", (ulong)vt->slab_data);
+	if (verbose) 
+		dump_saved_slab_data();
+	fprintf(fp, "      cpu_slab_type: %d\n", vt->cpu_slab_type);
+	fprintf(fp, "       nr_swapfiles: %d\n", vt->nr_swapfiles);
+	fprintf(fp, "     last_swap_read: %lx\n", vt->last_swap_read);
+	fprintf(fp, "   swap_info_struct: %lx\n", (ulong)vt->swap_info_struct);
+	fprintf(fp, "            mem_sec: %lx\n", (ulong)vt->mem_sec);
+	fprintf(fp, "        mem_section: %lx\n", (ulong)vt->mem_section);
+	fprintf(fp, "       ZONE_HIGHMEM: %d\n", vt->ZONE_HIGHMEM);
+	fprintf(fp, "node_online_map_len: %d\n", vt->node_online_map_len);
+	if (vt->node_online_map_len) {
+		fprintf(fp, "    node_online_map: ");
+		up = (ulong *)vt->node_online_map;
+		for (i = 0; i < vt->node_online_map_len; i++) {
+			fprintf(fp, "%s%lx", i ? ", " : "[", *up);
+			up++;
+		}
+		fprintf(fp, "]\n");
+	} else {
+		fprintf(fp, "    node_online_map: (unused)\n");
+	}
+	fprintf(fp, "   nr_vm_stat_items: %d\n", vt->nr_vm_stat_items);
+	fprintf(fp, "      vm_stat_items: %s", (vt->flags & VM_STAT) ?
+		"\n" : "(not used)\n");
+	for (i = 0; i < vt->nr_vm_stat_items; i++)
+		fprintf(fp, "        [%d] %s\n", i, vt->vm_stat_items[i]);
+
+	fprintf(fp, "  nr_vm_event_items: %d\n", vt->nr_vm_event_items);
+	fprintf(fp, "     vm_event_items: %s", (vt->flags & VM_EVENT) ?
+		"\n" : "(not used)\n");
+	for (i = 0; i < vt->nr_vm_event_items; i++)
+		fprintf(fp, "        [%d] %s\n", i, vt->vm_event_items[i]);
 
-			if (magic == SLAB_MAGIC_ALLOC) {
-	
-				tmp = ULONG(kmem_slab_s_buf +
-					OFFSET(kmem_slab_s_s_inuse));
-	
-				si->inuse += tmp;
-				si->num_slabs++;
-			} else {
-				fprintf(fp, 
-			   	    "\"%s\" cache: invalid s_magic: %lx\n", 
-					si->curname, magic);
-				si->errors++;
-				FREEBUF(kmem_slab_s_buf);
-				return;
-			}
-	
-			si->slab = ULONG(kmem_slab_s_buf +
-				OFFSET(kmem_slab_s_s_nextp));
-	
-		} while (si->slab != kmem_slab_end);
-		
-		FREEBUF(kmem_slab_s_buf);
-		save_slab_data(si);
-		break;
+	dump_vma_cache(VERBOSE);
+}
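
The flag dump above relies on a small idiom: an "others" counter that prints a "|" separator before every name except the first. The same idiom isolated, with invented flag values:

#include <stdio.h>

#define NODES      0x1     /* illustrative bit values, not the real ones */
#define ZONES      0x2
#define SPARSEMEM  0x4

static void print_flags(unsigned long flags)
{
        int others = 0;

        printf("flags: %lx (", flags);
        if (flags & NODES)
                printf("%sNODES", others++ ? "|" : "");
        if (flags & ZONES)
                printf("%sZONES", others++ ? "|" : "");
        if (flags & SPARSEMEM)
                printf("%sSPARSEMEM", others++ ? "|" : "");
        printf(")\n");
}

int main(void)
{
        print_flags(NODES | SPARSEMEM);   /* prints: flags: 5 (NODES|SPARSEMEM) */
        return 0;
}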
 
-	case SLAB_WALKTHROUGH:
-        	if (!si->slab)
-			si->slab = ULONG(si->cache_buf + 
-				OFFSET(kmem_cache_s_c_firstp));
+/*
+ *  Calculate the amount of memory referenced in the kernel-specific "nodes".
+ */
+uint64_t
+total_node_memory()
+{
+	int i;
+	struct node_table *nt;
+	uint64_t total;
 
-		if (si->slab == kmem_slab_end)
-			return;
+        for (i = total = 0; i < vt->numnodes; i++) {
+                nt = &vt->node_table[i];
 
 		if (CRASHDEBUG(1)) {
-			fprintf(fp, "search cache: [%s] ", si->curname);
-			if (si->flags & ADDRESS_SPECIFIED) 
-				fprintf(fp, "for %llx", si->spec_addr);
-			fprintf(fp, "\n");
+                	console("node_table[%d]: \n", i);
+                	console("           id: %d\n", nt->node_id);
+                	console("        pgdat: %lx\n", nt->pgdat);
+                	console("         size: %ld\n", nt->size);
+                	console("      present: %ld\n", nt->present);
+                	console("      mem_map: %lx\n", nt->mem_map);
+                	console("  start_paddr: %lx\n", nt->start_paddr);
+                	console("  start_mapnr: %ld\n", nt->start_mapnr);
 		}
 
-		si->slab_buf = kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s));
-
-	        do {
-                        if (received_SIGINT()) {
-				FREEBUF(kmem_slab_s_buf);
-                                restart(0);
-			}
-
-			readmem(si->slab, KVADDR, kmem_slab_s_buf,
-				SIZE(kmem_slab_s), "kmem_slab_s buffer",
-				FAULT_ON_ERROR);
-
-	                dump_slab(si);
-	
-	                if (si->found) {
-				FREEBUF(kmem_slab_s_buf);
-	                        return;
-			}
-	
-			si->slab = ULONG(kmem_slab_s_buf +
-				OFFSET(kmem_slab_s_s_nextp));
-	
-	        } while (si->slab != kmem_slab_end);
+		if (nt->present)
+			total += (uint64_t)((uint64_t)nt->present * (uint64_t)PAGESIZE());
+		else
+			total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE());
+        }
 
-		FREEBUF(kmem_slab_s_buf);
-		break;
-	}
+	return total;
 }
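
total_node_memory() prefers each node's "present" page count when it is non-zero and falls back to the node's size, scaling by the page size in 64-bit arithmetic. A standalone illustration with made-up node figures and an assumed 4 KB page:

#include <stdio.h>
#include <stdint.h>

struct node_stub {
        unsigned long size;      /* pages spanned by the node        */
        unsigned long present;   /* pages actually present, if known */
};

int main(void)
{
        struct node_stub nodes[2] = { { 262144, 262000 }, { 131072, 0 } };
        uint64_t total = 0;
        unsigned long pagesize = 4096;   /* assumed page size */
        int i;

        /* prefer the "present" count when available, like total_node_memory() */
        for (i = 0; i < 2; i++)
                total += (uint64_t)(nodes[i].present ? nodes[i].present
                                                     : nodes[i].size) * pagesize;

        printf("total node memory: %llu bytes\n", (unsigned long long)total);
        return 0;
}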
 
-
 /*
- *  do_slab_chain() adapted for newer percpu slab format.
+ *  Dump just the vm_area_struct cache table data so that it can be
+ *  called from dump_vm_table() above or directly for debugging purposes.
  */
+void
+dump_vma_cache(ulong verbose)
+{
+	int i;
+        ulong vhits;
 
-#define SLAB_BASE(X) (PTOB(BTOP(X)))
+	if (!verbose)
+		goto show_hits;
 
-#define INSLAB_PERCPU(obj, si) \
-  ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem))
+        for (i = 0; i < VMA_CACHE; i++)
+                fprintf(fp, "     cached_vma[%2d]: %lx (%ld)\n",
+                        i, vt->cached_vma[i],
+                        vt->cached_vma_hits[i]);
+        fprintf(fp, "          vma_cache: %lx\n", (ulong)vt->vma_cache);
+        fprintf(fp, "    vma_cache_index: %d\n", vt->vma_cache_index);
+        fprintf(fp, "    vma_cache_fills: %ld\n", vt->vma_cache_fills);
+	fflush(fp);
 
-#define SLAB_CHAINS (3)
+show_hits:
+        if (vt->vma_cache_fills) {
+                for (i = vhits = 0; i < VMA_CACHE; i++)
+                        vhits += vt->cached_vma_hits[i];
 
-static char *slab_chain_name_v1[] = {"full", "partial", "free"};
+                fprintf(stderr, "%s       vma hit rate: %2ld%% (%ld of %ld)\n",
+			verbose ? "" : "  ",
+                        (vhits * 100)/vt->vma_cache_fills,
+                        vhits, vt->vma_cache_fills);
+        }
+}
 
-static void
-do_slab_chain_percpu_v1(long cmd, struct meminfo *si)
+/*
+ *  Guess at the "real" amount of physical memory installed, formatting
+ *  it as an MB- or GB-based string.
+ */
+char *
+get_memory_size(char *buf)
 {
-	int i, tmp, s;
-	int list_borked;
-	char *slab_s_buf;
-	ulong specified_slab;
-	ulong last;
-	ulong slab_chains[SLAB_CHAINS];
+	uint64_t total;
+	ulong next_gig;
+#ifdef OLDWAY
+	ulong mbs, gbs;
+#endif
 
-	list_borked = 0;
-	si->slabsize = (power(2, si->order) * PAGESIZE());
-	si->cpucached_slab = 0;
+	total = machdep->memory_size();
 
-	if (VALID_MEMBER(kmem_cache_s_slabs)) {
-		slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs);
-		slab_chains[1] = 0;
-		slab_chains[2] = 0;
-	} else {
-		slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full);
-		slab_chains[1] = si->cache + OFFSET(kmem_cache_s_slabs_partial);
-		slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free);
+	if ((next_gig = roundup(total, GIGABYTES(1)))) {
+		if ((next_gig - total) <= MEGABYTES(64))
+			total = next_gig;
 	}
 
-	if (CRASHDEBUG(1)) {
-		fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
-		fprintf(fp, "full: %lx partial: %lx free: %lx ]\n",
-			slab_chains[0], slab_chains[1], slab_chains[2]);
-	}
+	return (pages_to_size((ulong)(total/PAGESIZE()), buf));
 
-	switch (cmd)
-	{
-	case SLAB_GET_COUNTS:
-		si->flags |= SLAB_GET_COUNTS;
-		si->flags &= ~SLAB_WALKTHROUGH;
-		si->cpucached_cache = 0;
-        	si->num_slabs = si->inuse = 0;
-		gather_cpudata_list_v1(si); 
+#ifdef OLDWAY
+	gbs = (ulong)(total/GIGABYTES(1));
+	mbs = (ulong)(total/MEGABYTES(1));
+	if (gbs) 
+		mbs = (total % GIGABYTES(1))/MEGABYTES(1);
 
-		slab_s_buf = GETBUF(SIZE(slab_s));
+        if (total%MEGABYTES(1))
+                mbs++;
 
-		for (s = 0; s < SLAB_CHAINS; s++) {
+	if (gbs) 
+		sprintf(buf, mbs ? "%ld GB %ld MB" : "%ld GB", gbs, mbs);
+	else 
+		sprintf(buf, "%ld MB", mbs);
 
-			if (!slab_chains[s])
-				continue;
+	return buf;
+#endif
+}
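
The sizing heuristic rounds the measured total up to the next gigabyte only when it falls within 64 MB of that boundary, treating the shortfall as firmware or kernel reservations. The 64 MB slack below comes straight from the code; the helper macros are redefined locally so the illustration stands alone:

#include <stdio.h>
#include <stdint.h>

#define MEGABYTES(x)  ((uint64_t)(x) << 20)
#define GIGABYTES(x)  ((uint64_t)(x) << 30)
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        /* e.g. a 4 GB machine reporting 4 GB minus 48 MB of reservations */
        uint64_t total = GIGABYTES(4) - MEGABYTES(48);
        uint64_t next_gig = ROUNDUP(total, GIGABYTES(1));

        if (next_gig && (next_gig - total) <= MEGABYTES(64))
                total = next_gig;       /* snap up to the full 4 GB */

        printf("reported size: %llu MB\n",
               (unsigned long long)(total / MEGABYTES(1)));   /* 4096 MB */
        return 0;
}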
 
-	                if (!readmem(slab_chains[s],
-	                    KVADDR, &si->slab, sizeof(ulong),
-	                    "first slab", QUIET|RETURN_ON_ERROR)) {
-                		error(INFO, 
-				    "%s: %s list: bad slab pointer: %lx\n",
-                        		si->curname, slab_chain_name_v1[s],
-					slab_chains[s]);
-				list_borked = 1;
-				continue;
-			}
-	
-			if (slab_data_saved(si)) {
-				FREEBUF(slab_s_buf);
-				return;
-			}
-	
-			if (si->slab == slab_chains[s]) 
-				continue;
-	
-			last = slab_chains[s];
+/*
+ *  For use by architectures that have no machine-specific means of
+ *  determining physical memory size.
+ */ 
+uint64_t
+generic_memory_size(void)
+{
+	if (machdep->memsize)
+		return machdep->memsize;
 
-			do {
-	                        if (received_SIGINT()) {
-					FREEBUF(slab_s_buf);
-	                                restart(0);
-				}
+        return (machdep->memsize = total_node_memory());
+}
 
-				if (!verify_slab_v1(si, last, s)) {
-					list_borked = 1;
-					continue;
-				}
-				last = si->slab - OFFSET(slab_s_list);
-	
-		                readmem(si->slab, KVADDR, slab_s_buf, 
-					SIZE(slab_s), "slab_s buffer", 
-					FAULT_ON_ERROR);
-	
-				tmp = INT(slab_s_buf + OFFSET(slab_s_inuse));
-				si->inuse += tmp;
-	
-				if (ACTIVE())
-					gather_cpudata_list_v1(si); 
+/*
+ *  Determine whether a virtual address is user or kernel or ambiguous.
+ */ 
+int
+vaddr_type(ulong vaddr, struct task_context *tc)
+{
+	int memtype, found;
 
-				si->s_mem = ULONG(slab_s_buf + 
-					OFFSET(slab_s_s_mem));
-				gather_slab_cached_count(si);
-	
-				si->num_slabs++;
-		
-				si->slab = ULONG(slab_s_buf + 
-					OFFSET(slab_s_list));
-				si->slab -= OFFSET(slab_s_list);
+	if (!tc)
+		tc = CURRENT_CONTEXT();
+	memtype = found = 0;
 
-				/*
-				 *  Check for slab transition. (Tony Dziedzic)
-				 */
-				for (i = 0; i < SLAB_CHAINS; i++) {
-     					if ((i != s) && 
-					    (si->slab == slab_chains[i])) {
-       						error(NOTE, 
-	  	                      "%s: slab chain inconsistency: %s list\n",
-							si->curname,
-							slab_chain_name_v1[s]);
-       						list_borked = 1;
-     					}
-				}
-		
-			} while (si->slab != slab_chains[s] && !list_borked);
-		}
+	if (machdep->is_uvaddr(vaddr, tc)) {
+		memtype |= UVADDR;
+		found++;
+	}
 
-		FREEBUF(slab_s_buf);
-		if (!list_borked)
-			save_slab_data(si);
-		break;
+	if (machdep->is_kvaddr(vaddr)) {
+		memtype |= KVADDR;
+		found++;
+	}
 
-	case SLAB_WALKTHROUGH:
-		specified_slab = si->slab;
-		si->flags |= SLAB_WALKTHROUGH;
-		si->flags &= ~SLAB_GET_COUNTS;
+	if (found == 1)
+		return memtype;
+	else
+		return AMBIGUOUS;
+}
 
-		for (s = 0; s < SLAB_CHAINS; s++) {
-			if (!slab_chains[s])
-				continue;
+/*
+ *  Determine the first valid user space address.
+ */
+static int
+address_space_start(struct task_context *tc, ulong *addr)
+{
+        ulong vma;
+        char *vma_buf;
 
-	        	if (!specified_slab) {
-	                	if (!readmem(slab_chains[s],
-	                            KVADDR, &si->slab, sizeof(ulong),
-	                            "slabs", QUIET|RETURN_ON_ERROR)) {
-                			error(INFO, 
-				         "%s: %s list: bad slab pointer: %lx\n",
-                        			si->curname, 
-						slab_chain_name_v1[s],
-						slab_chains[s]);
-					list_borked = 1;
-					continue;
-				}
-				last = slab_chains[s];
-			} else
-				last = 0;
-	
-			if (si->slab == slab_chains[s])
-				continue;
+        if (!tc->mm_struct)
+                return FALSE;
 
-			if (CRASHDEBUG(1)) {
-				fprintf(fp, "search cache: [%s] ", si->curname);
-				if (si->flags & ADDRESS_SPECIFIED) 
-					fprintf(fp, "for %llx", si->spec_addr);
-				fprintf(fp, "\n");
-			}
+        fill_mm_struct(tc->mm_struct);
+        vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
+        if (!vma)
+                return FALSE;
+	vma_buf = fill_vma_cache(vma);
+        *addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
 	
-		        do {
-	                        if (received_SIGINT())
-	                                restart(0);
+	return TRUE;
+}
 
-				if (!verify_slab_v1(si, last, s)) {
-					list_borked = 1;
-					continue;
-				}
-				last = si->slab - OFFSET(slab_s_list);
-	
-		                dump_slab_percpu_v1(si);
-		
-		                if (si->found) {
-					return;
-				}
-		
-		                readmem(si->slab+OFFSET(slab_s_list),
-		                        KVADDR, &si->slab, sizeof(ulong),
-		                        "slab list", FAULT_ON_ERROR);
-		
-				si->slab -= OFFSET(slab_s_list);
-	
-		        } while (si->slab != slab_chains[s] && !list_borked);
-		}
+/*
+ *  Search for a given value between a starting and ending address range,
+ *  applying an optional mask for "don't care" bits.  As an alternative
+ *  to entering the starting address value, -k means "start of kernel address
+ *  space".  For processors with ambiguous user/kernel address spaces,
+ *  -u or -k must be used (with or without -s) as a differentiator.
+ */
+void
+cmd_search(void)
+{
+        int c;
+	ulong start, end, mask, memtype, len;
+	ulong uvaddr_end;
+	int sflag;
+	struct meminfo meminfo;
+	ulong value_array[MAXARGS];
+	struct syment *sp;
+
+	start = end = mask = sflag = memtype = len = 0;
+	uvaddr_end = COMMON_VADDR_SPACE() ? (ulong)(-1) : machdep->kvbase;
+	BZERO(value_array, sizeof(ulong) * MAXARGS);
+
+        while ((c = getopt(argcnt, args, "l:uks:e:v:m:")) != EOF) {
+                switch(c)
+                {
+		case 'u':
+			if (XEN_HYPER_MODE())
+				error(FATAL, 
+ 			 	    "-u option is not applicable to the "
+				    "Xen hypervisor\n");
 
-		break;
-	}
-}
+			if (!sflag) {
+				address_space_start(CURRENT_CONTEXT(),&start);
+				sflag++;
+			}
+			memtype = UVADDR;
+			sflag++;
+			break;
 
-/*
- *  Try to preclude any attempt to translate a bogus slab structure.
- */
+		case 'k':
+			if (XEN_HYPER_MODE())
+				error(FATAL, 
+ 			 	    "-k option is not applicable to the "
+				    "Xen hypervisor\n");
 
-static int
-verify_slab_v1(struct meminfo *si, ulong last, int s)
-{
-	char slab_s_buf[BUFSIZE];
-	struct kernel_list_head *list_head;
-	unsigned int inuse;
-	ulong s_mem;
-	char *list;
-	int errcnt;
+			if (!sflag) {
+				start = machdep->kvbase;
+				if (machine_type("IA64") &&
+				    (start < machdep->identity_map_base) &&
+				    (kt->stext > start))
+					start = kt->stext;
+				sflag++;
+			}
+			memtype = KVADDR;
+			sflag++;
+			break;
 
-	list = slab_chain_name_v1[s];
+		case 's':
+			if ((sp = symbol_search(optarg)))
+				start = sp->value;
+			else
+				start = htol(optarg, FAULT_ON_ERROR, NULL);
+			sflag++;
+			break;
 
-	errcnt = 0;
+		case 'e':
+                        if ((sp = symbol_search(optarg)))
+                                end = sp->value;
+                        else
+                        	end = htol(optarg, FAULT_ON_ERROR, NULL);
+                        break;
 
-        if (!readmem(si->slab, KVADDR, slab_s_buf,
-            SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) {
-                error(INFO, "%s: %s list: bad slab pointer: %lx\n",
-                        si->curname, list, si->slab);
-		return FALSE;
-        }                        
+		case 'l':
+			len = stol(optarg, FAULT_ON_ERROR, NULL);
+			break;
 
-        list_head = (struct kernel_list_head *)
-		(slab_s_buf + OFFSET(slab_s_list));
+		case 'm':
+                        mask = htol(optarg, FAULT_ON_ERROR, NULL);
+                        break;
 
-	if (!IS_KVADDR((ulong)list_head->next) || 
-	    !accessible((ulong)list_head->next)) {
-                error(INFO, "%s: %s list: slab: %lx  bad next pointer: %lx\n",
-                        si->curname, list, si->slab,
-			(ulong)list_head->next);
-		errcnt++;
-	}
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
 
-	if (last && (last != (ulong)list_head->prev)) {
-                error(INFO, "%s: %s list: slab: %lx  bad prev pointer: %lx\n",
-                        si->curname, list, si->slab,
-                        (ulong)list_head->prev);
-		errcnt++;
+	if (XEN_HYPER_MODE()) {
+		memtype = KVADDR;
+		if (!sflag)
+			error(FATAL, 
+				"the \"-s start\" option is required for"
+				" the Xen hypervisor\n");
 	}
 
-	inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse));
-	if (inuse > si->c_num) {
-                error(INFO, "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
-                        si->curname, list, si->slab, inuse);
-		errcnt++;
-	}
+        if (argerrs || !sflag || !args[optind] || (len && end))
+                cmd_usage(pc->curcmd, SYNOPSIS);
 
-	if (!last)
-		goto no_inuse_check_v1;
+	if (!memtype)
+		memtype = vaddr_type(start, CURRENT_CONTEXT());
 
-	switch (s) 
+	switch (memtype)
 	{
-	case 0: /* full -- but can be one singular list */
-                if (VALID_MEMBER(kmem_cache_s_slabs_full) && 
-		    (inuse != si->c_num)) {
-                        error(INFO,
-                            "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
-                                si->curname, list, si->slab, inuse);
-                        errcnt++;
-                }
+	case UVADDR:
+		if (!IS_UVADDR(start, CURRENT_CONTEXT())) {
+			error(INFO, "invalid user virtual address: %lx\n", 
+				start);
+                	cmd_usage(pc->curcmd, SYNOPSIS);
+		}
 		break;
 
-	case 1: /* partial */
-		if ((inuse == 0) || (inuse == si->c_num)) {
-                	error(INFO, 
-		 	    "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
-                        	si->curname,  list, si->slab, inuse);
-			errcnt++;
+	case KVADDR:
+		if (!IS_KVADDR(start)) {
+			error(INFO, "invalid kernel virtual address: %lx\n",
+				start);
+               		cmd_usage(pc->curcmd, SYNOPSIS);
 		}
 		break;
 
-	case 2: /* free */
-		if (inuse > 0) {
-                	error(INFO, 
-		 	    "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
-                        	si->curname, list, si->slab, inuse);
-			errcnt++;
+	case AMBIGUOUS:	
+		error(INFO, 
+		    "ambiguous virtual address: %lx  (requires -u or -k)\n",
+			start);
+               	cmd_usage(pc->curcmd, SYNOPSIS);
+	}
+
+	if (!end && !len) {
+		switch (memtype)
+		{
+		case UVADDR:
+			end = uvaddr_end;
+			break;
+
+		case KVADDR:
+			if (XEN_HYPER_MODE())
+				end = (ulong)(-1);
+			else if (vt->vmalloc_start < machdep->identity_map_base)
+				end = (ulong)(-1);
+			else {
+				meminfo.memtype = KVADDR;
+				meminfo.spec_addr = 0;
+				meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST);
+				dump_vmlist(&meminfo);
+				end = meminfo.retval;
+				if (end < start)
+					end = (ulong)(-1);
+			}
+			break;
+		}
+	} else if (len)  
+		end = start + len;
+
+	switch (memtype)
+	{
+	case UVADDR:
+		if (is_kernel_thread(CURRENT_TASK()) || !task_mm(CURRENT_TASK(), TRUE))
+			error(FATAL, "current context has no user address space\n");
+		if (end > uvaddr_end) {
+			error(INFO, 
+	          "address range starts in user space and ends in kernel space\n");
+               		cmd_usage(pc->curcmd, SYNOPSIS);
+		}
+			/* FALLTHROUGH */
+	case KVADDR:
+		if (end < start) {
+			error(INFO, 
+			   "ending address %lx is below starting address %lx\n",
+				end, start);
+               		cmd_usage(pc->curcmd, SYNOPSIS);
 		}
 		break;
 	}
 
-no_inuse_check_v1:
-	s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem));
-	if (!IS_KVADDR(s_mem) || !accessible(s_mem)) {
-                error(INFO, "%s: %s list: slab: %lx  bad s_mem pointer: %lx\n",
-                        si->curname, list, si->slab, s_mem);
-		errcnt++;
+	c = 0;
+	while (args[optind]) {
+		value_array[c] = htol(args[optind], FAULT_ON_ERROR, NULL);
+		c++;
+		optind++;
 	}
 
-	return(errcnt ? FALSE : TRUE);
+	search(start, end, mask, memtype, value_array, c);
 }
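
The option handling above boils down to: -s is mandatory, -e and -l are mutually exclusive ways of bounding the range, and -m supplies a don't-care mask. A minimal standalone sketch of that flow, using plain strtoul() in place of crash's htol()/stol() helpers, so it is illustrative only and not crash code:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	int c, sflag = 0;
	unsigned long start = 0, end = 0, len = 0, mask = 0;

	while ((c = getopt(argc, argv, "s:e:l:m:")) != EOF) {
		switch (c)
		{
		case 's':
			start = strtoul(optarg, NULL, 16);
			sflag++;
			break;
		case 'e':
			end = strtoul(optarg, NULL, 16);
			break;
		case 'l':
			len = strtoul(optarg, NULL, 10);
			break;
		case 'm':
			mask = strtoul(optarg, NULL, 16);
			break;
		default:
			return 1;
		}
	}

	/* -s is required; -l is just shorthand for -e, so both is an error */
	if (!sflag || (len && end))
		return 1;
	if (len)
		end = start + len;

	printf("search %lx-%lx mask %lx\n", start, end, mask);
	return 0;
}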
 
 /*
- *  Updated for 2.6 slab substructure.
+ *  Do the work for cmd_search().
  */
 
-static char *slab_chain_name_v2[] = {"partial", "full", "free"};
+#define SEARCHMASK(X) ((X) | mask) 
 
 static void
-do_slab_chain_percpu_v2(long cmd, struct meminfo *si)
+search(ulong start, ulong end, ulong mask, int memtype, ulong *value, int vcnt)
 {
-	int i, tmp, s;
-	int list_borked;
-	char *slab_buf;
-	ulong specified_slab;
-	ulong last;
-	ulong slab_chains[SLAB_CHAINS];
-
-	list_borked = 0;
-	si->slabsize = (power(2, si->order) * PAGESIZE());
-	si->cpucached_slab = 0;
-
-	slab_chains[0] = si->cache + OFFSET(kmem_cache_s_lists) +
-		OFFSET(kmem_list3_slabs_partial);
-	slab_chains[1] = si->cache + OFFSET(kmem_cache_s_lists) +
-                OFFSET(kmem_list3_slabs_full);
-        slab_chains[2] = si->cache + OFFSET(kmem_cache_s_lists) +
-                OFFSET(kmem_list3_slabs_free);
-
-        if (CRASHDEBUG(1)) {
-                fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
-                fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
-                        slab_chains[0], slab_chains[1], slab_chains[2]);
-        }
-
-	switch (cmd)
-	{
-	case SLAB_GET_COUNTS:
-		si->flags |= SLAB_GET_COUNTS;
-		si->flags &= ~SLAB_WALKTHROUGH;
-		si->cpucached_cache = 0;
-        	si->num_slabs = si->inuse = 0;
-		gather_cpudata_list_v2(si); 
-
-		slab_buf = GETBUF(SIZE(slab));
-
-		for (s = 0; s < SLAB_CHAINS; s++) {
-			if (!slab_chains[s])
-				continue;
-
-	                if (!readmem(slab_chains[s],
-	                    KVADDR, &si->slab, sizeof(ulong),
-	                    "first slab", QUIET|RETURN_ON_ERROR)) {
-                                error(INFO, 
-				    "%s: %s list: bad slab pointer: %lx\n",
-                                        si->curname,
-					slab_chain_name_v2[s],
-                                        slab_chains[s]);
-				list_borked = 1;
-				continue;
-			}
-	
-			if (slab_data_saved(si)) {
-				FREEBUF(slab_buf);
-				return;
-			}
-	
-			if (si->slab == slab_chains[s]) 
-				continue;
-	
-			last = slab_chains[s];
+	int i, j;
+	ulong pp, next, *ubp;
+	int wordcnt, lastpage;
+	ulong page;
+	physaddr_t paddr;
+	char *pagebuf;
 
-			do {
-	                        if (received_SIGINT()) {
-					FREEBUF(slab_buf);
-	                                restart(0);
-				}
+	if (start & (sizeof(long)-1)) {
+		start &= ~(sizeof(long)-1);
+		error(INFO, "rounding down start address to: %lx\n", start);
+	}
 
-				if (!verify_slab_v2(si, last, s)) {
-					list_borked = 1;
-					continue;
-				}
-				last = si->slab - OFFSET(slab_list);
-	
-		                readmem(si->slab, KVADDR, slab_buf, 
-					SIZE(slab), "slab buffer", 
-					FAULT_ON_ERROR);
-	
-				tmp = INT(slab_buf + OFFSET(slab_inuse));
-				si->inuse += tmp;
-	
-				if (ACTIVE())
-					gather_cpudata_list_v2(si); 
+	pagebuf = GETBUF(PAGESIZE());
+	next = start;
 
-				si->s_mem = ULONG(slab_buf + 
-					OFFSET(slab_s_mem));
-				gather_slab_cached_count(si);
-	
-				si->num_slabs++;
-		
-				si->slab = ULONG(slab_buf + 
-					OFFSET(slab_list));
-				si->slab -= OFFSET(slab_list);
+	for (pp = VIRTPAGEBASE(start); next < end; next = pp) {
+		lastpage = (VIRTPAGEBASE(next) == VIRTPAGEBASE(end));
+		if (LKCD_DUMPFILE())
+			set_lkcd_nohash();
 
-				/*
-				 *  Check for slab transition. (Tony Dziedzic)
-				 */
-				for (i = 0; i < SLAB_CHAINS; i++) {
-     					if ((i != s) && 
-					    (si->slab == slab_chains[i])) {
-       						error(NOTE, 
-	  	                      "%s: slab chain inconsistency: %s list\n",
-							si->curname,
-							slab_chain_name_v2[s]);
-       						list_borked = 1;
-     					}
-				}
-		
-			} while (si->slab != slab_chains[s] && !list_borked);
+		/*
+		 *  Keep it virtual for Xen hypervisor.
+		 */
+		if (XEN_HYPER_MODE()) {
+                	if (!readmem(pp, KVADDR, pagebuf, PAGESIZE(),
+                    	    "search page", RETURN_ON_ERROR|QUIET)) {
+				if (CRASHDEBUG(1))
+					fprintf(fp, 
+					    "search suspended at: %lx\n", pp);
+				return;
+			}
+			goto virtual;
 		}
 
-		FREEBUF(slab_buf);
-		if (!list_borked)
-			save_slab_data(si);
-		break;
+                switch (memtype)
+                {
+                case UVADDR:
+                        if (!uvtop(CURRENT_CONTEXT(), pp, &paddr, 0) ||
+                            !phys_to_page(paddr, &page)) { 
+				if (!next_upage(CURRENT_CONTEXT(), pp, &pp)) 
+					return;
+                                continue;
+			}
+                        break;
 
-	case SLAB_WALKTHROUGH:
-		specified_slab = si->slab;
-		si->flags |= SLAB_WALKTHROUGH;
-		si->flags &= ~SLAB_GET_COUNTS;
+                case KVADDR:
+                        if (!kvtop(CURRENT_CONTEXT(), pp, &paddr, 0) ||
+                            !phys_to_page(paddr, &page)) {
+				if (!next_kpage(pp, &pp))
+					return;
+                                continue;
+			}
+                        break;
+                }
 
-		for (s = 0; s < SLAB_CHAINS; s++) {
-			if (!slab_chains[s])
-				continue;
+                if (!readmem(paddr, PHYSADDR, pagebuf, PAGESIZE(),
+                    "search page", RETURN_ON_ERROR|QUIET)) {
+			pp += PAGESIZE();
+			continue;
+		}
+virtual:
+		ubp = (ulong *)&pagebuf[next - pp];
+		if (lastpage) {
+			if (end == (ulong)(-1))
+				wordcnt = PAGESIZE()/sizeof(long);
+			else
+				wordcnt = (end - next)/sizeof(long);
+		} else
+			wordcnt = (PAGESIZE() - (next - pp))/sizeof(long);
 
-	        	if (!specified_slab) {
-	                	if (!readmem(slab_chains[s],
-	                            KVADDR, &si->slab, sizeof(ulong),
-	                            "slabs", QUIET|RETURN_ON_ERROR)) {
-                                        error(INFO,
-                                         "%s: %s list: bad slab pointer: %lx\n",
-                                                si->curname,
-						slab_chain_name_v2[s],
-                                                slab_chains[s]);
-					list_borked = 1;
-					continue;
-				}
-				last = slab_chains[s];
-			} else
-				last = 0;
-			
-			if (si->slab == slab_chains[s])
-				continue;
-	
-			if (CRASHDEBUG(1)) {
-				fprintf(fp, "search cache: [%s] ", si->curname);
-				if (si->flags & ADDRESS_SPECIFIED) 
-					fprintf(fp, "for %llx", si->spec_addr);
-				fprintf(fp, "\n");
+		for (i = 0; i < wordcnt; i++, ubp++, next += sizeof(long)) {
+			for (j = 0; j < vcnt; j++) {
+				if (SEARCHMASK(*ubp) == SEARCHMASK(value[j])) 
+					fprintf(fp, "%lx: %lx\n", next, *ubp);
 			}
-	
-		        do {
-	                        if (received_SIGINT())
-	                                restart(0);
-	
-                                if (!verify_slab_v2(si, last, s)) {
-                                        list_borked = 1;
-                                        continue;
-                                }
-                                last = si->slab - OFFSET(slab_list);
-
-		                dump_slab_percpu_v2(si);
-		
-		                if (si->found) {
-					return;
-				}
-		
-		                readmem(si->slab+OFFSET(slab_list),
-		                        KVADDR, &si->slab, sizeof(ulong),
-		                        "slab list", FAULT_ON_ERROR);
-		
-				si->slab -= OFFSET(slab_list);
-	
-		        } while (si->slab != slab_chains[s] && !list_borked);
 		}
 
-		break;
+		if (CRASHDEBUG(1))
+			if ((pp % (1024*1024)) == 0)
+				console("%lx\n", pp);
+
+		pp += PAGESIZE();
 	}
 }
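
The SEARCHMASK() comparison above works by OR-ing the same mask into both the word read from memory and each search value, so any bit set in the -m mask compares equal no matter what it holds. A small self-contained example of the same trick, with invented values:

#include <stdio.h>

#define SEARCHMASK(x, mask) ((x) | (mask))

int
main(void)
{
	unsigned long memory[] = { 0xdeadbee0UL, 0xdeadbeefUL, 0x12345678UL };
	unsigned long value = 0xdeadbee0UL;
	unsigned long mask = 0xfUL;	/* ignore the low 4 bits */
	int i;

	for (i = 0; i < 3; i++)
		if (SEARCHMASK(memory[i], mask) == SEARCHMASK(value, mask))
			printf("match at word %d: %lx\n", i, memory[i]);

	return 0;
}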
 
+
 /*
- *  Try to preclude any attempt to translate a bogus slab structure.
+ *  Return the next mapped user virtual address page that comes after 
+ *  the passed-in address.
  */
 static int
-verify_slab_v2(struct meminfo *si, ulong last, int s)
+next_upage(struct task_context *tc, ulong vaddr, ulong *nextvaddr)
 {
-	char slab_buf[BUFSIZE];
-	struct kernel_list_head *list_head;
-	unsigned int inuse;
-	ulong s_mem;
-	char *list;
-	int errcnt;
+	ulong vma, total_vm;
+	int found;
+	char *vma_buf;
+        ulong vm_start, vm_end;
+	void *vm_next;
 
-	list = slab_chain_name_v2[s];
+        if (!tc->mm_struct)
+                return FALSE;
 
-	errcnt = 0;
+        fill_mm_struct(tc->mm_struct);
+	vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
+	total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm));
 
-        if (!readmem(si->slab, KVADDR, slab_buf,
-            SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) {
-                error(INFO, "%s: %s list: bad slab pointer: %lx\n",
-                        si->curname, list, si->slab);
+	if (!vma || (total_vm == 0))
 		return FALSE;
-        }                        
-
-        list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list));
-	if (!IS_KVADDR((ulong)list_head->next) || 
-	    !accessible((ulong)list_head->next)) {
-                error(INFO, "%s: %s list: slab: %lx  bad next pointer: %lx\n",
-                        si->curname, list, si->slab,
-			(ulong)list_head->next);
-		errcnt++;
-	}
-
-	if (last && (last != (ulong)list_head->prev)) {
-                error(INFO, "%s: %s list: slab: %lx  bad prev pointer: %lx\n",
-                        si->curname, list, si->slab,
-                        (ulong)list_head->prev);
-		errcnt++;
-	}
-
-	inuse = UINT(slab_buf + OFFSET(slab_inuse));
-	if (inuse > si->c_num) {
-                error(INFO, "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
-                        si->curname, list, si->slab, inuse);
-		errcnt++;
-	}
-
-	if (!last)
-		goto no_inuse_check_v2;
 
-	switch (s) 
-	{
-	case 0: /* partial */
-                if ((inuse == 0) || (inuse == si->c_num)) {
-                	error(INFO, 
-		 	    "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
-                        	si->curname, list, si->slab, inuse);
-			errcnt++;
-		}
-		break;
+	vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE();  /* first possible page */
 
-	case 1: /* full */
-		if (inuse != si->c_num) {
-                	error(INFO, 
-		 	    "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
-                        	si->curname, list, si->slab, inuse);
-			errcnt++;
-		}
-		break;
+        for (found = FALSE; vma; vma = (ulong)vm_next) {
+                vma_buf = fill_vma_cache(vma);
 
-	case 2: /* free */
-		if (inuse > 0) {
-                	error(INFO, 
-		 	    "%s: %s list: slab: %lx  bad inuse counter: %ld\n",
-                        	si->curname, list, si->slab, inuse);
-			errcnt++;
-		}
-		break;
-	}
+                vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
+                vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end));
+                vm_next = VOID_PTR(vma_buf + OFFSET(vm_area_struct_vm_next));
 
-no_inuse_check_v2:
-	s_mem = ULONG(slab_buf + OFFSET(slab_s_mem));
-	if (!IS_KVADDR(s_mem) || !accessible(s_mem)) {
-                error(INFO, "%s: %s list: slab: %lx  bad s_mem pointer: %lx\n",
-                        si->curname, list, si->slab, s_mem);
-		errcnt++;
+		if (vaddr <= vm_start) {
+			*nextvaddr = vm_start;
+			return TRUE;
+		}
+
+		if ((vaddr > vm_start) && (vaddr < vm_end)) {
+			*nextvaddr = vaddr;
+			return TRUE;
+		}
 	}
 
-	return(errcnt ? FALSE : TRUE);
+	return FALSE;
 }
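
next_upage() advances to the next page boundary and then walks the VMA chain until it finds a mapping that contains that address or starts at or after it. A toy version with an in-process linked list standing in for fill_vma_cache()/readmem(); the struct and addresses are made up for the example:

#include <stdio.h>

#define PAGESIZE 4096UL
#define PAGEBASE(a) ((a) & ~(PAGESIZE - 1))

struct vma {
	unsigned long vm_start, vm_end;
	struct vma *vm_next;
};

static int
next_upage_demo(struct vma *vma, unsigned long vaddr, unsigned long *next)
{
	vaddr = PAGEBASE(vaddr) + PAGESIZE;	/* first possible page */

	for (; vma; vma = vma->vm_next) {
		if (vaddr <= vma->vm_start) {
			*next = vma->vm_start;	/* jump to the next mapping */
			return 1;
		}
		if (vaddr > vma->vm_start && vaddr < vma->vm_end) {
			*next = vaddr;		/* still inside this mapping */
			return 1;
		}
	}
	return 0;
}

int
main(void)
{
	struct vma text = { 0x08048000, 0x08049000, NULL };
	struct vma stack = { 0xbffdf000, 0xc0000000, NULL };
	unsigned long next;

	text.vm_next = &stack;
	if (next_upage_demo(&text, 0x08048100, &next))
		printf("next mapped page: %lx\n", next);	/* 0xbffdf000 */
	return 0;
}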
 
 /*
- *  If it's a dumpfile, save the essential slab data to avoid re-reading 
- *  the whole slab chain more than once.  This may seem like overkill, but
- *  if the problem is a memory leak, or just the over-use of the buffer_head
- *  cache, it's painful to wait each time subsequent kmem -s or -i commands
- *  simply need the basic slab counts.
+ *  Return the next mapped kernel virtual address in the vmlist
+ *  that is equal to or comes after the passed-in address.
  */
-struct slab_data {
-	ulong cache_addr;
-	int num_slabs;
-	int inuse;
-	ulong cpucached_cache;
-};
-
-#define NO_SLAB_DATA ((void *)(-1))
-
-static void 
-save_slab_data(struct meminfo *si)
+static ulong
+next_vmlist_vaddr(ulong vaddr)
 {
-	int i;
+	ulong i, count;
+	struct meminfo meminfo, *mi;
 
-	if (ACTIVE())
-		return;
+	mi = &meminfo;
+	BZERO(mi, sizeof(struct meminfo));
 
-	if (vt->slab_data == NO_SLAB_DATA)
-		return;
+        mi->flags = GET_VMLIST_COUNT;
+        dump_vmlist(mi);
+	count = mi->retval;
 
-	if (!vt->slab_data) {
-        	if (!(vt->slab_data = (struct slab_data *)
-            	    malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) {
-                	error(INFO, "cannot malloc slab_data table");
-			vt->slab_data = NO_SLAB_DATA;
-			return;
-		}
-		for (i = 0; i < vt->kmem_cache_count; i++) {
-			vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA;
-			vt->slab_data[i].num_slabs = 0;
-			vt->slab_data[i].inuse = 0;
-			vt->slab_data[i].cpucached_cache = 0;
-		}
-	}
+	if (!count)
+		return vaddr;
 
-	for (i = 0; i < vt->kmem_cache_count; i++) {
-		if (vt->slab_data[i].cache_addr == si->cache) 
-			break;
+	mi->vmlist = (struct vmlist *)GETBUF(sizeof(struct vmlist)*count);
+        mi->flags = GET_VMLIST;
+        dump_vmlist(mi);
 
-		if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) {
-			vt->slab_data[i].cache_addr = si->cache; 
-			vt->slab_data[i].num_slabs = si->num_slabs; 
-			vt->slab_data[i].inuse = si->inuse; 
-			vt->slab_data[i].cpucached_cache = si->cpucached_cache;
+	for (i = 0; i < count; i++) {
+		if (vaddr <= mi->vmlist[i].addr) {
+			vaddr = mi->vmlist[i].addr;
 			break;
 		}
+		if (vaddr < (mi->vmlist[i].addr + mi->vmlist[i].size))
+			break;
 	}
+
+	FREEBUF(mi->vmlist);
+
+	return vaddr;
 }
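
The vmlist scan above treats the vmalloc'd areas as a sorted set of (addr, size) ranges and bumps the address forward to the next range start unless it already falls inside one. The same logic with a plain array standing in for the data dump_vmlist() would return (values invented):

#include <stdio.h>

struct region { unsigned long addr, size; };

static unsigned long
next_region_vaddr(struct region *r, int count, unsigned long vaddr)
{
	int i;

	for (i = 0; i < count; i++) {
		if (vaddr <= r[i].addr)
			return r[i].addr;	/* jump to the next region */
		if (vaddr < r[i].addr + r[i].size)
			return vaddr;		/* already inside a region */
	}
	return vaddr;				/* past the last region */
}

int
main(void)
{
	struct region vmlist[] = {
		{ 0xf8000000UL, 0x2000 },
		{ 0xf8004000UL, 0x1000 },
	};

	printf("%lx\n", next_region_vaddr(vmlist, 2, 0xf8002000UL)); /* 0xf8004000 */
	return 0;
}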
 
-static int 
-slab_data_saved(struct meminfo *si)
+
+/*
+ *  Return the next kernel virtual address page that comes after
+ *  the passed-in, untranslatable address.
+ */
+static int
+next_kpage(ulong vaddr, ulong *nextvaddr)
 {
-	int i;
+        ulong vaddr_orig;
 
-	if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) 
-		return FALSE;
+	vaddr_orig = vaddr;
+	vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE();  /* first possible page */
 
-	for (i = 0; i < vt->kmem_cache_count; i++) {
-		if (vt->slab_data[i].cache_addr == si->cache) {
-			si->inuse = vt->slab_data[i].inuse;
-			si->num_slabs = vt->slab_data[i].num_slabs;
-			si->cpucached_cache = vt->slab_data[i].cpucached_cache;
+        if (vaddr < vaddr_orig)  /* wrapped back to zero? */
+                return FALSE;
+
+	if (IS_VMALLOC_ADDR(vaddr_orig) || 
+	    (machine_type("IA64") && IS_VMALLOC_ADDR(vaddr))) {
+
+		if (IS_VMALLOC_ADDR(vaddr) && 
+		    (vaddr < last_vmalloc_address())) {
+			if (machine_type("X86_64")) 
+				vaddr = next_vmlist_vaddr(vaddr);
+			*nextvaddr = vaddr;
+			return TRUE;
+		}
+
+		if (vt->vmalloc_start < machdep->identity_map_base) {   
+			*nextvaddr = machdep->identity_map_base;
 			return TRUE;
 		}
+
+		return FALSE;	
 	}
 
-	return FALSE;
+	if (next_identity_mapping(vaddr, nextvaddr))
+                return TRUE;
+
+	if (vt->vmalloc_start > vaddr) {
+		*nextvaddr = vt->vmalloc_start;
+		return TRUE;
+	} else
+        	return FALSE;
 }
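
The first two lines of next_kpage() round the address up to the next page boundary and treat a result that is numerically smaller than the original as a wrap past the top of the address space. In isolation, with an assumed 4K page size:

#include <stdio.h>

#define PAGESIZE 4096UL
#define PAGEBASE(a) ((a) & ~(PAGESIZE - 1))

int
main(void)
{
	unsigned long vaddr_orig = (unsigned long)-1 - 100;	/* near the top */
	unsigned long vaddr = PAGEBASE(vaddr_orig) + PAGESIZE;

	if (vaddr < vaddr_orig)		/* wrapped back to zero? */
		printf("wrapped back to zero: stop the walk\n");
	else
		printf("next page: %lx\n", vaddr);
	return 0;
}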
 
-static void
-dump_saved_slab_data(void)
+/*
+ *  Display swap statistics.
+ */
+void
+cmd_swap(void)
 {
-	int i;
+        int c;
 
-	if (!vt->slab_data || (vt->slab_data == NO_SLAB_DATA))
-		return;
+        while ((c = getopt(argcnt, args, "")) != EOF) {
+                switch(c)
+                {
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
 
-	for (i = 0; i < vt->kmem_cache_count; i++) {
-		if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA)
-			break;
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
 
-		fprintf(fp, 
-             "     cache: %lx inuse: %5d num_slabs: %3d cpucached_cache: %ld\n",
-			vt->slab_data[i].cache_addr,
-			vt->slab_data[i].inuse,
-			vt->slab_data[i].num_slabs,
-			vt->slab_data[i].cpucached_cache);
-	}
+	dump_swap_info(VERBOSE, NULL, NULL);
 }
 
 /*
- *  Dump the contents of a kmem slab.
+ *  Do the work for cmd_swap().
  */
 
-static void
-dump_slab(struct meminfo *si)
+#define SWP_USED        1
+#define SWAP_MAP_BAD    0x8000
+
+char *swap_info_hdr = \
+"FILENAME           TYPE         SIZE      USED   PCT  PRIORITY\n";
+
+static int
+dump_swap_info(ulong swapflags, ulong *totalswap_pages, ulong *totalused_pages)
 {
-	uint16_t s_offset;
+	int i, j;
+	int flags, swap_device, pages, prio, usedswap;
+	ulong swap_file, max, swap_map, pct;
+	ulong vfsmnt;
+	ulong swap_info;
+	ushort *map;
+	ulong totalswap, totalused;
+	char buf[BUFSIZE];
 
-	si->s_mem = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_mem));
-	si->s_mem = PTOB(BTOP(si->s_mem));
+	if (!symbol_exists("nr_swapfiles"))
+		error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n");
 
-        if (si->flags & ADDRESS_SPECIFIED)  {
-                if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) &&
-                    (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))){
-                	si->found = KMEM_SLAB_ADDR;
-                        return;
-                }
-		if (INSLAB(si->spec_addr, si))
-			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
-		else
-			return;
-        }
+        if (!symbol_exists("swap_info"))
+                error(FATAL, "swap_info doesn't exist in this kernel!\n");
 
-	si->s_freep = VOID_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_freep));
-	si->s_inuse = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_inuse));
-	si->s_index = ULONG_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_index));
-	s_offset = USHORT(si->slab_buf + OFFSET(kmem_slab_s_s_offset));
+	swap_info = symbol_value("swap_info");
 
-	if (!(si->flags & ADDRESS_SPECIFIED)) {
-		fprintf(fp, slab_hdr);
-		DUMP_SLAB_INFO();
-	}
+	if (swapflags & VERBOSE)
+		fprintf(fp, swap_info_hdr);
 
-	dump_slab_objects(si);
-}
+	totalswap = totalused = 0;
 
-/*
- *  dump_slab() adapted for newer percpu slab format.
- */
+	for (i = 0; i < vt->nr_swapfiles; i++, 
+	    swap_info += SIZE(swap_info_struct)) {
+		fill_swap_info(swap_info);
 
-static void
-dump_slab_percpu_v1(struct meminfo *si)
-{
-	int tmp;
+		flags = INT(vt->swap_info_struct + 
+			OFFSET(swap_info_struct_flags));
 
-        readmem(si->slab+OFFSET(slab_s_s_mem),
-                KVADDR, &si->s_mem, sizeof(ulong),
-                "s_mem", FAULT_ON_ERROR);
+		if (!(flags & SWP_USED))
+			continue;
 
-	/*
-	 * Include the array of kmem_bufctl_t's appended to slab.
-	 */
-	tmp = SIZE(slab_s) + (SIZE(kmem_bufctl_t) * si->c_num);
+		swap_file = ULONG(vt->swap_info_struct + 
+			OFFSET(swap_info_struct_swap_file));
 
-        if (si->flags & ADDRESS_SPECIFIED)  {
-                if (INSLAB_PERCPU(si->slab, si) && 
-		    (si->spec_addr >= si->slab) &&
-                    (si->spec_addr < (si->slab+tmp))) {
-			if (si->spec_addr >= (si->slab + SIZE(slab_s)))
-				si->found = KMEM_BUFCTL_ADDR;
-			else
-                		si->found = KMEM_SLAB_ADDR;
-                } else if (INSLAB_PERCPU(si->spec_addr, si))
-			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
+                swap_device = INT(vt->swap_info_struct +
+                        OFFSET_OPTION(swap_info_struct_swap_device, 
+			swap_info_struct_old_block_size));
+
+                pages = INT(vt->swap_info_struct +
+                        OFFSET(swap_info_struct_pages));
+
+		totalswap += pages;
+		pages <<= (PAGESHIFT() - 10);
+
+                prio = INT(vt->swap_info_struct + 
+			OFFSET(swap_info_struct_prio));
+
+		if (MEMBER_SIZE("swap_info_struct", "max") == sizeof(int))
+			max = UINT(vt->swap_info_struct +
+                                OFFSET(swap_info_struct_max));
 		else
-			return;
-        }
+                	max = ULONG(vt->swap_info_struct +
+                        	OFFSET(swap_info_struct_max));
+
+                swap_map = ULONG(vt->swap_info_struct +
+                        OFFSET(swap_info_struct_swap_map));
+
+		if (swap_file) {
+			if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) {
+                		vfsmnt = ULONG(vt->swap_info_struct +
+                        		OFFSET(swap_info_struct_swap_vfsmnt));
+				get_pathname(swap_file, buf, BUFSIZE, 
+					1, vfsmnt);
+			} else if (VALID_MEMBER
+				(swap_info_struct_old_block_size)) {
+				get_pathname(file_to_dentry(swap_file), 
+					buf, BUFSIZE, 1, file_to_vfsmnt(swap_file));
+			} else {
+				get_pathname(swap_file, buf, BUFSIZE, 1, 0);
+			}
+		} else
+			sprintf(buf, "(unknown)");
+
+		map = (ushort *)GETBUF(sizeof(ushort) * max);
+
+		if (!readmem(swap_map, KVADDR, map, 
+		    sizeof(ushort) * max, "swap_info swap_map data",
+		    RETURN_ON_ERROR|QUIET)) {
+			if (swapflags & RETURN_ON_ERROR) {
+				*totalswap_pages = swap_map;
+				*totalused_pages = i;
+				return FALSE;
+			} else 
+				error(FATAL, 
+		              "swap_info[%d].swap_map at %lx is inaccessible\n",
+                        		i, swap_map);
+		}
 
-        readmem(si->slab+OFFSET(slab_s_inuse),
-                KVADDR, &tmp, sizeof(int),
-                "inuse", FAULT_ON_ERROR);
-	si->s_inuse = tmp;
+		usedswap = 0;
+                for (j = 0; j < max; j++) {
+                        switch (map[j])
+                        {
+                        case SWAP_MAP_BAD:
+                        case 0:
+                                continue;
+                        default:
+                                usedswap++;
+                        }
+		}
 
-        readmem(si->slab+OFFSET(slab_s_free),
-                KVADDR, &si->free, SIZE(kmem_bufctl_t),
-                "kmem_bufctl_t", FAULT_ON_ERROR);
+		FREEBUF(map);
 
-	gather_slab_free_list_percpu(si);
-	gather_slab_cached_count(si);
+		totalused += usedswap;
+		usedswap <<= (PAGESHIFT() - 10);
+		pct = (usedswap * 100)/pages;
 
-	if (!(si->flags & ADDRESS_SPECIFIED)) {
-		fprintf(fp, slab_hdr);
-		DUMP_SLAB_INFO();
+		if (swapflags & VERBOSE)
+			fprintf(fp, "%-15s  %s    %7dk %7dk  %2ld%%     %d\n", 
+				buf, swap_device ? "PARTITION" : "  FILE   ", 
+				pages, usedswap, pct, prio);
 	}
 
-	dump_slab_objects_percpu(si);
-}
+	if (totalswap_pages)
+		*totalswap_pages = totalswap;
+	if (totalused_pages)
+		*totalused_pages = totalused;
 
+	return TRUE;
+}
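
The accounting loop above counts every swap_map entry that is non-zero and not SWAP_MAP_BAD as one used page, converts page counts to kilobytes with the same PAGESHIFT - 10 shift, and derives the percentage from those figures. A self-contained sketch with an invented map and an assumed 4K page size:

#include <stdio.h>

#define SWAP_MAP_BAD	0x8000
#define PAGESHIFT	12		/* assume 4K pages */

int
main(void)
{
	unsigned short map[] = { 0, 1, 2, SWAP_MAP_BAD, 1, 0, 0, 1 };
	int i, pages = 8, usedswap = 0;
	unsigned long pages_k, used_k, pct;

	for (i = 0; i < pages; i++) {
		switch (map[i])
		{
		case SWAP_MAP_BAD:
		case 0:
			continue;	/* unused or unusable slot */
		default:
			usedswap++;	/* at least one reference */
		}
	}

	pages_k = (unsigned long)pages << (PAGESHIFT - 10);
	used_k = (unsigned long)usedswap << (PAGESHIFT - 10);
	pct = (used_k * 100) / pages_k;

	printf("SIZE: %luk  USED: %luk  PCT: %lu%%\n", pages_k, used_k, pct);
	return 0;
}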
 
 /*
- *  Updated for 2.6 slab substructure.
+ *  Translate a PTE into a swap device and offset string.
  */
-static void
-dump_slab_percpu_v2(struct meminfo *si)
+char *
+swap_location(ulonglong pte, char *buf)
 {
-	int tmp;
-
-        readmem(si->slab+OFFSET(slab_s_mem),
-                KVADDR, &si->s_mem, sizeof(ulong),
-                "s_mem", FAULT_ON_ERROR);
-
-	/*
-	 * Include the array of kmem_bufctl_t's appended to slab.
-	 */
-	tmp = SIZE(slab) + (SIZE(kmem_bufctl_t) * si->c_num);
-
-        if (si->flags & ADDRESS_SPECIFIED)  {
-                if (INSLAB_PERCPU(si->slab, si) && 
-		    (si->spec_addr >= si->slab) &&
-                    (si->spec_addr < (si->slab+tmp))) {
-			if (si->spec_addr >= (si->slab + SIZE(slab)))
-				si->found = KMEM_BUFCTL_ADDR;
-			else
-                		si->found = KMEM_SLAB_ADDR;
-                } else if (INSLAB_PERCPU(si->spec_addr, si))
-			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
-		else
-			return;
-        }
-
-        readmem(si->slab+OFFSET(slab_inuse),
-                KVADDR, &tmp, sizeof(int),
-                "inuse", FAULT_ON_ERROR);
-	si->s_inuse = tmp;
-
-        readmem(si->slab+OFFSET(slab_free),
-                KVADDR, &si->free, SIZE(kmem_bufctl_t),
-                "kmem_bufctl_t", FAULT_ON_ERROR);
+	char swapdev[BUFSIZE];
 
-	gather_slab_free_list_percpu(si);
-	gather_slab_cached_count(si);
+        if (!pte)
+                return NULL;
 
-	if (!(si->flags & ADDRESS_SPECIFIED)) {
-		fprintf(fp, slab_hdr);
-		DUMP_SLAB_INFO();
-	}
+	if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
+		sprintf(buf, "%s  OFFSET: %lld", 
+			get_swapdev(__swp_type(pte), swapdev), __swp_offset(pte));
+	else
+		sprintf(buf, "%s  OFFSET: %llx", 
+			get_swapdev(SWP_TYPE(pte), swapdev), SWP_OFFSET(pte));
 
-	dump_slab_objects_percpu(si);
+        return buf;
 }
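
A swap PTE encodes a type (which swap area) and an offset (which slot within that area). The exact bit layout is architecture- and kernel-version-specific; the 5-bit type field at bit 1 below is purely a hypothetical example, not the layout behind crash's __swp_type()/__swp_offset() or SWP_TYPE()/SWP_OFFSET() macros:

#include <stdio.h>

/* hypothetical layout: bit 0 unused, bits 1-5 = type, bits 8+ = offset */
#define DEMO_SWP_TYPE(e)	(((e) >> 1) & 0x1f)
#define DEMO_SWP_OFFSET(e)	((e) >> 8)

int
main(void)
{
	unsigned long long entry = (123ULL << 8) | (2ULL << 1);

	printf("TYPE: %llu  OFFSET: %llu\n",
		DEMO_SWP_TYPE(entry), DEMO_SWP_OFFSET(entry));
	return 0;
}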
 
-
-
 /*
- *  Gather the free objects in a slab into the si->addrlist, checking for
- *  specified addresses that are in-slab kmem_bufctls, and making error checks 
- *  along the way.  Object address checks are deferred to dump_slab_objects().
+ *  Given the type field from a PTE, return the name of the swap device.
  */
-
-#define INOBJECT(addr, obj) ((addr >= obj) && (addr < (obj+si->size)))
-
-static void
-gather_slab_free_list(struct meminfo *si)
+static char *
+get_swapdev(ulong type, char *buf)
 {
-	ulong *next, obj;
-	ulong expected, cnt;
-
-	BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1));
+	unsigned int i, swap_info_len;
+	ulong swap_info, swap_file;
+	ulong vfsmnt;
 
-	if (!si->s_freep)
-		return;
+        if (!symbol_exists("nr_swapfiles"))
+                error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n");
 
-	cnt = 0;
-	expected = si->c_num - si->s_inuse;
+        if (!symbol_exists("swap_info"))
+                error(FATAL, "swap_info doesn't exist in this kernel!\n");
 
-	next = si->s_freep; 
-	do {
+        swap_info = symbol_value("swap_info");
 
-		if (cnt == si->c_num) {
-			error(INFO, 
-		     "\"%s\" cache: too many objects found in slab free list\n",
-				si->curname);
-			si->errors++;
-			return;
-		}
+	swap_info_len = (i = ARRAY_LENGTH(swap_info)) ?
+		i : get_array_length("swap_info", NULL, 0);
 
-		/*
-                 *  Off-slab kmem_bufctls are contained in arrays of object 
-		 *  pointers that point to:
-	         *    1. next kmem_bufctl (or NULL) if the object is free.
-	         *    2. to the object if the object is in use.
-                 *
-	 	 *  On-slab kmem_bufctls reside just after the object itself,
-	         *  and point to:
-	         *    1. next kmem_bufctl (or NULL) if object is free.
-	         *    2. the containing slab if the object is in use.
-		 */
+        sprintf(buf, "(unknown swap location)");
 
-	        if (si->c_flags & SLAB_CFLGS_BUFCTL) 
-                	obj = si->s_mem + ((next - si->s_index) * si->c_offset);
-		else 
-			obj = (ulong)next - si->c_offset;
+	if (type >= swap_info_len)
+		return buf;
 
-		si->addrlist[cnt] = obj; 
+	swap_info += (SIZE(swap_info_struct) * type);
+	fill_swap_info(swap_info);
+	swap_file = ULONG(vt->swap_info_struct + 
+		OFFSET(swap_info_struct_swap_file));
 
-		if (si->flags & ADDRESS_SPECIFIED) {
-			if (INSLAB(next, si) && 
-		            (si->spec_addr >= (ulong)next) &&
-			    (si->spec_addr < (ulong)(next + 1))) {
-				si->found = KMEM_BUFCTL_ADDR;
-				return;
-			}
+        if (swap_file) {
+		if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) {
+			vfsmnt = ULONG(vt->swap_info_struct + 
+				OFFSET(swap_info_struct_swap_vfsmnt));
+        		get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt);
+                } else if (VALID_MEMBER (swap_info_struct_old_block_size)) {
+                        get_pathname(file_to_dentry(swap_file),
+                        	buf, BUFSIZE, 1, 0);
+		} else {
+        		get_pathname(swap_file, buf, BUFSIZE, 1, 0);
 		}
+        } 
 
-		cnt++;
+	return buf;
+}
 
-		if (!INSLAB(obj, si)) {
-			error(INFO, 
-		       "\"%s\" cache: address not contained within slab: %lx\n",
-				si->curname, obj);
-			si->errors++;
-		}
+/*
+ *  If not currently stashed, cache the passed-in swap_info_struct.
+ */
+static void
+fill_swap_info(ulong swap_info)
+{
+	if (vt->last_swap_read == swap_info)
+		return;
 
-        	readmem((ulong)next, KVADDR, &next, sizeof(void *),
-                	"s_freep chain entry", FAULT_ON_ERROR);
-	} while (next); 
+	if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *)
+        	malloc(SIZE(swap_info_struct))))
+			error(FATAL, "cannot malloc swap_info_struct space\n");
+	
+        readmem(swap_info, KVADDR, vt->swap_info_struct, SIZE(swap_info_struct),
+                "fill_swap_info", FAULT_ON_ERROR);
 
-	if (cnt != expected) {
-		error(INFO, 
-	       "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n",
-			si->curname, expected, cnt); 
-		si->errors++;
-	}
+	vt->last_swap_read = swap_info;
 }
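
fill_swap_info() is a read-through cache: it remembers the address of the last swap_info_struct it copied in and skips the readmem() when asked for the same one again. The same pattern reduced to a standalone sketch, where read_struct() is a stand-in for readmem(), not a real API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define STRUCT_SIZE 64

static char *cached_buf;
static unsigned long last_read;

static void
read_struct(unsigned long addr, char *buf)	/* pretend readmem() */
{
	memset(buf, 0, STRUCT_SIZE);
	printf("reading %lx from the dumpfile\n", addr);
}

static void
fill_cache(unsigned long addr)
{
	if (last_read == addr)
		return;				/* already stashed */
	if (!cached_buf && !(cached_buf = malloc(STRUCT_SIZE))) {
		fprintf(stderr, "cannot malloc buffer\n");
		exit(1);
	}
	read_struct(addr, cached_buf);
	last_read = addr;
}

int
main(void)
{
	fill_cache(0xc0421000UL);
	fill_cache(0xc0421000UL);		/* second call is a no-op */
	return 0;
}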
 
-
 /*
- *  gather_slab_free_list() adapted for newer percpu slab format.
+ *  If the kernel is live, clear the cached swap_info reference.
  */
+void
+clear_swap_info_cache(void)
+{
+	if (ACTIVE())
+		vt->last_swap_read = 0;
+}
 
-#define BUFCTL_END 0xffffFFFF
 
-static void
-gather_slab_free_list_percpu(struct meminfo *si)
+/*
+ *  Translate a vm_area_struct and virtual address into a filename
+ *  and offset string.
+ */ 
+
+#define PAGE_CACHE_SHIFT  (machdep->pageshift) /* This is supposed to change! */
+
+static char *
+vma_file_offset(ulong vma, ulong vaddr, char *buf)
 {
-	int i;
-	ulong obj;
-	ulong expected, cnt;
-	int free_index;
-	ulong kmembp;
-	short *kbp;
+	ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset;
+	ulong vfsmnt;
+	char file[BUFSIZE];
+	char *vma_buf, *file_buf;
 
-	BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1));
+	if (!vma)
+		return NULL;
 
-	if (CRASHDEBUG(1)) 
-		fprintf(fp, "slab: %lx si->s_inuse: %ld si->c_num: %ld\n", 
-			si->slab, si->s_inuse, si->c_num);
+        vma_buf = fill_vma_cache(vma);
+
+        vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file));
+
+	if (!vm_file) 
+		goto no_file_offset;
+
+        file_buf = fill_file_cache(vm_file);
+        dentry = ULONG(file_buf + OFFSET(file_f_dentry));
+
+	if (!dentry) 
+		goto no_file_offset;
 
-	if (si->s_inuse == si->c_num )
-		return;
+	file[0] = NULLCHAR;
+	if (VALID_MEMBER(file_f_vfsmnt)) {
+        	vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt));
+               	get_pathname(dentry, file, BUFSIZE, 1, vfsmnt);
+	} else 
+               	get_pathname(dentry, file, BUFSIZE, 1, 0);
 
-	kmembp = si->slab + SIZE_OPTION(slab_s, slab);
-        readmem((ulong)kmembp, KVADDR, si->kmem_bufctl, 
-		SIZE(kmem_bufctl_t) * si->c_num,
-                "kmem_bufctl array", FAULT_ON_ERROR);
+	if (!strlen(file)) 
+		goto no_file_offset;
 
-	if (CRASHDEBUG(1)) {
-		for (i = 0; (SIZE(kmem_bufctl_t) == sizeof(int)) && 
-		     (i < si->c_num); i++) 
-			fprintf(fp, "%d ", si->kmem_bufctl[i]);
+        vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
 
-		for (kbp = (short *)&si->kmem_bufctl[0], i = 0; 
-		     (SIZE(kmem_bufctl_t) == sizeof(short)) && (i < si->c_num);
-		     i++) 
-			fprintf(fp, "%d ", *(kbp + i));
+	vm_offset = vm_pgoff = 0xdeadbeef;
 
-		fprintf(fp, "\n");
+	if (VALID_MEMBER(vm_area_struct_vm_offset)) 
+        	vm_offset = ULONG(vma_buf + 
+			OFFSET(vm_area_struct_vm_offset));
+	else if (VALID_MEMBER(vm_area_struct_vm_pgoff))
+        	vm_pgoff = ULONG(vma_buf + 
+			OFFSET(vm_area_struct_vm_pgoff));
+	else 
+		goto no_file_offset;
+
+	if (vm_offset != 0xdeadbeef) 
+		offset = VIRTPAGEBASE(vaddr) - vm_start + vm_offset;
+	else if (vm_pgoff != 0xdeadbeef) {
+		offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff;
+		offset <<= PAGE_CACHE_SHIFT;
 	}
 
-	cnt = 0;
-	expected = si->c_num - si->s_inuse;
+	sprintf(buf, "%s  OFFSET: %lx", file, offset);
 
-	if (SIZE(kmem_bufctl_t) == sizeof(int)) {
-		for (free_index = si->free; free_index != BUFCTL_END;
-		     free_index = si->kmem_bufctl[free_index]) {
-	
-	                if (cnt == si->c_num) {
-	                        error(INFO,
-                     "\"%s\" cache: too many objects found in slab free list\n",
-	                                si->curname);
-	                        si->errors++;
-	                        return;
-	                }
-	
-			obj = si->s_mem + (free_index*si->size);
-			si->addrlist[cnt] = obj; 
-			cnt++;
-		}
-	} else if (SIZE(kmem_bufctl_t) == sizeof(short)) {
-		kbp = (short *)&si->kmem_bufctl[0];
+	return buf;
 
-                for (free_index = si->free; free_index != BUFCTL_END;
-                     free_index = (int)*(kbp + free_index)) {
+no_file_offset:
+	return NULL;
+}
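
The vm_pgoff branch above computes the file offset as the page index of the address within the mapping plus the mapping's starting page offset into the file, shifted back into bytes. In isolation, with an assumed 4K PAGE_SHIFT and invented addresses:

#include <stdio.h>

#define PAGE_SHIFT 12

int
main(void)
{
	unsigned long vm_start = 0x40000000UL;		/* mapping starts here */
	unsigned long vm_pgoff = 0x10;			/* maps from page 16 of the file */
	unsigned long vaddr = vm_start + 0x3456;	/* address being translated */
	unsigned long offset;

	offset = ((vaddr - vm_start) >> PAGE_SHIFT) + vm_pgoff;
	offset <<= PAGE_SHIFT;

	printf("FILE OFFSET: %lx\n", offset);		/* 0x13000 */
	return 0;
}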
 
-                        if (cnt == si->c_num) {
-                                error(INFO,
-                     "\"%s\" cache: too many objects found in slab free list\n",
-                                        si->curname);
-                                si->errors++;
-                                return;
-                        }
+/*
+ *  Translate a PTE into its physical address and flags.
+ */
+void
+cmd_pte(void)
+{
+        int c;
+	ulonglong pte;
 
-                        obj = si->s_mem + (free_index*si->size);
-                        si->addrlist[cnt] = obj;
-                        cnt++;
+        while ((c = getopt(argcnt, args, "")) != EOF) {
+                switch(c)
+                {
+                default:
+                        argerrs++;
+                        break;
                 }
-	} else 
-		error(FATAL, 
-                "size of kmem_bufctl_t (%d) not sizeof(int) or sizeof(short)\n",
-			SIZE(kmem_bufctl_t));
+        }
 
-	if (cnt != expected) {
-		error(INFO, 
-	       "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n",
-			si->curname, expected, cnt); 
-		si->errors++;
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+
+	while (args[optind]) {
+		pte = htoll(args[optind], FAULT_ON_ERROR, NULL);
+		machdep->translate_pte((ulong)pte, NULL, pte);
+		optind++;
 	}
-}
 
+}
 
+static char *node_zone_hdr = "ZONE  NAME         SIZE";
 
 /*
- *  Dump the FREE, [ALLOCATED] and <CACHED> objects of a slab.
- */  
-
-#define DUMP_SLAB_OBJECT() \
-        for (j = on_free_list = 0; j < si->c_num; j++) {	\
-                if (obj == si->addrlist[j]) {			\
-                        on_free_list = TRUE;			\
-                        break;					\
-                }						\
-        }							\
-								\
-        if (on_free_list) {					\
-                if (!(si->flags & ADDRESS_SPECIFIED))		\
-                        fprintf(fp, "   %lx\n", obj);		\
-                if (si->flags & ADDRESS_SPECIFIED) {		\
-                        if (INOBJECT(si->spec_addr, obj)) {	\
-                                si->found =			\
-                                    KMEM_OBJECT_ADDR_FREE;	\
-                                return;				\
-                        }					\
-                }						\
-        } else {						\
-                if (!(si->flags & ADDRESS_SPECIFIED))		\
-                        fprintf(fp, "  [%lx]\n", obj);		\
-                cnt++;						\
-                if (si->flags & ADDRESS_SPECIFIED) {		\
-                        if (INOBJECT(si->spec_addr, obj)) {	\
-                                si->found =			\
-                                    KMEM_OBJECT_ADDR_INUSE;	\
-                                return;				\
-                        }					\
-                }						\
-        }
-
+ *  On systems supporting memory nodes, display the basic per-node data.
+ */
 static void
-dump_slab_objects(struct meminfo *si)
+dump_memory_nodes(int initialize)
 {
 	int i, j;
-	ulong *next;
-	int on_free_list; 
-	ulong cnt, expected;
-	ulong bufctl, obj;
-
-	gather_slab_free_list(si);
-
-	if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB))
-		return;
-
-        cnt = 0;
-        expected = si->s_inuse;
+	int n, id, node, flen, slen, badaddr;
+	ulong node_mem_map;
+        ulong node_start_paddr;
+	ulong node_start_pfn;
+        ulong node_start_mapnr;
+	ulong node_spanned_pages, node_present_pages;
+        ulong free_pages, zone_size, node_size, cum_zone_size;
+	ulong zone_start_paddr, zone_start_mapnr, zone_mem_map;
+	physaddr_t phys;
+	ulong pp;
+	ulong zone_start_pfn;
+	ulong bdata;
+	ulong pgdat;
+	ulong node_zones;
+	ulong value;
+	char buf1[BUFSIZE];
+	char buf2[BUFSIZE];
+	char buf3[BUFSIZE];
+	char buf4[BUFSIZE];
+	char buf5[BUFSIZE];
+	struct node_table *nt;
 
-        if (CRASHDEBUG(1))
-                for (i = 0; i < si->c_num; i++) {
-                        fprintf(fp, "si->addrlist[%d]: %lx\n", 
-				i, si->addrlist[i]);
+	if (!(vt->flags & (NODES|NODES_ONLINE)) && initialize) {
+		nt = &vt->node_table[0];
+		nt->node_id = 0;
+		if (symbol_exists("contig_page_data"))
+			nt->pgdat = symbol_value("contig_page_data");
+                else
+			nt->pgdat = 0;
+		nt->size = vt->total_pages;
+		nt->mem_map = vt->mem_map;
+		nt->start_paddr = 0;
+		nt->start_mapnr = 0;
+                if (CRASHDEBUG(1)) {
+                        fprintf(fp, "node_table[%d]: \n", 0);
+                        fprintf(fp, "             id: %d\n", nt->node_id);
+                        fprintf(fp, "          pgdat: %lx\n", nt->pgdat);
+                        fprintf(fp, "           size: %ld\n", nt->size);
+                        fprintf(fp, "        present: %ld\n", nt->present);
+                        fprintf(fp, "        mem_map: %lx\n", nt->mem_map);
+                        fprintf(fp, "    start_paddr: %llx\n", nt->start_paddr);
+                        fprintf(fp, "    start_mapnr: %ld\n", nt->start_mapnr);
                 }
+		return;
+	}
 
-        if (!(si->flags & ADDRESS_SPECIFIED)) 
-		fprintf(fp, free_inuse_hdr);
-
-        /* For on-slab bufctls, c_offset is the distance between the start of
-         * an obj and its related bufctl.  For off-slab bufctls, c_offset is
-         * the distance between objs in the slab.
-         */
-
-        if (si->c_flags & SLAB_CFLGS_BUFCTL) {
-		for (i = 0, next = si->s_index; i < si->c_num; i++, next++){
-                	obj = si->s_mem + 
-				((next - si->s_index) * si->c_offset);
-			DUMP_SLAB_OBJECT();
-		}
-	} else {
+	if (initialize) {
+		pgdat = UNINITIALIZED;
 		/*
-		 *  Get the "real" s_mem, i.e., without the offset stripped off.
-		 *  It contains the address of the first object.
+		 *  This order may have to change based upon architecture...
 		 */
-        	readmem(si->slab+OFFSET(kmem_slab_s_s_mem),
-                	KVADDR, &obj, sizeof(ulong),
-                	"s_mem", FAULT_ON_ERROR);
+		if (symbol_exists("pgdat_list") && 
+		    (VALID_MEMBER(pglist_data_node_next) || 
+		     VALID_MEMBER(pglist_data_pgdat_next))) {
+                        get_symbol_data("pgdat_list", sizeof(void *), &pgdat);
+			vt->flags &= ~NODES_ONLINE;
+		} else if (vt->flags & NODES_ONLINE) {
+			if ((node = next_online_node(0)) < 0) {
+				error(WARNING, 
+				   "cannot determine first node from node_online_map\n\n");
+				return;
+			} 
+			if (!(pgdat = next_online_pgdat(node))) { 
+				error(WARNING, 
+				   "cannot determine pgdat list for this kernel/architecture\n\n");
+				return;
+			}
+		} 
+	} else
+		pgdat = vt->node_table[0].pgdat;
 
-		for (i = 0; i < si->c_num; i++) {
-			DUMP_SLAB_OBJECT();
+	if (initialize && (pgdat == UNINITIALIZED)) {
+		error(WARNING, "cannot initialize pgdat list\n\n");
+		return;
+	}
 
-                	if (si->flags & ADDRESS_SPECIFIED) {
-				bufctl = obj + si->c_offset;
+	for (n = 0, badaddr = FALSE; pgdat; n++) {
+		if (n >= vt->numnodes)
+			error(FATAL, "numnodes out of sync with pgdat_list?\n");
 
-                        	if ((si->spec_addr >= bufctl) &&
-                                    (si->spec_addr < 
-				    (bufctl + SIZE(kmem_bufctl_t)))) {
-                                	si->found = KMEM_BUFCTL_ADDR;
-                                	return;
-                        	}
-                	}
+		nt = &vt->node_table[n];
 
-			obj += (si->c_offset + SIZE(kmem_bufctl_t));
+		readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id,
+			sizeof(int), "pglist node_id", FAULT_ON_ERROR);
+
+		if (VALID_MEMBER(pglist_data_node_mem_map)) {
+			readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, 
+				&node_mem_map, sizeof(ulong), 
+				"node_mem_map", FAULT_ON_ERROR);
+		} else {
+			node_mem_map = BADADDR;
+			badaddr = TRUE;
 		}
-	}
 
-        if (cnt != expected) {
-                error(INFO,
-              "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n",
-                        si->curname, expected, cnt);
-                si->errors++;
-        }
+		if (VALID_MEMBER(pglist_data_node_start_paddr))
+			readmem(pgdat+OFFSET(pglist_data_node_start_paddr), 
+				KVADDR, &node_start_paddr, sizeof(ulong), 
+				"pglist node_start_paddr", FAULT_ON_ERROR);
+		else if (VALID_MEMBER(pglist_data_node_start_pfn)) {
+			readmem(pgdat+OFFSET(pglist_data_node_start_pfn), 
+				KVADDR, &node_start_pfn, sizeof(ulong), 
+				"pglist node_start_pfn", FAULT_ON_ERROR);
+			node_start_mapnr = node_start_pfn;
+			node_start_paddr = PTOB(node_start_pfn);
+			if (badaddr && IS_SPARSEMEM()) {
+				phys = PTOB(node_start_pfn);
+                                if (phys_to_page(phys, &pp))
+                                	node_mem_map = pp;
+			}
+		} else error(INFO, 
+			"cannot determine zone starting physical address\n");
 
-}
+		if (VALID_MEMBER(pglist_data_node_start_mapnr))
+			readmem(pgdat+OFFSET(pglist_data_node_start_mapnr), 
+				KVADDR, &node_start_mapnr, sizeof(ulong), 
+				"pglist node_start_mapnr", FAULT_ON_ERROR);
 
+		if (VALID_MEMBER(pglist_data_node_size)) 
+			readmem(pgdat+OFFSET(pglist_data_node_size), 
+				KVADDR, &node_size, sizeof(ulong), 
+				"pglist node_size", FAULT_ON_ERROR);
+		else if (VALID_MEMBER(pglist_data_node_spanned_pages)) {
+			readmem(pgdat+OFFSET(pglist_data_node_spanned_pages), 
+				KVADDR, &node_spanned_pages, sizeof(ulong), 
+				"pglist node_spanned_pages", FAULT_ON_ERROR);
+			node_size = node_spanned_pages;
+		} else error(INFO, "cannot determine zone size\n");
 
-/*
- *  dump_slab_objects() adapted for newer percpu slab format.
- */
+		if (VALID_MEMBER(pglist_data_node_present_pages))
+                        readmem(pgdat+OFFSET(pglist_data_node_present_pages),
+                                KVADDR, &node_present_pages, sizeof(ulong),
+                                "pglist node_present_pages", FAULT_ON_ERROR);
+		else
+			node_present_pages = 0;
 
-static void
-dump_slab_objects_percpu(struct meminfo *si)
-{
-	int i, j;
-	int on_free_list, on_cpudata_list; 
-	ulong cnt, expected;
-	ulong obj;
+		readmem(pgdat+OFFSET(pglist_data_bdata), KVADDR, &bdata,
+			sizeof(ulong), "pglist bdata", FAULT_ON_ERROR);
 
-	if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB))
-		return;
+		if (initialize) {
+			nt->node_id = id;
+			nt->pgdat = pgdat;
+			if (VALID_MEMBER(zone_struct_memsize)) 
+				nt->size = 0;  /* initialize below */
+			else 
+				nt->size = node_size;
+			nt->present = node_present_pages;
+			nt->mem_map = node_mem_map;
+			nt->start_paddr = node_start_paddr;
+			nt->start_mapnr = node_start_mapnr;
 
-        cnt = 0;
-        expected = si->s_inuse;
+			if (CRASHDEBUG(1)) {
+                		fprintf(fp, "node_table[%d]: \n", n);
+                		fprintf(fp, "             id: %d\n", nt->node_id);
+                		fprintf(fp, "          pgdat: %lx\n", nt->pgdat);
+                		fprintf(fp, "           size: %ld\n", nt->size);
+                		fprintf(fp, "        present: %ld\n", nt->present);
+                		fprintf(fp, "        mem_map: %lx\n", nt->mem_map);
+                		fprintf(fp, "    start_paddr: %llx\n", nt->start_paddr);
+                		fprintf(fp, "    start_mapnr: %ld\n", nt->start_mapnr);
+			}
+		}
 
-        if (CRASHDEBUG(1))
-                for (i = 0; i < si->c_num; i++) {
-                        fprintf(fp, "si->addrlist[%d]: %lx\n", 
-				i, si->addrlist[i]);
-                }
+		if (!initialize) {
+			if (n) {
+				fprintf(fp, "\n");
+				pad_line(fp, slen, '-');
+			}
+			flen = MAX(VADDR_PRLEN, strlen("BOOTMEM_DATA"));
+			fprintf(fp, "%sNODE  %s  %s  %s  %s\n", 
+			    n ? "\n\n" : "",
+			    mkstring(buf1, 8, CENTER, "SIZE"),
+			    mkstring(buf2, flen, CENTER|LJUST, "PGLIST_DATA"),
+			    mkstring(buf3, flen, CENTER|LJUST, "BOOTMEM_DATA"),
+			    mkstring(buf4, flen, CENTER|LJUST, "NODE_ZONES"));
 
-        if (!(si->flags & ADDRESS_SPECIFIED)) 
-		fprintf(fp, free_inuse_hdr);
+			node_zones = pgdat + OFFSET(pglist_data_node_zones);
+			sprintf(buf5, " %2d   %s  %s  %s  %s\n", id, 
+			    mkstring(buf1, 8, CENTER|LJUST|LONG_DEC, 
+				MKSTR(node_size)),
+			    mkstring(buf2, flen, CENTER|LJUST|LONG_HEX, 
+				MKSTR(pgdat)),
+			    mkstring(buf3, flen, CENTER|LONG_HEX, 
+				MKSTR(bdata)),
+			    mkstring(buf4, flen, CENTER|LJUST|LONG_HEX,
+                                MKSTR(node_zones)));
+			fprintf(fp, "%s", buf5);
 
-	for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) {
-		on_free_list = FALSE;
-		on_cpudata_list = FALSE;
+			j = 12 + strlen(buf1) + strlen(buf2) + strlen(buf3) +
+				count_leading_spaces(buf4);
+                	for (i = 1; i < vt->nr_zones; i++) {
+				node_zones += SIZE_OPTION(zone_struct, zone);
+				INDENT(j);
+				fprintf(fp, "%lx\n", node_zones);
+			}
+	
+	                fprintf(fp, "%s  START_PADDR  START_MAPNR\n",
+	                    mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, 
+				"MEM_MAP"));
+	                fprintf(fp, "%s  %s  %s\n",
+	                    mkstring(buf1, VADDR_PRLEN,
+	                        CENTER|LONG_HEX, MKSTR(node_mem_map)),
+	                    mkstring(buf2, strlen("START_PADDR"),
+	                        CENTER|LONG_HEX|RJUST, MKSTR(node_start_paddr)),
+	                    mkstring(buf3, strlen("START_MAPNR"),
+	                        CENTER|LONG_DEC|RJUST, 
+				    MKSTR(node_start_mapnr)));
+	
+			sprintf(buf2, "%s  %s  START_PADDR  START_MAPNR", 
+				node_zone_hdr,
+				mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, 
+				    "MEM_MAP"));
+			slen = strlen(buf2);
+			fprintf(fp, "\n%s\n", buf2);
+		}
 
-	        for (j = 0; j < si->c_num; j++) {        
-	                if (obj == si->addrlist[j]) {                   
-	                        on_free_list = TRUE;                    
-	                        break;                                  
-	                }                                               
-	        }                                                       
+       		node_zones = pgdat + OFFSET(pglist_data_node_zones);
+		cum_zone_size = 0;
+		for (i = 0; i < vt->nr_zones; i++) {
+			if (CRASHDEBUG(7))
+				fprintf(fp, "zone %d at %lx\n", i, node_zones);
 
-		on_cpudata_list = check_cpudata_list(si, obj);
+			if (VALID_MEMBER(zone_struct_size))
+                		readmem(node_zones+OFFSET(zone_struct_size), 
+				    	KVADDR, &zone_size, sizeof(ulong),
+                        		"zone_struct size", FAULT_ON_ERROR);
+			else if (VALID_MEMBER(zone_struct_memsize)) {
+                		readmem(node_zones+OFFSET(zone_struct_memsize), 
+				    	KVADDR, &zone_size, sizeof(ulong),
+                        		"zone_struct memsize", FAULT_ON_ERROR);
+				nt->size += zone_size;
+			} else if (VALID_MEMBER(zone_spanned_pages)) {
+                		readmem(node_zones+ OFFSET(zone_spanned_pages), 
+				    	KVADDR, &zone_size, sizeof(ulong),
+                        		"zone spanned_pages", FAULT_ON_ERROR);
+			} else error(FATAL, 
+			    "zone_struct has neither size nor memsize field\n");
 
-		if (on_free_list && on_cpudata_list) {
-			error(INFO, 
-		    "\"%s\" cache: object %lx on both free and cpudata lists\n",
-				si->curname, obj);
-			si->errors++;
-		}
-	                                                               
-	        if (on_free_list) {                                     
-	                if (!(si->flags & ADDRESS_SPECIFIED))           
-	                        fprintf(fp, "   %lx\n", obj);           
-	                if (si->flags & ADDRESS_SPECIFIED) {            
-	                        if (INOBJECT(si->spec_addr, obj)) {     
-	                                si->found =                     
-	                                    KMEM_OBJECT_ADDR_FREE;      
-	                                return;                         
-	                        }                                       
-	                }                                               
-		} else if (on_cpudata_list) {
-                        if (!(si->flags & ADDRESS_SPECIFIED))
-                                fprintf(fp, "   %lx  (cpu %d cache)\n", obj,
-					si->cpu);
-                        cnt++;    
-                        if (si->flags & ADDRESS_SPECIFIED) {
-                                if (INOBJECT(si->spec_addr, obj)) {
-                                        si->found =
-                                            KMEM_OBJECT_ADDR_CACHED;
-                                        return;
-                                } 
-                        }
-	        } else {                                                
-	                if (!(si->flags & ADDRESS_SPECIFIED))           
-	                        fprintf(fp, "  [%lx]\n", obj);          
-	                cnt++;                                          
-	                if (si->flags & ADDRESS_SPECIFIED) {            
-	                        if (INOBJECT(si->spec_addr, obj)) {     
-	                                si->found =                     
-	                                    KMEM_OBJECT_ADDR_INUSE;     
-	                                return;                         
-	                        }                                       
-	                }                                               
-	        }
-	}
+                	readmem(node_zones+ 
+				OFFSET_OPTION(zone_struct_free_pages,
+				zone_free_pages), KVADDR, &free_pages, 
+				sizeof(ulong), "zone[_struct] free_pages", 
+				FAULT_ON_ERROR);
+                	readmem(node_zones+OFFSET_OPTION(zone_struct_name,
+				zone_name), KVADDR, &value, sizeof(void *),
+                        	"zone[_struct] name", FAULT_ON_ERROR);
+                	if (!read_string(value, buf1, BUFSIZE-1))
+                        	sprintf(buf1, "(unknown) ");
+			if (VALID_STRUCT(zone_struct)) {
+				if (VALID_MEMBER(zone_struct_zone_start_paddr))
+				{
+                        		readmem(node_zones+OFFSET
+					    (zone_struct_zone_start_paddr),
+                                	    KVADDR, &zone_start_paddr, 
+					    sizeof(ulong), 
+					    "node_zones zone_start_paddr", 
+					    FAULT_ON_ERROR);
+				} else {
+					readmem(node_zones+
+					    OFFSET(zone_struct_zone_start_pfn),
+					    KVADDR, &zone_start_pfn,
+					    sizeof(ulong),
+					    "node_zones zone_start_pfn",
+					    FAULT_ON_ERROR);
+					zone_start_paddr =
+						PTOB(zone_start_pfn);
+				}
+                        	readmem(node_zones+
+					OFFSET(zone_struct_zone_start_mapnr),
+                                	KVADDR, &zone_start_mapnr, 
+					sizeof(ulong), 
+					"node_zones zone_start_mapnr", 
+					FAULT_ON_ERROR);
+			} else {
+                                readmem(node_zones+
+                                        OFFSET(zone_zone_start_pfn),
+                                        KVADDR, &zone_start_pfn,
+                                        sizeof(ulong),
+                                        "node_zones zone_start_pfn",
+                                        FAULT_ON_ERROR);
+				zone_start_paddr = PTOB(zone_start_pfn);
 
-        if (cnt != expected) {
-                error(INFO,
-              "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n",
-                        si->curname, expected, cnt);
-                si->errors++;
-        }
-}
+				if (IS_SPARSEMEM()) {
+					zone_mem_map = 0;
+					zone_start_mapnr = 0;
+					if (zone_size) {
+						phys = PTOB(zone_start_pfn);
+						zone_start_mapnr = phys/PAGESIZE();
+					}
 
-/*
- *  Determine how many of the "inuse" slab objects are actually cached
- *  in the kmem_cache_s header.  Set the per-slab count and update the 
- *  cumulative per-cache count.
- */
+				} else if (!(vt->flags & NODES) && 
+				    INVALID_MEMBER(zone_zone_mem_map)) {
+					readmem(pgdat+OFFSET(pglist_data_node_mem_map),
+                                    	    KVADDR, &zone_mem_map, sizeof(void *),
+                                    	    "contig_page_data mem_map", FAULT_ON_ERROR);
+					if (zone_size)
+						zone_mem_map += cum_zone_size * SIZE(page);
+				} else readmem(node_zones+
+                                        OFFSET(zone_zone_mem_map),
+                                        KVADDR, &zone_mem_map,
+                                        sizeof(ulong),
+                                        "node_zones zone_mem_map",
+                                        FAULT_ON_ERROR);
 
-static void
-gather_slab_cached_count(struct meminfo *si)
-{
-	int i;
-	ulong obj;
+				if (zone_mem_map)
+					zone_start_mapnr = 
+				    	    (zone_mem_map - node_mem_map) / 
+					    SIZE(page);
+				else if (!IS_SPARSEMEM())
+					zone_start_mapnr = 0;
+			}
 
-	si->cpucached_slab = 0;
+			if (IS_SPARSEMEM()) {
+				zone_mem_map = 0;
+				if (zone_size) {
+					phys = PTOB(zone_start_pfn);
+					if (phys_to_page(phys, &pp))
+						zone_mem_map = pp;
+				}
+			} else if (!(vt->flags & NODES) && 
+			    INVALID_MEMBER(zone_struct_zone_mem_map) &&
+			    INVALID_MEMBER(zone_zone_mem_map)) {
+                		readmem(pgdat+OFFSET(pglist_data_node_mem_map),
+				    KVADDR, &zone_mem_map, sizeof(void *), 
+				    "contig_page_data mem_map", FAULT_ON_ERROR);
+				if (zone_size)
+					zone_mem_map += cum_zone_size * SIZE(page);
+				else
+					zone_mem_map = 0;
+			} else 
+				readmem(node_zones+
+				    OFFSET_OPTION(zone_struct_zone_mem_map,
+				    zone_zone_mem_map), KVADDR, &zone_mem_map, 
+				    sizeof(ulong), "node_zones zone_mem_map", 
+				    FAULT_ON_ERROR);
 
-        for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) {
-		if (check_cpudata_list(si, obj)) {
-			si->cpucached_slab++;
-			if (si->flags & SLAB_GET_COUNTS) {
-				si->cpucached_cache++;
+			if (!initialize) {
+				fprintf(fp, " %2d   %-9s %7ld  ", 
+					i, buf1, zone_size);
+				cum_zone_size += zone_size;
+				fprintf(fp, "%s  %s  %s\n",
+	                    	    mkstring(buf1, VADDR_PRLEN,
+	                        	RJUST|LONG_HEX,MKSTR(zone_mem_map)),
+	                            mkstring(buf2, strlen("START_PADDR"),
+	                        	LONG_HEX|RJUST,MKSTR(zone_start_paddr)),
+	                    	    mkstring(buf3, strlen("START_MAPNR"),
+	                        	LONG_DEC|RJUST,
+					MKSTR(zone_start_mapnr)));
 			}
+
+			node_zones += SIZE_OPTION(zone_struct, zone);
+		}
+
+		if (initialize) {
+			if (vt->flags & NODES_ONLINE) {
+				if ((node = next_online_node(node+1)) < 0)
+					pgdat = 0;
+                        	else if (!(pgdat = next_online_pgdat(node))) {
+                                	error(WARNING,
+                   "cannot determine pgdat list for this kernel/architecture (node %d)\n\n", 
+						node);
+					pgdat = 0;
+                        	}
+			} else 
+				readmem(pgdat + OFFSET_OPTION(pglist_data_node_next,
+					pglist_data_pgdat_next), KVADDR,
+					&pgdat, sizeof(void *), "pglist_data node_next",
+					FAULT_ON_ERROR);
+		} else {
+			if ((n+1) < vt->numnodes)
+				pgdat = vt->node_table[n+1].pgdat;
+			else
+				pgdat = 0;
 		}
+	} 
+
+	if (n != vt->numnodes) {
+		if (CRASHDEBUG(2))
+			error(NOTE, "changing numnodes from %d to %d\n",
+				vt->numnodes, n);
+		vt->numnodes = n;
 	}
-}
 
-/*
- *  Populate the percpu object list for a given slab.
- */
+	if (!initialize && IS_SPARSEMEM())
+		dump_mem_sections();
+}
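
When a zone's mem_map pointer is available, its starting map number above is just its distance from the node's mem_map measured in page structs. Reduced to the bare arithmetic, with an assumed sizeof(struct page) and invented addresses:

#include <stdio.h>

#define SIZEOF_PAGE 56UL	/* assumed sizeof(struct page) for this example */

int
main(void)
{
	unsigned long node_mem_map = 0xc1000000UL;
	unsigned long zone_mem_map = node_mem_map + 4096UL * SIZEOF_PAGE;
	unsigned long zone_start_mapnr;

	zone_start_mapnr = (zone_mem_map - node_mem_map) / SIZEOF_PAGE;
	printf("zone_start_mapnr: %lu\n", zone_start_mapnr);	/* 4096 */
	return 0;
}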
 
 static void
-gather_cpudata_list_v1(struct meminfo *si)
+dump_zone_stats(void)
 {
-        int i, j;
-	int avail;
-        ulong cpudata[NR_CPUS];
+	int i, n;
+	ulong pgdat, node_zones;
+	char *zonebuf;
+	char buf1[BUFSIZE];
+	int ivalue;
+	ulong value1;
+	ulong value2;
+	ulong value3;
+	ulong value4;
+	ulong value5;
+	ulong value6;
+
+	pgdat = vt->node_table[0].pgdat;
+	zonebuf = GETBUF(SIZE_OPTION(zone_struct, zone));
+	vm_stat_init();
 
-        if (INVALID_MEMBER(kmem_cache_s_cpudata))
-                return;
+        for (n = 0; pgdat; n++) {
+                node_zones = pgdat + OFFSET(pglist_data_node_zones);
 
-        readmem(si->cache+OFFSET(kmem_cache_s_cpudata),
-                KVADDR, &cpudata[0], 
-		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata),
-                "cpudata array", FAULT_ON_ERROR);
+                for (i = 0; i < vt->nr_zones; i++) {
 
-        for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && 
-	     cpudata[i]; i++) {
-		BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit);
+			if (!readmem(node_zones, KVADDR, zonebuf,
+			    SIZE_OPTION(zone_struct, zone),
+			    "zone buffer", FAULT_ON_ERROR))
+				break; 
 
-                readmem(cpudata[i]+OFFSET(cpucache_s_avail),
-                        KVADDR, &avail, sizeof(int),
-                        "cpucache avail", FAULT_ON_ERROR);
+			value1 = ULONG(zonebuf + 
+				OFFSET_OPTION(zone_struct_name, zone_name));
 
-		if (!avail) 
-			continue;
+                        if (!read_string(value1, buf1, BUFSIZE-1))
+                                sprintf(buf1, "(unknown) ");
 
-		if (avail > vt->kmem_max_limit) {
-			error(INFO, 
-	  	  "\"%s\" cache: cpucache_s.avail %d greater than limit %ld\n",
-				si->curname, avail, vt->kmem_max_limit);
-			si->errors++;
+			if (VALID_MEMBER(zone_struct_size))
+				value1 = value6 = ULONG(zonebuf + 
+					OFFSET(zone_struct_size));
+			else if (VALID_MEMBER(zone_struct_memsize)) {
+				value1 = value6 = ULONG(zonebuf + 
+					OFFSET(zone_struct_memsize));
+			} else if (VALID_MEMBER(zone_spanned_pages)) {
+				value1 = ULONG(zonebuf + 
+					OFFSET(zone_spanned_pages));
+				value6 = ULONG(zonebuf + 
+					OFFSET(zone_present_pages));
+			} else error(FATAL, 
+			    	"zone struct has unknown size field\n");
+
+			value2 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_min,
+				zone_struct_pages_min));
+			value3 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_low,
+				zone_struct_pages_low));
+			value4 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_high,
+				zone_struct_pages_high));
+			value5 = ULONG(zonebuf + OFFSET_OPTION(zone_free_pages,
+				zone_struct_free_pages));
+
+			fprintf(fp, 
+			    "NODE: %d  ZONE: %d  ADDR: %lx  NAME: \"%s\"\n", 
+				n, i, node_zones, buf1);
+
+			if (!value1) {
+				fprintf(fp, "  [unpopulated]\n");
+				goto next_zone;
+			}
+			fprintf(fp, "  SIZE: %ld", value1);
+			if (value6 < value1) 
+				fprintf(fp, "  PRESENT: %ld", value6);
+			fprintf(fp, "  MIN/LOW/HIGH: %ld/%ld/%ld",
+				value2, value3, value4);
+
+			if (VALID_MEMBER(zone_vm_stat)) 
+			    	dump_vm_stat("NR_FREE_PAGES", (long *)&value5, 
+			    		node_zones + OFFSET(zone_vm_stat));
+
+			if (VALID_MEMBER(zone_nr_active) && 
+			    VALID_MEMBER(zone_nr_inactive)) {
+				value1 = ULONG(zonebuf + 
+					OFFSET(zone_nr_active));
+				value2 = ULONG(zonebuf + 
+					OFFSET(zone_nr_inactive));
+				fprintf(fp, 
+			    "\n  NR_ACTIVE: %ld  NR_INACTIVE: %ld  FREE: %ld\n",
+					value1, value2, value5); 
+				if (VALID_MEMBER(zone_vm_stat)) {
+					fprintf(fp, "  VM_STAT:\n");
+					dump_vm_stat(NULL, NULL, node_zones +
+						OFFSET(zone_vm_stat));
+				}
+			} else if (VALID_MEMBER(zone_vm_stat) &&
+				dump_vm_stat("NR_ACTIVE", (long *)&value1, 
+				node_zones + OFFSET(zone_vm_stat)) &&
+				dump_vm_stat("NR_INACTIVE", (long *)&value2, 
+				node_zones + OFFSET(zone_vm_stat))) {
+				fprintf(fp, "\n  VM_STAT:\n");
+				dump_vm_stat(NULL, NULL, node_zones + 
+					OFFSET(zone_vm_stat));
+			} else {
+				fprintf(fp, "  FREE: %ld\n", value5); 
+				goto next_zone;
+			}
+
+			if (VALID_MEMBER(zone_all_unreclaimable)) {
+				ivalue = UINT(zonebuf + 
+					OFFSET(zone_all_unreclaimable));
+				fprintf(fp, "  ALL_UNRECLAIMABLE: %s  ", 
+					ivalue ? "yes" : "no");
+			} else if (VALID_MEMBER(zone_flags) &&
+				enumerator_value("ZONE_ALL_UNRECLAIMABLE", 
+				(long *)&value1)) {
+				value2 = ULONG(zonebuf + OFFSET(zone_flags));
+				value3 = value2 & (1 << value1);
+				fprintf(fp, "  ALL_UNRECLAIMABLE: %s  ", 
+					value3 ? "yes" : "no");
+			}
+
+			if (VALID_MEMBER(zone_pages_scanned)) {
+				value1 = ULONG(zonebuf + 
+					OFFSET(zone_pages_scanned));
+				fprintf(fp, "PAGES_SCANNED: %ld  ", value1);
+			} 
+			fprintf(fp, "\n");
+
+next_zone:
+			fprintf(fp, "\n");
+			node_zones += SIZE_OPTION(zone_struct, zone);
 		}
 
-		if (CRASHDEBUG(2))
-			fprintf(fp, "%s: cpu[%d] avail: %d\n", 
-				si->curname, i, avail);
+		if ((n+1) < vt->numnodes)
+			pgdat = vt->node_table[n+1].pgdat;
+		else
+			pgdat = 0;
+	}
 
-                readmem(cpudata[i]+SIZE(cpucache_s),
-                        KVADDR, si->cpudata[i],
-			sizeof(void *) * avail,
-                        "cpucache avail", FAULT_ON_ERROR);
+	FREEBUF(zonebuf);
 
-		if (CRASHDEBUG(2))
-			for (j = 0; j < avail; j++)
-				fprintf(fp, "  %lx\n", si->cpudata[i][j]);
-        }
 }
 
 /*
- *  Updated for 2.6 slab percpu data structure.
+ *  Gather essential information regarding each memory node.
  */
 static void
-gather_cpudata_list_v2(struct meminfo *si)
+node_table_init(void)
 {
-        int i, j;
-	int avail;
-        ulong cpudata[NR_CPUS];
-
-        readmem(si->cache+OFFSET(kmem_cache_s_array),
-                KVADDR, &cpudata[0], 
-		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
-                "array_cache array", FAULT_ON_ERROR);
+	int n;
+	ulong pgdat;
 
-        for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && 
-	     cpudata[i]; i++) {
-		BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit);
+	/*
+	 *  Override numnodes -- some kernels may leave it at 1 on a system
+	 *  with multiple memory nodes.
+	 */
+	if ((vt->flags & NODES) && (VALID_MEMBER(pglist_data_node_next) || 
+	    VALID_MEMBER(pglist_data_pgdat_next))) {
 
-                readmem(cpudata[i]+OFFSET(array_cache_avail),
-                        KVADDR, &avail, sizeof(int),
-                        "array cache avail", FAULT_ON_ERROR);
+	        get_symbol_data("pgdat_list", sizeof(void *), &pgdat);
+	
+	        for (n = 0; pgdat; n++) {
+	                readmem(pgdat + OFFSET_OPTION(pglist_data_node_next,
+	                        pglist_data_pgdat_next), KVADDR,
+	                        &pgdat, sizeof(void *), "pglist_data node_next",
+	                        FAULT_ON_ERROR);
+		}
+		if (n != vt->numnodes) {
+			if (CRASHDEBUG(2))
+				error(NOTE, "changing numnodes from %d to %d\n",
+					vt->numnodes, n);
+			vt->numnodes = n;
+		}
+	} else
+		vt->flags &= ~NODES;
 
-		if (!avail) 
-			continue;
+       	if (!(vt->node_table = (struct node_table *)
+	    malloc(sizeof(struct node_table) * vt->numnodes)))
+		error(FATAL, "cannot malloc node_table %s(%d nodes)",
+			vt->numnodes > 1 ? "array " : "", vt->numnodes);
 
-		if (avail > vt->kmem_max_limit) {
-			error(INFO, 
-	  	  "\"%s\" cache: array_cache.avail %d greater than limit %ld\n",
-				si->curname, avail, vt->kmem_max_limit);
-			si->errors++;
-		}
+	BZERO(vt->node_table, sizeof(struct node_table) * vt->numnodes);
 
-		if (CRASHDEBUG(2))
-			fprintf(fp, "%s: cpu[%d] avail: %d\n", 
-				si->curname, i, avail);
+	dump_memory_nodes(MEMORY_NODES_INITIALIZE);
 
-                readmem(cpudata[i]+SIZE(array_cache),
-                        KVADDR, si->cpudata[i],
-			sizeof(void *) * avail,
-                        "array_cache avail", FAULT_ON_ERROR);
+        qsort((void *)vt->node_table, (size_t)vt->numnodes,
+                sizeof(struct node_table), compare_node_data);
 
-		if (CRASHDEBUG(2))
-			for (j = 0; j < avail; j++)
-				fprintf(fp, "  %lx\n", si->cpudata[i][j]);
-        }
+	if (CRASHDEBUG(2))
+		dump_memory_nodes(MEMORY_NODES_DUMP);
 }
 
 /*
- *  Check whether a given address is contained in the previously-gathered
- *  percpu object cache.
+ *  The comparison function must return an integer less than,
+ *  equal to, or greater than zero if the first argument is
+ *  considered to be respectively less than, equal to, or
+ *  greater than the second.  If two members compare as equal,
+ *  their order in the sorted array is undefined.
  */
 
 static int
-check_cpudata_list(struct meminfo *si, ulong obj)
+compare_node_data(const void *v1, const void *v2)
 {
-        int i, j;
+        struct node_table *t1, *t2;
 
-        for (i = 0; i < vt->kmem_max_cpus; i++) {
-                for (j = 0; si->cpudata[i][j]; j++)
-			if (si->cpudata[i][j] == obj) {
-				si->cpu = i;
-				return TRUE;
-			}
-	}
+        t1 = (struct node_table *)v1;
+        t2 = (struct node_table *)v2;
 
-	return FALSE;
+        return (t1->node_id < t2->node_id ? -1 :
+                t1->node_id == t2->node_id ? 0 : 1);
 }
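/*
 * Illustrative, self-contained sketch (not tied to crash internals):
 * sorting a hypothetical node table by node_id with qsort() and a
 * comparator that follows the contract described above.  The demo_*
 * names and sample data are assumptions made for the example.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_node { int node_id; unsigned long start_paddr; };

static int demo_compare(const void *v1, const void *v2)
{
	const struct demo_node *t1 = v1, *t2 = v2;

	/* negative, zero, or positive -- same contract as compare_node_data() */
	return (t1->node_id < t2->node_id ? -1 :
		t1->node_id == t2->node_id ? 0 : 1);
}

int main(void)
{
	struct demo_node nodes[] = {
		{ 2, 0x80000000UL }, { 0, 0x0UL }, { 1, 0x40000000UL }
	};
	size_t i, count = sizeof(nodes) / sizeof(nodes[0]);

	qsort(nodes, count, sizeof(nodes[0]), demo_compare);
	for (i = 0; i < count; i++)
		printf("node %d starts at %lx\n",
		    nodes[i].node_id, nodes[i].start_paddr);
	return 0;
}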
 
 
 /*
- *  Search the various memory subsystems for instances of this address.
- *  Start with the most specific areas, ending up with at least the 
- *  mem_map page data.
+ *  Depending upon the processor, and whether we're running live or on a 
+ *  dumpfile, get the system page size.
  */
-static void
-kmem_search(struct meminfo *mi)
+uint
+memory_page_size(void)
 {
-	struct syment *sp;
-	struct meminfo tmp_meminfo;
-	char buf[BUFSIZE];
-	ulong vaddr, orig_flags;
-	physaddr_t paddr;
-	ulong offset;
-
-	switch (mi->memtype)
-	{
-	case KVADDR:
-		vaddr = mi->spec_addr;
-		break;
+	uint psz;
 
-	case PHYSADDR:
-		vaddr = mi->spec_addr < VTOP(vt->high_memory) ?
-			PTOV(mi->spec_addr) : BADADDR;
-		break;
-	}
+	if (machdep->pagesize)
+		return machdep->pagesize;
 
-	orig_flags = mi->flags;
-	mi->retval = 0;
+	if (REMOTE_MEMSRC()) 
+		return remote_page_size();
 
-	/*
-	 *  Check first for a possible symbolic display of the virtual
-	 *  address associated with mi->spec_addr or PTOV(mi->spec_addr).
-	 */
-	if (((vaddr >= kt->stext) && (vaddr <= kt->end)) ||
-	    IS_MODULE_VADDR(mi->spec_addr)) {
-		if ((sp = value_search(vaddr, &offset))) {
-			show_symbol(sp, offset, SHOW_LINENUM | SHOW_RADIX());
-			fprintf(fp, "\n");
-		}
-	}
+	switch (pc->flags & MEMORY_SOURCES)
+	{
+	case DISKDUMP:
+		psz = diskdump_page_size();
+		break;
 
-	/*
-	 *  Check for a valid mapped address.
-	 */
-	if ((mi->memtype == KVADDR) && IS_VMALLOC_ADDR(mi->spec_addr)) {
-		if (kvtop(NULL, mi->spec_addr, &paddr, 0)) {
-			mi->flags = orig_flags;
-        		dump_vmlist(mi);
-			fprintf(fp, "\n");
-			mi->spec_addr = paddr;
-			mi->memtype = PHYSADDR;
-		}
-		goto mem_map;
-	}
-	/*
-	 *  If the address is physical, check whether it's in vmalloc space.
-	 */
+        case XENDUMP:
+                psz = xendump_page_size();
+                break;
 
-	if (mi->memtype == PHYSADDR) {
-		mi->flags = orig_flags;
-		mi->flags |= GET_PHYS_TO_VMALLOC;
-		mi->retval = 0;
-        	dump_vmlist(mi);
-		mi->flags &= ~GET_PHYS_TO_VMALLOC;
+	case KDUMP:
+		psz = kdump_page_size();
+		break;
 
-		if (mi->retval) {
-			if ((sp = value_search(mi->retval, &offset))) {
-                        	show_symbol(sp, offset, 
-					SHOW_LINENUM | SHOW_RADIX());
-                        	fprintf(fp, "\n");
-                	}
-        		dump_vmlist(mi);
-			fprintf(fp, "\n");
-			goto mem_map;
-		}
-	}
+	case NETDUMP:
+		psz = netdump_page_size();
+		break;
 
-	/*
-         *  Check whether the containing page belongs to the slab subsystem.
-	 */
-	mi->flags = orig_flags;
-	mi->retval = 0;
-	if ((vaddr != BADADDR) && vaddr_to_kmem_cache(vaddr, buf)) {
-		BZERO(&tmp_meminfo, sizeof(struct meminfo));
-		tmp_meminfo.spec_addr = vaddr;
-		tmp_meminfo.memtype = KVADDR;
-		tmp_meminfo.flags = mi->flags;
-		vt->dump_kmem_cache(&tmp_meminfo);
-		fprintf(fp, "\n");
-	}
+	case MCLXCD:
+		psz = (uint)mclx_page_size();
+		break;
 
-	/*
-	 *  Check free list.
-	 */
-	mi->flags = orig_flags;
-	mi->retval = 0;
-	vt->dump_free_pages(mi);
-	if (mi->retval)
-		fprintf(fp, "\n");
+	case LKCD:
+#if 0							/* REMIND: */
+		psz = lkcd_page_size();			/* dh_dump_page_size is HW page size; should add dh_page_size */
+#else
+		psz = (uint)getpagesize();
+#endif
+		break;
 
-	if (vt->page_hash_table) {
-		/*
-		 *  Check the page cache.
-		 */
-		mi->flags = orig_flags;
-		mi->retval = 0;
-		dump_page_hash_table(mi);
-		if (mi->retval)
-			fprintf(fp, "\n");
-	}
+	case DEVMEM:                      
+	case MEMMOD:
+	case CRASHBUILTIN:
+		psz = (uint)getpagesize();  
+		break;
 
-mem_map:
-	mi->flags = orig_flags;
-        dump_mem_map(mi);
+	case S390D:
+		psz = s390_page_size();
+		break;
 
-	if (!mi->retval)
-		fprintf(fp, "%llx: address not found\n", mi->spec_addr);
+	default:
+		error(FATAL, "memory_page_size: invalid pc->flags: %lx\n", 
+			pc->flags & MEMORY_SOURCES); 
+	}
 
+	return psz;
 }
 
 /*
- *  Determine whether an address is a page pointer from the mem_map[] array.
- *  If the caller requests it, return the associated physical address.
+ *  If the page size cannot be determined by the dumpfile (like kdump),
+ *  and the processor default cannot be used, allow the force-feeding
+ *  of a crash command-line page size option.
  */
-int
-is_page_ptr(ulong addr, physaddr_t *phys)
+void
+force_page_size(char *s)
 {
-	int n;
-        ulong ppstart, ppend;
-	struct node_table *nt;
-	ulong pgnum, node_size;
+	int k, err;
+	ulong psize;
 
-	for (n = 0; n < vt->numnodes; n++) {
-		nt = &vt->node_table[n];
-                if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
-	        	node_size = vt->max_mapnr;
-		else
-	        	node_size = nt->size;
+	k = 1;
+	err = FALSE;
 
-        	ppstart = nt->mem_map;
-		ppend = ppstart + (node_size * SIZE(page));
+	switch (LASTCHAR(s))
+	{
+	case 'k':
+	case 'K':
+		LASTCHAR(s) = NULLCHAR;
+		if (!decimal(s, 0)) {
+			err = TRUE;
+			break;
+		}
+		k = 1024;
 
-		if ((addr < ppstart) || (addr >= ppend))
-                	continue;
+		/* FALLTHROUGH */
 
-		/*
-		 *  We're in the mem_map range -- but it is a page pointer?
-		 */
-	        if ((addr - ppstart) % SIZE(page))
-			return FALSE;
+	default:
+        	if (decimal(s, 0))
+                	psize = dtol(s, QUIET|RETURN_ON_ERROR, &err);
+        	else if (hexadecimal(s, 0))
+                	psize = htol(s, QUIET|RETURN_ON_ERROR, &err);
+		else
+			err = TRUE;
+		break;
+	}
 
-		if (phys) {
-			pgnum = (addr - nt->mem_map) / SIZE(page);
-			*phys = (pgnum * PAGESIZE()) + nt->start_paddr;
-		}
+	if (err) 
+		error(INFO, "invalid page size: %s\n", s);
+	else
+		machdep->pagesize = psize * k;
+}
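/*
 * Minimal standalone sketch of the suffix handling above: a trailing
 * 'k'/'K' multiplies a decimal value by 1024, otherwise the string is
 * taken as a plain byte count (base 0: decimal, leading-0 octal, or
 * 0x-prefixed hex).  parse_page_size() is hypothetical and only
 * illustrates the idea.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_page_size(const char *arg)
{
	char buf[64];
	unsigned long k = 1, value;
	char *end;
	size_t len = strlen(arg);

	if (len == 0 || len >= sizeof(buf))
		return 0;
	strcpy(buf, arg);

	/* strip a trailing 'k'/'K' and remember the multiplier */
	if (buf[len - 1] == 'k' || buf[len - 1] == 'K') {
		buf[len - 1] = '\0';
		k = 1024;
	}

	value = strtoul(buf, &end, 0);
	if (*end != '\0')
		return 0;	/* reject trailing junk */

	return value * k;
}

int main(void)
{
	printf("%lu %lu %lu\n",
	    parse_page_size("4k"),		/* 4096  */
	    parse_page_size("16384"),		/* 16384 */
	    parse_page_size("0x1000"));		/* 4096  */
	return 0;
}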
 
-		return TRUE;
-	}
 
-	return FALSE;
+/*
+ *  Return the vmalloc address referenced by the first vm_struct
+ *  on the vmlist.  This can normally be used by the machine-specific
+ *  xxx_vmalloc_start() routines.
+ */
 
-#ifdef PRE_NODES
-        ppstart = vt->mem_map;
-	ppend = ppstart + (vt->total_pages * vt->page_struct_len);
+ulong
+first_vmalloc_address(void)
+{
+        ulong vmlist, addr;
 
-	if ((addr < ppstart) || (addr >= ppend)) 
-		return FALSE;
+        get_symbol_data("vmlist", sizeof(void *), &vmlist);
 
-	if ((addr - ppstart) % vt->page_struct_len)
-		return FALSE;
+	if (!vmlist)
+		return 0;
 
-	return TRUE;
-#endif
+        if (!readmem(vmlist+OFFSET(vm_struct_addr), KVADDR, &addr, 
+	    sizeof(void *), "first vmlist addr", RETURN_ON_ERROR)) 
+		non_matching_kernel();
+
+        return addr;
 }
 
 /*
- *  Return the physical address associated with this page pointer.
+ *  Return the current vmalloc address limit, caching the
+ *  value after the first lookup.
  */
-static int 
-page_to_phys(ulong pp, physaddr_t *phys)
+
+static ulong
+last_vmalloc_address(void)
 {
-	return(is_page_ptr(pp, phys));
-}
+	struct meminfo meminfo;
+	static ulong vmalloc_limit = 0;
+
+	if (!vmalloc_limit) {
+		BZERO(&meminfo, sizeof(struct meminfo));
+		meminfo.memtype = KVADDR;
+		meminfo.spec_addr = 0;
+		meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST);
+		dump_vmlist(&meminfo);
+		vmalloc_limit = meminfo.retval;
+	}
 
+	return vmalloc_limit;
+}
 
 /*
- *  Return the page pointer associated with this physical address.
+ *  Determine whether an identity-mapped virtual address
+ *  refers to an existent physical page, and if not, bump
+ *  it up to the next node.
  */
-static int 
-phys_to_page(physaddr_t phys, ulong *pp)
+static int
+next_identity_mapping(ulong vaddr, ulong *nextvaddr)
 {
 	int n;
-        ulong pgnum;
         struct node_table *nt;
-	physaddr_t pstart, pend;
+        ulonglong paddr, pstart, pend;
 	ulong node_size;
 
+	paddr = VTOP(vaddr);
+
         for (n = 0; n < vt->numnodes; n++) {
                 nt = &vt->node_table[n];
                 if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
                         node_size = vt->max_mapnr;
                 else
-                        node_size = nt->size;
+	                node_size = nt->size;
 
                 pstart = nt->start_paddr;
                 pend = pstart + ((ulonglong)node_size * PAGESIZE());
 
-                if ((phys < pstart) || (phys >= pend))
+		/*
+		 *  Check the next node.
+		 */
+                if (paddr >= pend)
+			continue;
+		/*
+		 *  Bump up to the next node.
+		 */
+                if (paddr < pstart) {
+			*nextvaddr = PTOV(paddr);
                         continue;
+		}
                 /*
-                 *  We're in the physical range -- calculate the page.
+                 *  We're in the physical range.
                  */
-		pgnum = BTOP(phys - pstart);
-		*pp = nt->mem_map + (pgnum * SIZE(page));
-
+		*nextvaddr = vaddr;
                 return TRUE;
         }
 
 	return FALSE;
+}
 
-#ifdef PRE_NODES
-	if (phys >= (vt->total_pages * PAGESIZE()))
-		return FALSE;
 
-	pgnum = PTOB(BTOP(phys)) / PAGESIZE();
-	*pp = vt->mem_map + (pgnum * vt->page_struct_len);
-	
-	return TRUE;
-#endif
-}
+/*
+ *  Return the L1 cache size in bytes, which can be found stored in the
+ *  cache_cache.
+ */
+
+int
+l1_cache_size(void)
+{
+	ulong cache;
+	ulong c_align;
+	int colour_off;
+	int retval;
+
+	retval = -1;
+
+	if (VALID_MEMBER(kmem_cache_s_c_align)) {
+        	cache = symbol_value("cache_cache");
+                readmem(cache+OFFSET(kmem_cache_s_c_align),
+                	KVADDR, &c_align, sizeof(ulong),
+                        "c_align", FAULT_ON_ERROR);
+		retval = (int)c_align;
+	} else if (VALID_MEMBER(kmem_cache_s_colour_off)) {
+        	cache = symbol_value("cache_cache");
+                readmem(cache+OFFSET(kmem_cache_s_colour_off),
+                	KVADDR, &colour_off, sizeof(int),
+                        "colour_off", FAULT_ON_ERROR);
+		retval = colour_off;
+	}
 
+	return retval;
+}
 
 /*
- *  Try to read a string of non-NULL characters from a memory location, 
- *  returning the number of characters read.
+ *  Multi-purpose routine used to query/control dumpfile memory usage.
  */
 int
-read_string(ulong kvaddr, char *buf, int maxlen)
+dumpfile_memory(int cmd)
 {
-	char strbuf[MIN_PAGE_SIZE];
-        ulong kp;
-	char *bufptr;
-	long cnt, size;
+	int retval;
 
-        BZERO(buf, maxlen);
-	BZERO(strbuf, MIN_PAGE_SIZE);
+	retval = 0;
 
-	kp = kvaddr;
-	bufptr = strbuf;
-	size = maxlen;
+	if (!DUMPFILE())
+		return retval;
 
-	while (size > 0) {
-        	cnt = MIN_PAGE_SIZE - (kp & (MIN_PAGE_SIZE-1)); 
- 
-        	if (cnt > size)
-                        cnt = size;
+	switch (cmd)
+	{
+	case DUMPFILE_MEM_USED:
+                if (REMOTE_DUMPFILE()) 
+                        retval = remote_memory_used();
+		else if (pc->flags & NETDUMP)
+        		retval = netdump_memory_used();
+		else if (pc->flags & KDUMP)
+        		retval = kdump_memory_used();
+		else if (pc->flags & XENDUMP)
+        		retval = xendump_memory_used();
+		else if (pc->flags & DISKDUMP)
+        		retval = diskdump_memory_used();
+		else if (pc->flags & LKCD)
+        		retval = lkcd_memory_used();
+		else if (pc->flags & MCLXCD)
+                        retval = vas_memory_used();
+		else if (pc->flags & S390D)
+			retval = s390_memory_used();
+		break;
 
-                if (!readmem(kp, KVADDR, bufptr, cnt,
-                    "readstring characters", QUIET|RETURN_ON_ERROR))
-                        break;
+	case DUMPFILE_FREE_MEM:
+                if (REMOTE_DUMPFILE())
+                        retval = remote_free_memory();
+                else if (pc->flags & NETDUMP)
+			retval = netdump_free_memory();
+                else if (pc->flags & KDUMP)
+			retval = kdump_free_memory();
+                else if (pc->flags & XENDUMP)
+			retval = xendump_free_memory();
+                else if (pc->flags & DISKDUMP)
+			retval = diskdump_free_memory();
+                else if (pc->flags & LKCD)
+                        retval = lkcd_free_memory();
+                else if (pc->flags & MCLXCD)
+                        retval = vas_free_memory(NULL);
+                else if (pc->flags & S390D)
+                        retval = s390_free_memory();
+		break;
 
-		if (count_buffer_chars(bufptr, NULLCHAR, cnt))
-			break;
+	case DUMPFILE_MEM_DUMP:
+		if (REMOTE_DUMPFILE())
+                        retval = remote_memory_dump(0);
+                else if (pc->flags & NETDUMP) 
+                        retval = netdump_memory_dump(fp);
+                else if (pc->flags & KDUMP) 
+                        retval = kdump_memory_dump(fp);
+                else if (pc->flags & XENDUMP) 
+                        retval = xendump_memory_dump(fp);
+                else if (pc->flags & DISKDUMP) 
+                        retval = diskdump_memory_dump(fp);
+                else if (pc->flags & LKCD) 
+                        retval = lkcd_memory_dump(set_lkcd_fp(fp));
+                else if (pc->flags & MCLXCD)
+                        retval = vas_memory_dump(fp);
+                else if (pc->flags & S390D)
+                        retval = s390_memory_dump(fp);
+		break;
+	
+	case DUMPFILE_ENVIRONMENT:
+                if (pc->flags & LKCD) {
+                        set_lkcd_fp(fp);
+                        dump_lkcd_environment(0);
+		} else if (pc->flags & REM_LKCD) 
+                        retval = remote_memory_dump(VERBOSE);
+		break;
+	}
+
+	return retval;
+}
+
+/* 
+ *  Functions for sparse mem support 
+ */
+ulong 
+sparse_decode_mem_map(ulong coded_mem_map, ulong section_nr)
+{
+        return coded_mem_map + 
+	    (section_nr_to_pfn(section_nr) * SIZE(page));
+}
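/*
 * Self-contained sketch of the decode arithmetic above, using
 * hypothetical sizes: a 64-byte struct page and 2^15 pages per
 * section.  The sparsemem encoding stores section_mem_map as the
 * section's mem_map minus its starting pfn (in struct page units),
 * so adding that offset back recovers the page array address.
 * All demo_* names and the sample address are assumptions.
 */
#include <stdio.h>

#define DEMO_STRUCT_PAGE_SIZE	64UL
#define DEMO_PFN_SECTION_SHIFT	15UL	/* pages per section = 1 << 15 */

static unsigned long demo_section_nr_to_pfn(unsigned long section_nr)
{
	return section_nr << DEMO_PFN_SECTION_SHIFT;
}

static unsigned long demo_decode_mem_map(unsigned long coded_mem_map,
					 unsigned long section_nr)
{
	return coded_mem_map +
	    (demo_section_nr_to_pfn(section_nr) * DEMO_STRUCT_PAGE_SIZE);
}

int main(void)
{
	/* hypothetical 64-bit value; assumes a 64-bit build */
	unsigned long coded = 0xffff810000000000UL;
	unsigned long section_nr = 3;

	printf("mem_map for section %lu: %lx\n",
	    section_nr, demo_decode_mem_map(coded, section_nr));
	return 0;
}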
+
+void
+sparse_mem_init(void)
+{
+	ulong addr;
+	ulong mem_section_size;
+	int dimension;
+
+	if (!IS_SPARSEMEM())
+		return;
+
+	MEMBER_OFFSET_INIT(mem_section_section_mem_map, "mem_section",
+		"section_mem_map");
+	STRUCT_SIZE_INIT(mem_section, "mem_section");
+
+	if (!MAX_PHYSMEM_BITS())
+		error(FATAL, 
+		    "CONFIG_SPARSEMEM kernels not supported for this architecture\n");
 
-                kp += cnt;
-                bufptr += cnt;
-                size -= cnt;
+	if ((get_array_length("mem_section", &dimension, 0) ==
+	    (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME())) || !dimension)
+		vt->flags |= SPARSEMEM_EX;
+
+	if (IS_SPARSEMEM_EX()) {
+		machdep->sections_per_root = _SECTIONS_PER_ROOT_EXTREME();
+		mem_section_size = sizeof(void *) * NR_SECTION_ROOTS();
+	} else {
+		machdep->sections_per_root = _SECTIONS_PER_ROOT();
+		mem_section_size = SIZE(mem_section) * NR_SECTION_ROOTS();
 	}
 
-	strcpy(buf, strbuf);
-	return (strlen(buf));
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "PAGESIZE = %d\n", PAGESIZE());
+		fprintf(fp, "mem_section_size = %ld\n", mem_section_size);
+		fprintf(fp, "NR_SECTION_ROOTS = %ld\n", NR_SECTION_ROOTS());
+		fprintf(fp, "NR_MEM_SECTIONS = %ld\n", NR_MEM_SECTIONS());
+		fprintf(fp, "SECTIONS_PER_ROOT = %ld\n", SECTIONS_PER_ROOT());
+		fprintf(fp, "SECTION_ROOT_MASK = 0x%lx\n", SECTION_ROOT_MASK());
+		fprintf(fp, "PAGES_PER_SECTION = %ld\n", PAGES_PER_SECTION());
+	}
+
+	if (!(vt->mem_sec = (void *)malloc(mem_section_size)))
+		error(FATAL, "cannot malloc mem_sec cache\n");
+	if (!(vt->mem_section = (char *)malloc(SIZE(mem_section))))
+		error(FATAL, "cannot malloc mem_section cache\n");
+
+	addr = symbol_value("mem_section");
+	readmem(addr, KVADDR, vt->mem_sec, mem_section_size,
+		"memory section root table", FAULT_ON_ERROR);
 }
 
-/*
- *  "help -v" output
- */
-void
-dump_vm_table(int verbose)
+char *
+read_mem_section(ulong addr)
 {
-	int i;
-	struct node_table *nt;
-	int others;
+	if ((addr == 0) || !IS_KVADDR(addr))
+		return 0;
+	
+	readmem(addr, KVADDR, vt->mem_section, SIZE(mem_section),
+		"memory section", FAULT_ON_ERROR);
 
-	others = 0;
-	fprintf(fp, "              flags: %lx  %s(", 
-		vt->flags, count_bits_long(vt->flags) > 4 ? "\n " : "");
-	if (vt->flags & NODES)
-		fprintf(fp, "%sNODES", others++ ? "|" : "");
-	if (vt->flags & ZONES)
-		fprintf(fp, "%sZONES", others++ ? "|" : "");
-	if (vt->flags & PERCPU_KMALLOC_V1)
-		fprintf(fp, "%sPERCPU_KMALLOC_V1", others++ ? "|" : "");
-	if (vt->flags & PERCPU_KMALLOC_V2)
-		fprintf(fp, "%sPERCPU_KMALLOC_V2", others++ ? "|" : "");
-	if (vt->flags & COMMON_VADDR)
-		fprintf(fp, "%sCOMMON_VADDR", others++ ? "|" : "");
-	if (vt->flags & KMEM_CACHE_INIT)
-		fprintf(fp, "%sKMEM_CACHE_INIT", others++ ? "|" : "");
-	if (vt->flags & V_MEM_MAP)
-		fprintf(fp, "%sV_MEM_MAP", others++ ? "|" : "");
-	if (vt->flags & KMEM_CACHE_UNAVAIL)
-		fprintf(fp, "%sKMEM_CACHE_UNAVAIL", others++ ? "|" : "");
-	if (vt->flags & DISCONTIGMEM)
-		fprintf(fp, "%sDISCONTIGMEM", others++ ? "|" : "");
-	fprintf(fp, ")\n");
-	if (vt->kernel_pgd[0] == vt->kernel_pgd[1])
-       		fprintf(fp, "     kernel_pgd[NR_CPUS]: %lx ...\n", 
-			vt->kernel_pgd[0]);
-	else {
-       		fprintf(fp, "     kernel_pgd[NR_CPUS]: ");
-		for (i = 0; i < NR_CPUS; i++) {
-			if ((i % 4) == 0)
-				fprintf(fp, "\n     ");
-			fprintf(fp, "%lx ", vt->kernel_pgd[i]);
-		}
-		fprintf(fp, "\n");
-	}
-        fprintf(fp, "        high_memory: %lx\n", vt->high_memory);
-        fprintf(fp, "      vmalloc_start: %lx\n", vt->vmalloc_start);
-        fprintf(fp, "            mem_map: %lx\n", vt->mem_map);
-        fprintf(fp, "        total_pages: %ld\n", vt->total_pages);
-        fprintf(fp, "          max_mapnr: %ld\n", vt->max_mapnr);
-        fprintf(fp, "     totalram_pages: %ld\n", vt->totalram_pages);
-        fprintf(fp, "    totalhigh_pages: %ld\n", vt->totalhigh_pages);
-        fprintf(fp, "      num_physpages: %ld\n", vt->num_physpages);
-	fprintf(fp, "    page_hash_table: %lx\n", vt->page_hash_table);
-	fprintf(fp, "page_hash_table_len: %d\n", vt->page_hash_table_len);
-	fprintf(fp, "     kmem_max_c_num: %ld\n", vt->kmem_max_c_num);
-	fprintf(fp, "     kmem_max_limit: %ld\n", vt->kmem_max_limit);
-	fprintf(fp, "      kmem_max_cpus: %ld\n", vt->kmem_max_cpus);
-	fprintf(fp, "   kmem_cache_count: %ld\n", vt->kmem_cache_count);
-	fprintf(fp, " kmem_cache_namelen: %d\n", vt->kmem_cache_namelen);
-	fprintf(fp, "        PG_reserved: %lx\n", vt->PG_reserved);
-	fprintf(fp, "            PG_slab: %ld\n", vt->PG_slab);
-	fprintf(fp, "        paddr_prlen: %d\n", vt->paddr_prlen);
-	fprintf(fp, "           numnodes: %d\n", vt->numnodes);
-	fprintf(fp, "           nr_zones: %d\n", vt->nr_zones);
-	fprintf(fp, "      nr_free_areas: %d\n", vt->nr_free_areas);
-	for (i = 0; i < vt->numnodes; i++) {
-		nt = &vt->node_table[i];
-		fprintf(fp, "      node_table[%d]: \n", i);
-		fprintf(fp, "                 id: %d\n", nt->node_id);
-		fprintf(fp, "              pgdat: %lx\n", nt->pgdat);
-		fprintf(fp, "               size: %ld\n", nt->size);
-		fprintf(fp, "            mem_map: %lx\n", nt->mem_map);
-		fprintf(fp, "        start_paddr: %llx\n", nt->start_paddr);
-		fprintf(fp, "        start_mapnr: %ld\n", nt->start_mapnr);
-	}
+	return vt->mem_section;
+}
 
-	fprintf(fp, "    dump_free_pages: ");
-	if (vt->dump_free_pages == dump_free_pages)
-		fprintf(fp, "dump_free_pages()\n");
-	else if (vt->dump_free_pages == dump_free_pages_zones_v1)
-		fprintf(fp, "dump_free_pages_zones_v1()\n");
-	else if (vt->dump_free_pages == dump_free_pages_zones_v2)
-		fprintf(fp, "dump_free_pages_zones_v2()\n");
-	else if (vt->dump_free_pages == dump_multidimensional_free_pages)
-		fprintf(fp, "dump_multidimensional_free_pages()\n");
-	else
-		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_free_pages);
+ulong
+nr_to_section(ulong nr)
+{
+	ulong addr;
+	ulong *mem_sec = vt->mem_sec;
 
-	fprintf(fp, "    dump_kmem_cache: ");
-	if (vt->dump_kmem_cache == dump_kmem_cache)
-		fprintf(fp, "dump_kmem_cache()\n");
-	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v1)
-		fprintf(fp, "dump_kmem_cache_percpu_v1()\n");
-	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v2)
-		fprintf(fp, "dump_kmem_cache_percpu_v2()\n");
+	if ((mem_sec[SECTION_NR_TO_ROOT(nr)] == 0) || 
+	    !IS_KVADDR(mem_sec[SECTION_NR_TO_ROOT(nr)]))
+		return 0;
+
+	if (IS_SPARSEMEM_EX())
+		addr = mem_sec[SECTION_NR_TO_ROOT(nr)] + 
+		    (nr & SECTION_ROOT_MASK()) * SIZE(mem_section);
 	else
-		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_kmem_cache);
-	fprintf(fp, "          slab_data: %lx\n", (ulong)vt->slab_data);
-	if (verbose) 
-		dump_saved_slab_data();
-	fprintf(fp, "       nr_swapfiles: %d\n", vt->nr_swapfiles);
-	fprintf(fp, "     last_swap_read: %lx\n", vt->last_swap_read);
-	fprintf(fp, "   swap_info_struct: %lx\n", (ulong)vt->swap_info_struct);
+		addr = symbol_value("mem_section") +
+		    (SECTIONS_PER_ROOT() * SECTION_NR_TO_ROOT(nr) +
+			(nr & SECTION_ROOT_MASK())) * SIZE(mem_section);
 
-	dump_vma_cache(VERBOSE);
+	if (!IS_KVADDR(addr))
+		return 0;
+
+	return addr;
 }
 
 /*
- *  Calculate the amount of memory referenced in the kernel-specific "nodes".
+ * We use the lower bits of the mem_map pointer to store
+ * a little bit of information.  There should be at least
+ * 3 bits here due to 32-bit alignment.
  */
-uint64_t
-total_node_memory()
+#define SECTION_MARKED_PRESENT	(1UL<<0)
+#define SECTION_HAS_MEM_MAP	(1UL<<1)
+#define SECTION_MAP_LAST_BIT	(1UL<<2)
+#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
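/*
 * Small self-contained sketch of the flag encoding above: the low bits
 * of section_mem_map carry the PRESENT/HAS_MEM_MAP flags, and masking
 * with the MAP mask recovers the (coded) pointer value.  The demo_*
 * names and the sample address are hypothetical.
 */
#include <stdio.h>

#define DEMO_SECTION_MARKED_PRESENT	(1UL << 0)
#define DEMO_SECTION_HAS_MEM_MAP	(1UL << 1)
#define DEMO_SECTION_MAP_LAST_BIT	(1UL << 2)
#define DEMO_SECTION_MAP_MASK		(~(DEMO_SECTION_MAP_LAST_BIT - 1))

int main(void)
{
	/* hypothetical 64-bit value; assumes a 64-bit build */
	unsigned long coded_map = 0xffff810000000000UL;
	unsigned long raw = coded_map |
	    DEMO_SECTION_MARKED_PRESENT | DEMO_SECTION_HAS_MEM_MAP;

	printf("present:     %s\n",
	    (raw & DEMO_SECTION_MARKED_PRESENT) ? "yes" : "no");
	printf("has mem_map: %s\n",
	    (raw & DEMO_SECTION_HAS_MEM_MAP) ? "yes" : "no");
	printf("coded map:   %lx\n", raw & DEMO_SECTION_MAP_MASK);
	return 0;
}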
+
+
+int 
+valid_section(ulong addr)
 {
-	int i;
-	struct node_table *nt;
-	uint64_t total;
+	char *mem_section;
 
-        for (i = total = 0; i < vt->numnodes; i++) {
-                nt = &vt->node_table[i];
+	if ((mem_section = read_mem_section(addr)))
+        	return (ULONG(mem_section +
+			OFFSET(mem_section_section_mem_map)) &
+			SECTION_MARKED_PRESENT);
+	return 0;
+}
 
-		if (CRASHDEBUG(1)) {
-                	console("node_table[%d]: \n", i);
-                	console("           id: %d\n", nt->node_id);
-                	console("        pgdat: %lx\n", nt->pgdat);
-                	console("         size: %ld\n", nt->size);
-                	console("      mem_map: %lx\n", nt->mem_map);
-                	console("  start_paddr: %lx\n", nt->start_paddr);
-                	console("  start_mapnr: %ld\n", nt->start_mapnr);
-		}
+int 
+section_has_mem_map(ulong addr)
+{
+	char *mem_section;
 
-		total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE());
-        }
+	if ((mem_section = read_mem_section(addr)))
+		return (ULONG(mem_section + 
+			OFFSET(mem_section_section_mem_map))
+			& SECTION_HAS_MEM_MAP);
+	return 0;
+}
 
-	return total;
+ulong 
+section_mem_map_addr(ulong addr)
+{   
+	char *mem_section;
+	ulong map;
+
+	if ((mem_section = read_mem_section(addr))) {
+		map = ULONG(mem_section + 
+			OFFSET(mem_section_section_mem_map));
+		map &= SECTION_MAP_MASK;
+		return map;
+	}
+	return 0;
 }
 
-/*
- *  Dump just the vm_area_struct cache table data so that it can be
- *  called from above or for debug purposes.
- */
-void
-dump_vma_cache(ulong verbose)
+
+ulong 
+valid_section_nr(ulong nr)
 {
-	int i;
-        ulong vhits;
+	ulong addr = nr_to_section(nr);
 
-	if (!verbose)
-		goto show_hits;
+	if (valid_section(addr))
+		return addr;
 
-        for (i = 0; i < VMA_CACHE; i++)
-                fprintf(fp, "     cached_vma[%2d]: %lx (%ld)\n",
-                        i, vt->cached_vma[i],
-                        vt->cached_vma_hits[i]);
-        fprintf(fp, "          vma_cache: %lx\n", (ulong)vt->vma_cache);
-        fprintf(fp, "    vma_cache_index: %d\n", vt->vma_cache_index);
-        fprintf(fp, "    vma_cache_fills: %ld\n", vt->vma_cache_fills);
-	fflush(fp);
+	return 0;
+}
 
-show_hits:
-        if (vt->vma_cache_fills) {
-                for (i = vhits = 0; i < VMA_CACHE; i++)
-                        vhits += vt->cached_vma_hits[i];
+ulong 
+pfn_to_map(ulong pfn)
+{
+	ulong section, page_offset;
+	ulong section_nr;
+	ulong coded_mem_map, mem_map;
 
-                fprintf(stderr, "%s       vma hit rate: %2ld%% (%ld of %ld)\n",
-			verbose ? "" : "  ",
-                        (vhits * 100)/vt->vma_cache_fills,
-                        vhits, vt->vma_cache_fills);
-        }
+	section_nr = pfn_to_section_nr(pfn);
+	if (!(section = valid_section_nr(section_nr))) 
+		return 0;
+
+	if (section_has_mem_map(section)) {
+		page_offset = pfn - section_nr_to_pfn(section_nr);
+		coded_mem_map = section_mem_map_addr(section);
+		mem_map = sparse_decode_mem_map(coded_mem_map, section_nr) +
+			(page_offset * SIZE(page));
+		return mem_map;
+	}
+
+	return 0;
 }
 
-/*
- *  Guess at the "real" amount of physical memory installed, formatting
- *  it in a MB or GB based string.
- */
-char *
-get_memory_size(char *buf)
+void 
+dump_mem_sections(void)
 {
-	uint64_t total;
-	ulong next_gig;
-#ifdef OLDWAY
-	ulong mbs, gbs;
-#endif
+	ulong nr,addr;
+	ulong nr_mem_sections;
+	ulong coded_mem_map, mem_map, pfn;
+	char buf1[BUFSIZE];
+	char buf2[BUFSIZE];
+	char buf3[BUFSIZE];
+	char buf4[BUFSIZE];
 
-	total = machdep->memory_size();
+	nr_mem_sections = NR_MEM_SECTIONS();
 
-	if ((next_gig = roundup(total, GIGABYTES(1)))) {
-		if ((next_gig - total) <= MEGABYTES(64))
-			total = next_gig;
+	fprintf(fp, "\n");
+	pad_line(fp, BITS32() ? 59 : 67, '-');
+        fprintf(fp, "\n\nNR  %s  %s  %s  PFN\n",
+                mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SECTION"),
+                mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "CODED_MEM_MAP"),
+                mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP"));
+
+	for (nr = 0; nr < nr_mem_sections; nr++) {
+		if ((addr = valid_section_nr(nr))) {
+			coded_mem_map = section_mem_map_addr(addr);
+			mem_map = sparse_decode_mem_map(coded_mem_map,nr);
+			pfn = section_nr_to_pfn(nr);
+
+        		fprintf(fp, "%2ld  %s  %s  %s  %s\n",
+                		nr,
+                		mkstring(buf1, VADDR_PRLEN,
+                        	CENTER|LONG_HEX, MKSTR(addr)),
+                		mkstring(buf2, VADDR_PRLEN,
+                        	CENTER|LONG_HEX|RJUST, MKSTR(coded_mem_map)),
+                		mkstring(buf3, VADDR_PRLEN,
+                        	CENTER|LONG_HEX|RJUST, MKSTR(mem_map)),
+				pc->output_radix == 10 ?
+                		mkstring(buf4, VADDR_PRLEN,
+                        	LONG_DEC|LJUST, MKSTR(pfn)) :
+                		mkstring(buf4, VADDR_PRLEN,
+                        	LONG_HEX|LJUST, MKSTR(pfn)));
+		}
 	}
+}
 
-	return (pages_to_size((ulong)(total/PAGESIZE()), buf));
+void 
+list_mem_sections(void)
+{
+	ulong nr,addr;
+	ulong nr_mem_sections = NR_MEM_SECTIONS();
+	ulong coded_mem_map;
+
+	for (nr = 0; nr < nr_mem_sections; nr++) {
+		if ((addr = valid_section_nr(nr))) {
+			coded_mem_map = section_mem_map_addr(addr);
+			fprintf(fp,
+			    "nr=%ld section=%lx coded_mem_map=%lx pfn=%ld mem_map=%lx\n",
+				nr,
+				addr,
+				coded_mem_map,
+				section_nr_to_pfn(nr),
+				sparse_decode_mem_map(coded_mem_map,nr));
+		}
+	}
+}
 
-#ifdef OLDWAY
-	gbs = (ulong)(total/GIGABYTES(1));
-	mbs = (ulong)(total/MEGABYTES(1));
-	if (gbs) 
-		mbs = (total % GIGABYTES(1))/MEGABYTES(1);
+/*
+ *  For kernels containing the node_online_map or node_states[], 
+ *  return the number of online node bits set.
+ */
+static int
+get_nodes_online(void)
+{
+	int i, len, online;
+	struct gnu_request req;
+	ulong *maskptr;
+	long N_ONLINE;
+	ulong mapaddr;
 
-        if (total%MEGABYTES(1))
-                mbs++;
+	if (!symbol_exists("node_online_map") && 
+	    !symbol_exists("node_states")) 
+		return 0;
 
-	if (gbs) 
-		sprintf(buf, mbs ? "%ld GB %ld MB" : "%ld GB", gbs, mbs);
-	else 
-		sprintf(buf, "%ld MB", mbs);
+	if (symbol_exists("node_online_map")) {
+		if (LKCD_KERNTYPES()) {
+                	if ((len = STRUCT_SIZE("nodemask_t")) < 0)
+       				error(FATAL,
+					"cannot determine type nodemask_t\n");
+			mapaddr = symbol_value("node_online_map");
+		} else {
+			len = get_symbol_type("node_online_map", NULL, &req)
+			    == TYPE_CODE_UNDEF ?  sizeof(ulong) : req.length;
+			mapaddr = symbol_value("node_online_map");
+		}
+	} else if (symbol_exists("node_states")) {
+		if ((get_symbol_type("node_states", NULL, &req) != TYPE_CODE_ARRAY) ||
+		    !(len = get_array_length("node_states", NULL, 0)) ||
+		    !enumerator_value("N_ONLINE", &N_ONLINE))
+			return 0;
+		len = req.length / len;
+		mapaddr = symbol_value("node_states") + (N_ONLINE * len);
+	}
+
+       	if (!(vt->node_online_map = (ulong *)malloc(len)))
+       		error(FATAL, "cannot malloc node_online_map\n");
+
+ 	if (!readmem(mapaddr, KVADDR, 
+	    (void *)&vt->node_online_map[0], len, "node_online_map", 
+	    QUIET|RETURN_ON_ERROR))
+		error(FATAL, "cannot read node_online_map/node_states\n");
+
+	vt->node_online_map_len = len/sizeof(ulong);
+
+	online = 0;
+
+	maskptr = (ulong *)vt->node_online_map;
+	for (i = 0; i < vt->node_online_map_len; i++, maskptr++)
+		online += count_bits_long(*maskptr);
 
-	return buf;
-#endif
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "node_online_map: [");
+		for (i = 0; i < vt->node_online_map_len; i++)
+			fprintf(fp, "%s%lx", i ? ", " : "",  vt->node_online_map[i]);
+		fprintf(fp, "] -> nodes online: %d\n", online);
+	}
+
+	if (online)
+		vt->numnodes = online;
+
+	return online;
 }
 
 /*
- *  For use by architectures not having machine-specific manners for
- *  best determining physical memory size.
- */ 
-uint64_t
-generic_memory_size(void)
+ *  Return the next node index, with "first" being the first acceptable node.
+ */
+static int
+next_online_node(int first)
 {
-	if (machdep->memsize)
-		return machdep->memsize;
+	int i, j, node;
+	ulong mask, *maskptr;
 
-        return (machdep->memsize = total_node_memory());
+	if ((first/BITS_PER_LONG) >= vt->node_online_map_len) {
+		error(INFO, "next_online_node: %d is too large!\n", first);
+		return -1;
+	}
+
+	maskptr = (ulong *)vt->node_online_map;
+	for (i = node = 0; i <  vt->node_online_map_len; i++, maskptr++) {
+		mask = *maskptr;
+        	for (j = 0; j < BITS_PER_LONG; j++, node++) {
+                	if (mask & 1) {
+				if (node >= first)
+					return node;
+			}
+               	 	mask >>= 1;
+        	}
+	}
+
+	return -1;
 }
 
 /*
- *  Determine whether a virtual address is user or kernel or ambiguous.
- */ 
-int
-vaddr_type(ulong vaddr, struct task_context *tc)
+ *  Modify appropriately for architecture/kernel nuances.
+ */
+static ulong
+next_online_pgdat(int node)
 {
-	int memtype, found;
+        char buf[BUFSIZE];
+	ulong pgdat;
 
-	if (!tc)
-		tc = CURRENT_CONTEXT();
-	memtype = found = 0;
+	/*
+  	 *  Default -- look for type: struct pglist_data node_data[]
+	 */
+	if (LKCD_KERNTYPES()) {
+		if (!kernel_symbol_exists("node_data"))
+			goto pgdat2;
+		/* 
+		 *  Just index into node_data[] without checking that it is
+		 *  an array; kerntypes have no such symbol information.
+	 	 */
+	} else {
+		if (get_symbol_type("node_data", NULL, NULL) != TYPE_CODE_ARRAY)
+			goto pgdat2;
+
+	        open_tmpfile();
+	        sprintf(buf, "whatis node_data");
+	        if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) {
+	                close_tmpfile();
+			goto pgdat2;
+	        }
+	        rewind(pc->tmpfile);
+	        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+	                if (STRNEQ(buf, "type = "))
+	                        break;
+	        }
+	        close_tmpfile();
+
+		if ((!strstr(buf, "struct pglist_data *") &&
+		     !strstr(buf, "pg_data_t *")) ||
+		    (count_chars(buf, '[') != 1) ||
+		    (count_chars(buf, ']') != 1))
+			goto pgdat2;
+	}
+
+	if (!readmem(symbol_value("node_data") + (node * sizeof(void *)), 
+	    KVADDR, &pgdat, sizeof(void *), "node_data", RETURN_ON_ERROR) ||
+	    !IS_KVADDR(pgdat))
+		goto pgdat2;
+
+	return pgdat;
+
+pgdat2:
+	if (LKCD_KERNTYPES()) {
+		if (!kernel_symbol_exists("pgdat_list"))
+			goto pgdat3;
+	} else {
+		if (get_symbol_type("pgdat_list",NULL,NULL) != TYPE_CODE_ARRAY)
+			goto pgdat3;
 
-	if (machdep->is_uvaddr(vaddr, tc)) {
-		memtype |= UVADDR;
-		found++;
-	}
+	        open_tmpfile();
+	        sprintf(buf, "whatis pgdat_list");
+	        if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) {
+	                close_tmpfile();
+			goto pgdat3;
+	        }
+	        rewind(pc->tmpfile);
+	        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+	                if (STRNEQ(buf, "type = "))
+	                        break;
+	        }
+	        close_tmpfile();
 
-	if (machdep->is_kvaddr(vaddr)) {
-		memtype |= KVADDR;
-		found++;
+		if ((!strstr(buf, "struct pglist_data *") &&
+		     !strstr(buf, "pg_data_t *")) ||
+		    (count_chars(buf, '[') != 1) ||
+		    (count_chars(buf, ']') != 1))
+			goto pgdat3;
 	}
 
-	if (found == 1)
-		return memtype;
-	else
-		return AMBIGUOUS;
-}
+	if (!readmem(symbol_value("pgdat_list") + (node * sizeof(void *)), 
+	    KVADDR, &pgdat, sizeof(void *), "pgdat_list", RETURN_ON_ERROR) ||
+	    !IS_KVADDR(pgdat))
+		goto pgdat3;
 
-/*
- * Determine the first valid user space address
- */
-static int
-address_space_start(struct task_context *tc, ulong *addr)
-{
-        ulong vma;
-        char *vma_buf;
+	return pgdat;
 
-        if (!tc->mm_struct)
-                return FALSE;
+pgdat3:
+	if (symbol_exists("contig_page_data") && (node == 0))
+		return symbol_value("contig_page_data");
 
-        fill_mm_struct(tc->mm_struct);
-        vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
-        if (!vma)
-                return FALSE;
-	vma_buf = fill_vma_cache(vma);
-        *addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
-	
-	return TRUE;
+	return 0;
 }
 
 /*
- *  Search for a given value between a starting and ending address range,
- *  applying an optional mask for "don't care" bits.  As an alternative
- *  to entering the starting address value, -k means "start of kernel address
- *  space".  For processors with ambiguous user/kernel address spaces,
- *  -u or -k must be used (with or without -s) as a differentiator.
+ *  Make the vm_stat[] array contents easily accessible.
  */
-void
-cmd_search(void)
+static int
+vm_stat_init(void)
 {
-        int c;
-	ulong start, end, mask, memtype, len;
-	ulong uvaddr_end;
-	int sflag;
-	struct meminfo meminfo;
-	ulong value_array[MAXARGS];
-	struct syment *sp;
+        char buf[BUFSIZE];
+        char *arglist[MAXARGS];
+	int i, c, stringlen, total;
+        struct gnu_request *req;
+	char *start;
 
-	start = end = mask = sflag = memtype = len = 0;
-	uvaddr_end = COMMON_VADDR_SPACE() ? (ulong)(-1) : machdep->kvbase;
-	BZERO(value_array, sizeof(ulong) * MAXARGS);
+	if (vt->flags & VM_STAT)
+		return TRUE;
 
-        while ((c = getopt(argcnt, args, "l:uks:e:v:m:")) != EOF) {
-                switch(c)
-                {
-		case 'u':
-			if (!sflag) {
-				address_space_start(CURRENT_CONTEXT(),&start);
-				sflag++;
-			}
-			memtype = UVADDR;
-			sflag++;
-			break;
+	if ((vt->nr_vm_stat_items == -1) || !symbol_exists("vm_stat"))
+		goto bailout;
 
-		case 'k':
-			if (!sflag) {
-				start = machdep->kvbase;
-				sflag++;
-			}
-			memtype = KVADDR;
-			sflag++;
-			break;
+        /*
+         *  look for type: type = atomic_long_t []
+         */
+	if (LKCD_KERNTYPES()) {
+        	if (!symbol_exists("vm_stat"))
+			goto bailout;
+		/* 
+		 *  Just assume that vm_stat is an array; there is
+		 *  no symbol info in a kerntypes file. 
+		 */
+	} else {
+		if (!symbol_exists("vm_stat") ||
+		    get_symbol_type("vm_stat", NULL, NULL) != TYPE_CODE_ARRAY)
+			goto bailout;
+
+	        open_tmpfile();
+	        sprintf(buf, "whatis vm_stat");
+	        if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) {
+	                close_tmpfile();
+			goto bailout;
+	        }
+	        rewind(pc->tmpfile);
+	        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+	                if (STRNEQ(buf, "type = "))
+	                        break;
+	        }
+	        close_tmpfile();
 
-		case 's':
-			if ((sp = symbol_search(optarg)))
-				start = sp->value;
-			else
-				start = htol(optarg, FAULT_ON_ERROR, NULL);
-			sflag++;
-			break;
+	        if (!strstr(buf, "atomic_long_t") ||
+	            (count_chars(buf, '[') != 1) ||
+	            (count_chars(buf, ']') != 1))
+	                goto bailout;
+	}
+
+        open_tmpfile();
+        req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
+        req->command = GNU_GET_DATATYPE;
+        req->name = "zone_stat_item";
+        req->flags = GNU_PRINT_ENUMERATORS;
+        gdb_interface(req);
+        FREEBUF(req);
 
-		case 'e':
-                        if ((sp = symbol_search(optarg)))
-                                end = sp->value;
-                        else
-                        	end = htol(optarg, FAULT_ON_ERROR, NULL);
-                        break;
+	stringlen = 1;
 
-		case 'l':
-			len = stol(optarg, FAULT_ON_ERROR, NULL);
+        rewind(pc->tmpfile);
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (strstr(buf, "{") || strstr(buf, "}"))
+			continue;
+		clean_line(buf);
+		c = parse_line(buf, arglist);
+		if (STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) {
+			vt->nr_vm_stat_items = atoi(arglist[2]);
 			break;
-
-		case 'm':
-                        mask = htol(optarg, FAULT_ON_ERROR, NULL);
-                        break;
-
-                default:
-                        argerrs++;
-                        break;
-                }
+		} else
+			stringlen += strlen(arglist[0]);
         }
 
-        if (argerrs || !sflag || !args[optind] || (len && end))
-                cmd_usage(pc->curcmd, SYNOPSIS);
+	total = stringlen + vt->nr_vm_stat_items + 
+		(sizeof(void *) * vt->nr_vm_stat_items);
+        if (!(vt->vm_stat_items = (char **)malloc(total))) {
+		close_tmpfile();
+                error(FATAL, "cannot malloc vm_stat_items cache\n");
+	}
 
-	if (!memtype)
-		memtype = vaddr_type(start, CURRENT_CONTEXT());
+	start = (char *)&vt->vm_stat_items[vt->nr_vm_stat_items];
 
-	switch (memtype)
-	{
-	case UVADDR:
-		if (!IS_UVADDR(start, CURRENT_CONTEXT())) {
-			error(INFO, "invalid user virtual address: %lx\n", 
-				start);
-                	cmd_usage(pc->curcmd, SYNOPSIS);
+        rewind(pc->tmpfile);
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+                if (strstr(buf, "{") || strstr(buf, "}"))
+                        continue;
+		c = parse_line(buf, arglist);
+		i = atoi(arglist[2]);
+		if (i < vt->nr_vm_stat_items) {
+			vt->vm_stat_items[i] = start;
+			strcpy(start, arglist[0]);
+			start += strlen(arglist[0]) + 1;
 		}
-		break;
+        }
+	close_tmpfile();
 
-	case KVADDR:
-		if (!IS_KVADDR(start)) {
-			error(INFO, "invalid kernel virtual address: %lx\n",
-				start);
-               		cmd_usage(pc->curcmd, SYNOPSIS);
-		}
-		break;
+	vt->flags |= VM_STAT;
+	return TRUE;
 
-	case AMBIGUOUS:	
-		error(INFO, 
-		    "ambiguous virtual address: %lx  (requires -u or -k)\n",
-			start);
-               	cmd_usage(pc->curcmd, SYNOPSIS);
+bailout:
+	vt->nr_vm_stat_items = -1;
+	return FALSE;
+}
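/*
 * Self-contained sketch of the packed string-table layout built above:
 * a single malloc'd block holds an array of char * pointers followed
 * by the NUL-terminated names they point at.  The sample names and
 * variables are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *names[] = { "NR_FREE_PAGES", "NR_INACTIVE", "NR_ACTIVE" };
	int i, count = 3;
	size_t stringlen = 1;
	char **items, *start;

	for (i = 0; i < count; i++)
		stringlen += strlen(names[i]);

	/* pointer array first, then the string pool, in one allocation */
	items = malloc(stringlen + count + sizeof(void *) * count);
	if (!items)
		return 1;

	start = (char *)&items[count];
	for (i = 0; i < count; i++) {
		items[i] = start;
		strcpy(start, names[i]);
		start += strlen(names[i]) + 1;
	}

	for (i = 0; i < count; i++)
		printf("item %d: %s\n", i, items[i]);

	free(items);
	return 0;
}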
+
+/*
+ *  Either dump all vm_stat entries, or return the value of
+ *  the specified vm_stat item.  Use the global counter unless
+ *  a zone-specific address is passed.
+ */
+static int
+dump_vm_stat(char *item, long *retval, ulong zone)
+{
+	char *buf;
+	ulong *vp;
+	ulong location;
+	int i;
+
+	if (!vm_stat_init()) {
+		if (!item)
+			if (CRASHDEBUG(1))
+				error(INFO, 
+			    	    "vm_stat not available in this kernel\n");
+		return FALSE;
 	}
 
-	if (!end && !len) {
-		switch (memtype)
-		{
-		case UVADDR:
-			end = uvaddr_end;
-			break;
+	buf = GETBUF(sizeof(ulong) * vt->nr_vm_stat_items);
 
-		case KVADDR:
-			if (vt->vmalloc_start < machdep->identity_map_base)
-				end = (ulong)(-1);
-			else {
-				meminfo.memtype = KVADDR;
-				meminfo.spec_addr = 0;
-				meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST);
-				dump_vmlist(&meminfo);
-				end = meminfo.retval;
-			}
-			break;
-		}
-	} else if (len)  
-		end = start + len;
+	location = zone ? zone : symbol_value("vm_stat");
 
-	switch (memtype)
-	{
-	case UVADDR:
-		if (end > uvaddr_end) {
-			error(INFO, 
-	          "address range starts in user space and ends kernel space\n");
-               		cmd_usage(pc->curcmd, SYNOPSIS);
-		}
-			/* FALLTHROUGH */
-	case KVADDR:
-		if (end < start) {
-			error(INFO, 
-			   "ending address %lx is below starting address %lx\n",
-				end, start);
-               		cmd_usage(pc->curcmd, SYNOPSIS);
-		}
-		break;
+	readmem(location, KVADDR, buf, 
+	    sizeof(ulong) * vt->nr_vm_stat_items, 
+	    "vm_stat", FAULT_ON_ERROR);
+
+	if (!item) {
+		if (!zone)
+			fprintf(fp, "  VM_STAT:\n");
+		vp = (ulong *)buf;
+		for (i = 0; i < vt->nr_vm_stat_items; i++)
+			fprintf(fp, "%23s: %ld\n", vt->vm_stat_items[i], vp[i]);
+		return TRUE;
 	}
 
-	c = 0;
-	while (args[optind]) {
-		value_array[c] = htol(args[optind], FAULT_ON_ERROR, NULL);
-		c++;
-		optind++;
+	vp = (ulong *)buf;
+	for (i = 0; i < vt->nr_vm_stat_items; i++) {
+		if (STREQ(vt->vm_stat_items[i], item)) {
+			*retval = vp[i];
+			return TRUE;
+		}
 	}
 
-	search(start, end, mask, memtype, value_array, c);
+	return FALSE;
 }
 
 /*
- *  Do the work for cmd_search().
+ *  Dump the cumulative totals of the per_cpu__page_states counters.
  */
+int
+dump_page_states(void)
+{
+	struct syment *sp;
+	ulong addr, value;
+	int i, c, fd, len, instance, members;
+	char buf[BUFSIZE];
+        char *arglist[MAXARGS];
+	struct entry {
+		char *name;
+		ulong value;
+	} *entry_list;
+	struct stat stat;
+	char *namebuf, *nameptr;
 
-#define SEARCHMASK(X) ((X) | mask) 
+	if (!(sp = symbol_search("per_cpu__page_states"))) {
+		if (CRASHDEBUG(1))
+			error(INFO, "per_cpu__page_states "
+			    "not available in this kernel\n");
+		return FALSE;
+	}
 
-static void
-search(ulong start, ulong end, ulong mask, int memtype, ulong *value, int vcnt)
-{
-	int i, j;
-	ulong pp, next, *ubp;
-	int wordcnt, lastpage;
-	ulong page;
-	physaddr_t paddr;
-	char *pagebuf;
+	instance = members = len = 0;
 
-	if (start & (sizeof(long)-1)) {
-		start &= ~(sizeof(long)-1);
-		error(INFO, "rounding down start address to: %lx\n", start);
+        sprintf(buf, "ptype struct page_state");
+
+	open_tmpfile();
+        if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) {
+		close_tmpfile();
+		return FALSE;
 	}
 
-	pagebuf = GETBUF(PAGESIZE());
-	next = start;
+	fflush(pc->tmpfile);
+	fd = fileno(pc->tmpfile);
+	fstat(fd, &stat);
+	namebuf = GETBUF(stat.st_size);
+	nameptr = namebuf;
 
-	for (pp = VIRTPAGEBASE(start); next < end; next = pp) {
-		lastpage = (VIRTPAGEBASE(next) == VIRTPAGEBASE(end));
-		if (LKCD_DUMPFILE())
-			set_lkcd_nohash();
+	rewind(pc->tmpfile);
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (strstr(buf, "struct page_state") ||
+		    strstr(buf, "}"))
+			continue;
+		members++;
+	}
 
-                switch (memtype)
-                {
-                case UVADDR:
-                        if (!uvtop(CURRENT_CONTEXT(), pp, &paddr, 0) ||
-                            !phys_to_page(paddr, &page)) { 
-				if (!next_upage(CURRENT_CONTEXT(), pp, &pp)) 
-					return;
-                                continue;
-			}
-                        break;
+	entry_list = (struct entry *)
+		GETBUF(sizeof(struct entry) * members);
 
-                case KVADDR:
-                        if (!kvtop(CURRENT_CONTEXT(), pp, &paddr, 0) ||
-                            !phys_to_page(paddr, &page)) {
-				if (!next_kpage(pp, &pp))
-					return;
-                                continue;
-			}
-                        break;
-                }
+	rewind(pc->tmpfile);
+	i = 0;
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (strstr(buf, "struct page_state") ||
+		    strstr(buf, "}"))
+			continue;
+		strip_ending_char(strip_linefeeds(buf), ';');
+		c = parse_line(buf, arglist);
+		strcpy(nameptr, arglist[c-1]);
+		entry_list[i].name = nameptr;
+		if (strlen(nameptr) > len)
+			len = strlen(nameptr);
+		nameptr += strlen(nameptr)+2;
+		i++;
+	}
+	close_tmpfile();
+
+	open_tmpfile();
+
+        for (c = 0; c < kt->cpus; c++) {
+                addr = sp->value + kt->__per_cpu_offset[c];
+		dump_struct("page_state", addr, RADIX(16));
+        }
 
-                if (!readmem(paddr, PHYSADDR, pagebuf, PAGESIZE(),
-                    "search page", RETURN_ON_ERROR|QUIET)) {
-			pp += PAGESIZE();
+	i = 0;
+	rewind(pc->tmpfile);
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (strstr(buf, "struct page_state")) {
+			instance++;
+			i = 0;
 			continue;
 		}
+		if (strstr(buf, "}"))
+			continue;	
+		strip_linefeeds(buf);
+		extract_hex(buf, &value, ',', TRUE);
+		entry_list[i].value += value;
+		i++;
+        }
 
-		ubp = (ulong *)&pagebuf[next - pp];
-		if (lastpage) {
-			if (end == (ulong)(-1))
-				wordcnt = PAGESIZE()/sizeof(long);
-			else
-				wordcnt = (end - next)/sizeof(long);
-		} else
-			wordcnt = (PAGESIZE() - (next - pp))/sizeof(long);
+	close_tmpfile();
 
-		for (i = 0; i < wordcnt; i++, ubp++, next += sizeof(long)) {
-			for (j = 0; j < vcnt; j++) {
-				if (SEARCHMASK(*ubp) == SEARCHMASK(value[j])) 
-					fprintf(fp, "%lx: %lx\n", next, *ubp);
-			}
-		}
+	fprintf(fp, "  PAGE_STATES:\n");
+	for (i = 0; i < members; i++) {
+		sprintf(buf, "%s", entry_list[i].name);
+		fprintf(fp, "%s", mkstring(buf, len+2, RJUST, 0));
+		fprintf(fp, ": %ld\n", entry_list[i].value);
+	}
 
-		if (CRASHDEBUG(1))
-			if ((pp % (1024*1024)) == 0)
-				console("%lx\n", pp);
+	FREEBUF(namebuf);
+	FREEBUF(entry_list);
 
-		pp += PAGESIZE();
-	}
+	return TRUE;
 }
 
 
-/*
- *  Return the next mapped user virtual address page that comes after 
- *  the passed-in address.
+/* 
+ *  Dump the cumulative totals of the per_cpu__vm_event_states
+ *  counters.
  */
-static int
-next_upage(struct task_context *tc, ulong vaddr, ulong *nextvaddr)
+static int 
+dump_vm_event_state(void)
 {
-	ulong vma, total_vm;
-	int found;
-	char *vma_buf;
-        ulong vm_start, vm_end;
-	void *vm_next;
-
-        if (!tc->mm_struct)
-                return FALSE;
-
-        fill_mm_struct(tc->mm_struct);
-	vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
-	total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm));
+	int i, c;
+	struct syment *sp;
+	ulong addr;
+	ulong *events, *cumulative;
 
-	if (!vma || (total_vm == 0))
+	if (!vm_event_state_init())
 		return FALSE;
 
-	vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE();  /* first possible page */
-
-        for (found = FALSE; vma; vma = (ulong)vm_next) {
-                vma_buf = fill_vma_cache(vma);
+	events = (ulong *)GETBUF((sizeof(ulong) * vt->nr_vm_event_items) * 2);
+	cumulative = &events[vt->nr_vm_event_items];
 
-                vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
-                vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end));
-                vm_next = VOID_PTR(vma_buf + OFFSET(vm_area_struct_vm_next));
+        sp = symbol_search("per_cpu__vm_event_states");
 
-		if (vaddr <= vm_start) {
-			*nextvaddr = vm_start;
-			return TRUE;
+        for (c = 0; c < kt->cpus; c++) {
+                addr = sp->value + kt->__per_cpu_offset[c];
+		if (CRASHDEBUG(1)) {
+			fprintf(fp, "[%d]: %lx\n", c, addr);
+			dump_struct("vm_event_state", addr, RADIX(16));
 		}
+                readmem(addr, KVADDR, events,
+                    sizeof(ulong) * vt->nr_vm_event_items, 
+		    "vm_event_states buffer", FAULT_ON_ERROR);
+		for (i = 0; i < vt->nr_vm_event_items; i++)
+			cumulative[i] += events[i];
+        }
 
-		if ((vaddr > vm_start) && (vaddr < vm_end)) {
-			*nextvaddr = vaddr;
-			return TRUE;
-		}
-	}
+	fprintf(fp, "\n  VM_EVENT_STATES:\n");
+	for (i = 0; i < vt->nr_vm_event_items; i++)
+		fprintf(fp, "%23s: %ld\n", vt->vm_event_items[i], cumulative[i]);
 
-	return FALSE;
+	FREEBUF(events);
+
+	return TRUE;
 }
 
-/*
- *  Return the next kernel virtual address page that comes after
- *  the passed-in address.
- */
 static int
-next_kpage(ulong vaddr, ulong *nextvaddr)
+vm_event_state_init(void)
 {
-        int n;
-        ulong paddr, vaddr_orig, node_size;
-        struct node_table *nt;
-        ulonglong pstart, pend;
-	ulong vmalloc_limit;
-	struct meminfo meminfo;
+	int i, c, stringlen, total;
+	long count;
+	struct gnu_request *req;
+	char *arglist[MAXARGS];
+	char buf[BUFSIZE];
+	char *start;
 
-	vaddr_orig = vaddr;
-	vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE();  /* first possible page */
+	if (vt->flags & VM_EVENT)
+		return TRUE;
 
-        if (vaddr < vaddr_orig)  /* wrapped back to zero? */
-                return FALSE;
+        if ((vt->nr_vm_event_items == -1) || 
+	    !symbol_exists("per_cpu__vm_event_states"))
+                goto bailout;
 
-        meminfo.memtype = KVADDR;
-        meminfo.spec_addr = 0;
-        meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST);
-        dump_vmlist(&meminfo);
-        vmalloc_limit = meminfo.retval;
+	if (!enumerator_value("NR_VM_EVENT_ITEMS", &count))
+		return FALSE;
 
-	if (IS_VMALLOC_ADDR(vaddr_orig)) {
-		if (IS_VMALLOC_ADDR(vaddr) && (vaddr < vmalloc_limit)) {
-			*nextvaddr = vaddr;
-			return TRUE;
-		}
+	vt->nr_vm_event_items = count;
 
-		if (vt->vmalloc_start < machdep->identity_map_base) {   
-			*nextvaddr = machdep->identity_map_base;
-			return TRUE;
-		}
+        open_tmpfile();
+        req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
+        req->command = GNU_GET_DATATYPE;
+        req->name = "vm_event_item";
+        req->flags = GNU_PRINT_ENUMERATORS;
+        gdb_interface(req);
+        FREEBUF(req);
 
-		return FALSE;	
-	}
+	stringlen = 1;
 
-	paddr = VTOP(vaddr);
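+	/* First pass: sum the enumerator name lengths for the cache allocation below. */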
+        rewind(pc->tmpfile);
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+		if (strstr(buf, "{") || strstr(buf, "}"))
+			continue;
+		clean_line(buf);
+		c = parse_line(buf, arglist);
+		if (STREQ(arglist[0], "NR_VM_EVENT_ITEMS"))
+			break;
+		else
+			stringlen += strlen(arglist[0]);
+        }
 
-        for (n = 0; n < vt->numnodes; n++) {
-                nt = &vt->node_table[n];
-                if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
-                        node_size = vt->max_mapnr;
-                else
-	                node_size = nt->size;
+	total = stringlen + vt->nr_vm_event_items + 
+		(sizeof(void *) * vt->nr_vm_event_items);
+        if (!(vt->vm_event_items = (char **)malloc(total))) {
+		close_tmpfile();
+                error(FATAL, "cannot malloc vm_event_items cache\n");
+	}
 
-                pstart = nt->start_paddr;
-                pend = pstart + ((ulonglong)node_size * PAGESIZE());
+	start = (char *)&vt->vm_event_items[vt->nr_vm_event_items];
 
-                if ((paddr < pstart) || (paddr >= pend))
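+	/* Second pass: store each name and point vm_event_items[] at it, indexed by enumerator value. */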
+        rewind(pc->tmpfile);
+        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
+                if (strstr(buf, "{") || strstr(buf, "}"))
                         continue;
-                /*
-                 *  We're in the physical range.
-                 */
-                return TRUE;
+		c = parse_line(buf, arglist);
+		i = atoi(arglist[2]);
+		if (i < vt->nr_vm_event_items) {
+			vt->vm_event_items[i] = start;
+			strcpy(start, arglist[0]);
+			start += strlen(arglist[0]) + 1;
+		}
         }
+	close_tmpfile();
 
-	if (vt->vmalloc_start > vaddr) {
-		*nextvaddr = vt->vmalloc_start;
-		return TRUE;
-	} else
-        	return FALSE;
+	vt->flags |= VM_EVENT;
+	return TRUE;
+
+bailout:
+	vt->nr_vm_event_items = -1;
+	return FALSE;
 }
 
+
 /*
- *  Display swap statistics.
+ *  Support for slub.c slab cache.
  */
-void
-cmd_swap(void)
+static void
+kmem_cache_init_slub(void)
 {
-        int c;
-
-        while ((c = getopt(argcnt, args, "")) != EOF) {
-                switch(c)
-                {
-                default:
-                        argerrs++;
-                        break;
-                }
-        }
+	if (CRASHDEBUG(1) &&
+	    !(vt->flags & CONFIG_NUMA) && (vt->numnodes > 1))
+		error(WARNING, 
+		    "kmem_cache_init_slub: numnodes: %d without CONFIG_NUMA\n",
+			vt->numnodes);
 
-        if (argerrs)
-                cmd_usage(pc->curcmd, SYNOPSIS);
+	vt->cpu_slab_type = MEMBER_TYPE("kmem_cache", "cpu_slab");
 
-	dump_swap_info(VERBOSE, NULL, NULL);
+	vt->flags |= KMEM_CACHE_INIT;
 }
 
-/*
- *  Do the work for cmd_swap().
- */
-
-#define SWP_USED        1
-#define SWAP_MAP_BAD    0x8000
-
-char *swap_info_hdr = \
-"FILENAME           TYPE         SIZE      USED   PCT  PRIORITY\n";
-
-static int
-dump_swap_info(ulong swapflags, ulong *totalswap_pages, ulong *totalused_pages)
+static void 
+kmem_cache_list_slub(void)
 {
-	int i, j;
-	int flags, swap_device, pages, prio, usedswap;
-	ulong swap_file, max, swap_map, pct;
-	ulong vfsmnt;
-	ulong swap_info;
-	ushort *map;
-	ulong totalswap, totalused;
+        int i, cnt;
+        ulong *cache_list;
+        ulong name;
+	char *cache_buf;
 	char buf[BUFSIZE];
 
-	if (!symbol_exists("nr_swapfiles"))
-		error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n");
+	cnt = get_kmem_cache_list(&cache_list);
+	cache_buf = GETBUF(SIZE(kmem_cache));
 
-        if (!symbol_exists("swap_info"))
-                error(FATAL, "swap_info doesn't exist in this kernel!\n");
+	for (i = 0; i < cnt; i++) {
+		fprintf(fp, "%lx ", cache_list[i]);
 
-	swap_info = symbol_value("swap_info");
+		readmem(cache_list[i], KVADDR, cache_buf, 
+			SIZE(kmem_cache), "kmem_cache buffer", 
+			FAULT_ON_ERROR);
 
-	if (swapflags & VERBOSE)
-		fprintf(fp, swap_info_hdr);
+		name = ULONG(cache_buf + OFFSET(kmem_cache_name)); 
+		if (!read_string(name, buf, BUFSIZE-1))
+			sprintf(buf, "(unknown)\n");
+		
+		fprintf(fp, "%s\n", buf);
+	}
 
-	totalswap = totalused = 0;
+	FREEBUF(cache_list);
+	FREEBUF(cache_buf);
+}
 
-	for (i = 0; i < vt->nr_swapfiles; i++, 
-	    swap_info += SIZE(swap_info_struct)){
-		fill_swap_info(swap_info);
+#define DUMP_KMEM_CACHE_INFO_SLUB()  dump_kmem_cache_info_slub(si)
 
-		flags = INT(vt->swap_info_struct + 
-			OFFSET(swap_info_struct_flags));
+static void
+dump_kmem_cache_info_slub(struct meminfo *si)
+{
+	char b1[BUFSIZE];
+	char b2[BUFSIZE];
+	int namelen, sizelen, spacelen;
 
-		if (!(flags & SWP_USED))
-			continue;
+	fprintf(fp, "%s ",
+		mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache))); 
 
-		swap_file = ULONG(vt->swap_info_struct + 
-			OFFSET(swap_info_struct_swap_file));
+	namelen = strlen(si->curname);
+	sprintf(b2, "%ld", si->objsize);
+	sizelen = strlen(b2);
+	spacelen = 0;
 
-                swap_device = INT(vt->swap_info_struct +
-                        OFFSET_OPTION(swap_info_struct_swap_device, 
-			swap_info_struct_old_block_size));
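+	/* Cache names longer than the 18-character column squeeze the size field to keep alignment. */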
+	if (namelen++ > 18) {
+		spacelen = 29 - namelen - sizelen;
+		fprintf(fp, "%s%s%ld  ", si->curname,
+			space(spacelen <= 0 ? 1 : spacelen), si->objsize); 
+		if (spacelen > 0)
+			spacelen = 1;
+		sprintf(b1, "%c%dld  ", '%', 9 + spacelen - 1);
+	} else {
+		fprintf(fp, "%-18s  %8ld  ", si->curname, si->objsize); 
+		sprintf(b1, "%c%dld  ", '%', 9);
+	}
 
-                pages = INT(vt->swap_info_struct +
-                        OFFSET(swap_info_struct_pages));
+        fprintf(fp, b1, si->inuse);
 
-		totalswap += pages;
-		pages <<= (PAGESHIFT() - 10);
+        fprintf(fp, "%8ld  %5ld  %4ldk\n",  
+		si->num_slabs * si->objects, 
+                si->num_slabs, si->slabsize/1024); 
+}
 
-                prio = INT(vt->swap_info_struct + 
-			OFFSET(swap_info_struct_prio));
+static void
+dump_kmem_cache_slub(struct meminfo *si)
+{
+	int i;
+	ulong name, oo;
+	unsigned int size, objsize, objects, order, offset;
+	char *reqname, *p1;
+	char kbuf[BUFSIZE];
+	char buf[BUFSIZE];
 
-                max = ULONG(vt->swap_info_struct +
-                        OFFSET(swap_info_struct_max));
+	if (INVALID_MEMBER(kmem_cache_node_nr_slabs)) {
+		error(INFO, 
+		    "option requires kmem_cache_node.nr_slabs member!\n"
+		    "(the kernel must be built with CONFIG_SLUB_DEBUG)\n");
+		return;
+	}
 
-                swap_map = ULONG(vt->swap_info_struct +
-                        OFFSET(swap_info_struct_swap_map));
+	si->cache_count = get_kmem_cache_list(&si->cache_list);
+	si->cache_buf = GETBUF(SIZE(kmem_cache));
 
-		if (swap_file) {
-			if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) {
-                		vfsmnt = ULONG(vt->swap_info_struct +
-                        		OFFSET(swap_info_struct_swap_vfsmnt));
-				get_pathname(swap_file, buf, BUFSIZE, 
-					1, vfsmnt);
-			} else if (VALID_MEMBER
-				(swap_info_struct_old_block_size)) {
-				get_pathname(file_to_dentry(swap_file), 
-					buf, BUFSIZE, 1, 0);
-			} else {
-				get_pathname(swap_file, buf, BUFSIZE, 1, 0);
-			}
-		} else
-			sprintf(buf, "(unknown)");
+	if (!si->reqname &&
+	     !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES)))
+		fprintf(fp, kmem_cache_hdr);
+
+	if (si->flags & ADDRESS_SPECIFIED) {
+		if ((p1 = is_slab_page(si, kbuf))) {
+			si->flags |= VERBOSE;
+			si->slab = (ulong)si->spec_addr;
+		} else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, 
+		    	VERBOSE))) {
+			error(INFO, 
+			   "address is not allocated in slab subsystem: %lx\n",
+				si->spec_addr);
+			goto bailout;
+		}
+		
+		if (si->reqname && (si->reqname != p1)) 
+			error(INFO, 
+			    "ignoring pre-selected %s cache for address: %lx\n",
+				si->reqname, si->spec_addr);
+		reqname = p1;
+	} else
+		reqname = si->reqname;
 
-		map = (ushort *)GETBUF(sizeof(ushort) * max);
+	for (i = 0; i < si->cache_count; i++) {
+		if (!readmem(si->cache_list[i], KVADDR, si->cache_buf, 
+		    SIZE(kmem_cache), "kmem_cache buffer", RETURN_ON_ERROR))
+			goto next_cache;
 
-		if (!readmem(swap_map, KVADDR, map, 
-		    sizeof(ushort) * max, "swap_info swap_map data",
-		    RETURN_ON_ERROR|QUIET)) {
-			if (swapflags & RETURN_ON_ERROR) {
-				*totalswap_pages = swap_map;
-				*totalused_pages = i;
-				return FALSE;
-			} else 
-				error(FATAL, 
-		              "swap_info[%d].swap_map at %lx is unaccessible\n",
-                        		i, swap_map);
+		name = ULONG(si->cache_buf + OFFSET(kmem_cache_name)); 
+		if (!read_string(name, buf, BUFSIZE-1))
+			sprintf(buf, "(unknown)");
+		if (reqname) {
+			if (!STREQ(reqname, buf))
+				continue;
+			fprintf(fp, kmem_cache_hdr);
 		}
-
-		usedswap = 0;
-                for (j = 0; j < max; j++) {
-                        switch (map[j])
-                        {
-                        case SWAP_MAP_BAD:
-                        case 0:
-                                continue;
-                        default:
-                                usedswap++;
-                        }
+		if (ignore_cache(si, buf)) {
+			fprintf(fp, "%lx %-18s [IGNORED]\n", 
+				si->cache_list[i], buf);
+			goto next_cache;
 		}
 
-		FREEBUF(map);
+		objsize = UINT(si->cache_buf + OFFSET(kmem_cache_objsize)); 
+		size = UINT(si->cache_buf + OFFSET(kmem_cache_size)); 
+		offset = UINT(si->cache_buf + OFFSET(kmem_cache_offset));
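+		/* Pre-2.6.27 kernels keep objects and order as separate members; later kernels pack both into "oo". */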
+		if (VALID_MEMBER(kmem_cache_objects)) {
+			objects = UINT(si->cache_buf + 
+				OFFSET(kmem_cache_objects)); 
+			order = UINT(si->cache_buf + OFFSET(kmem_cache_order)); 
+		} else if (VALID_MEMBER(kmem_cache_oo)) {
+			oo = ULONG(si->cache_buf + OFFSET(kmem_cache_oo));
+			objects = oo_objects(oo);
+			order = oo_order(oo);
+		} else
+			error(FATAL, "cannot determine "
+			    	"kmem_cache objects/order values\n");
 
-		totalused += usedswap;
-		usedswap <<= (PAGESHIFT() - 10);
-		pct = (usedswap * 100)/pages;
+		si->cache = si->cache_list[i];
+		si->curname = buf;
+		si->objsize = objsize;
+		si->size = size;
+		si->objects = objects;
+		si->slabsize = (PAGESIZE() << order);
+		si->inuse = si->num_slabs = 0;
+		si->slab_offset = offset;
+		if (!get_kmem_cache_slub_data(GET_SLUB_SLABS, si) ||
+		    !get_kmem_cache_slub_data(GET_SLUB_OBJECTS, si))
+			goto next_cache;
 
-		if (swapflags & VERBOSE)
-			fprintf(fp, "%-15s  %s    %7dk %7dk  %2ld%%     %d\n", 
-				buf, swap_device ? "PARTITION" : "  FILE   ", 
-				pages, usedswap, pct, prio);
-	}
+		DUMP_KMEM_CACHE_INFO_SLUB();
 
-	if (totalswap_pages)
-		*totalswap_pages = totalswap;
-	if (totalused_pages)
-		*totalused_pages = totalused;
+		if (si->flags & ADDRESS_SPECIFIED) {
+			if (!si->slab)
+                		si->slab = vaddr_to_slab(si->spec_addr);
+			do_slab_slub(si, VERBOSE);
+		} else if (si->flags & VERBOSE) {
+			do_kmem_cache_slub(si);
+			if (!reqname && ((i+1) < si->cache_count))
+				fprintf(fp, kmem_cache_hdr);
+		}
 
-	return TRUE;
+next_cache:
+		if (reqname) 
+			break;
+	}
+
+bailout:
+	FREEBUF(si->cache_list);
+	FREEBUF(si->cache_buf);
 }
 
 /*
- *  Translate a PTE into a swap device and offset string.
- */
-char *
-swap_location(ulonglong pte, char *buf)
+ *  Emulate the total count calculation done by the
+ *  slab_objects() sysfs function in slub.c.
+ */ 
+static int 
+get_kmem_cache_slub_data(long cmd, struct meminfo *si)
 {
-	char swapdev[BUFSIZE];
+	int i, n, node;
+	ulong total_objects, total_slabs;
+	ulong cpu_slab_ptr, node_ptr;
+	ulong node_nr_partial, node_nr_slabs;
+	int full_slabs, objects;
+	long p;
+	short inuse;
+        ulong *nodes, *per_cpu;
 
-        if (!pte)
-                return NULL;
+	/*
+	 *  nodes[n] is not being used (for now)
+	 *  per_cpu[n] is a count of cpu_slab pages per node.
+	 */
+        nodes = (ulong *)GETBUF(2 * sizeof(ulong) * vt->numnodes);
+        per_cpu = nodes + vt->numnodes;
 
-	sprintf(buf, "%s  OFFSET: %lld", 
-		get_swapdev(SWP_TYPE(pte), swapdev), SWP_OFFSET(pte));
+	total_slabs = total_objects = 0; 
 
-        return buf;
-}
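+	/* Count the active cpu_slab page on each cpu (objects or slabs, depending on cmd). */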
+	for (i = 0; i < kt->cpus; i++) {
+		cpu_slab_ptr = get_cpu_slab_ptr(si, i, NULL);
 
-/*
- *  Given the type field from a PTE, return the name of the swap device.
- */
-static char *
-get_swapdev(ulong type, char *buf)
-{
-	unsigned int i, swap_info_len;
-	ulong swap_info, swap_file;
-	ulong vfsmnt;
+		if (!cpu_slab_ptr)
+			continue;
 
-        if (!symbol_exists("nr_swapfiles"))
-                error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n");
+		if ((node = page_to_nid(cpu_slab_ptr)) < 0)
+			goto bailout;
 
-        if (!symbol_exists("swap_info"))
-                error(FATAL, "swap_info doesn't exist in this kernel!\n");
+		switch (cmd)
+		{
+		case GET_SLUB_OBJECTS:
+			if (!readmem(cpu_slab_ptr + OFFSET(page_inuse), 
+			    KVADDR, &inuse, sizeof(short), 
+			    "page inuse", RETURN_ON_ERROR))
+				return FALSE;
+			total_objects += inuse;
+			break;
 
-        swap_info = symbol_value("swap_info");
+		case GET_SLUB_SLABS:
+			total_slabs++;
+			break;
+		}
+		per_cpu[node]++;
+	}
+	
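+	/* Then walk each kmem_cache_node: partial slabs are counted directly, full slabs are derived below. */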
+	for (n = 0; n < vt->numnodes; n++) {
+		if (vt->flags & CONFIG_NUMA)
+			node_ptr = ULONG(si->cache_buf +
+				OFFSET(kmem_cache_node) +
+				(sizeof(void *)*n));
+		else
+			node_ptr = si->cache + 
+				OFFSET(kmem_cache_local_node);
 
-	swap_info_len = (i = ARRAY_LENGTH(swap_info)) ?
-		i : get_array_length("swap_info", NULL, 0);
+               	if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), 
+		    KVADDR, &node_nr_partial, sizeof(ulong), 
+		    "kmem_cache_node nr_partial", RETURN_ON_ERROR))
+			goto bailout;
+               	if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_slabs), 
+		    KVADDR, &node_nr_slabs, sizeof(ulong), 
+		    "kmem_cache_node nr_slabs", RETURN_ON_ERROR))
+			goto bailout;
 
-        sprintf(buf, "(unknown swap location)");
+		switch (cmd)
+		{
+		case GET_SLUB_OBJECTS:
+			if ((p = count_partial(node_ptr)) < 0)
+				return FALSE;
+			total_objects += p;
+			break;
 
-	if (type >= swap_info_len)
-		return buf;
+		case GET_SLUB_SLABS:
+			total_slabs += node_nr_partial;
+			break;
+		}
 
-	swap_info += (SIZE(swap_info_struct) * type);
-	fill_swap_info(swap_info);
-	swap_file = ULONG(vt->swap_info_struct + 
-		OFFSET(swap_info_struct_swap_file));
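+		/* Slabs on this node that are neither per-cpu nor partial are taken to be full. */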
+		full_slabs = node_nr_slabs - per_cpu[n] - node_nr_partial;
+		objects = si->objects;
 
-        if (swap_file) {
-		if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) {
-			vfsmnt = ULONG(vt->swap_info_struct + 
-				OFFSET(swap_info_struct_swap_vfsmnt));
-        		get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt);
-                } else if (VALID_MEMBER (swap_info_struct_old_block_size)) {
-                        get_pathname(file_to_dentry(swap_file),
-                        	buf, BUFSIZE, 1, 0);
-		} else {
-        		get_pathname(swap_file, buf, BUFSIZE, 1, 0);
+		switch (cmd)
+		{
+		case GET_SLUB_OBJECTS:
+			total_objects += (full_slabs * objects);
+			break;
+
+		case GET_SLUB_SLABS:
+			total_slabs += full_slabs;
+			break;
 		}
-        } 
 
-	return buf;
-}
+		if (!(vt->flags & CONFIG_NUMA))
+			break;
+	}
 
-/*
- *  If not currently stashed, cache the passed-in swap_info_struct.
- */
-static void
-fill_swap_info(ulong swap_info)
-{
-	if (vt->last_swap_read == swap_info)
-		return;
+	switch (cmd)
+	{
+	case GET_SLUB_OBJECTS:
+		si->inuse = total_objects;
+		break;
 
-	if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *)
-        	malloc(SIZE(swap_info_struct))))
-			error(FATAL, "cannot malloc swap_info_struct space\n");
-	
-        readmem(swap_info, KVADDR, vt->swap_info_struct, SIZE(swap_info_struct),
-                "fill_swap_info", FAULT_ON_ERROR);
+	case GET_SLUB_SLABS:
+		si->num_slabs = total_slabs;
+		break;
+	}
 
-	vt->last_swap_read = swap_info;
+	FREEBUF(nodes);
+	return TRUE;
+
+bailout:
+	FREEBUF(nodes);
+	return FALSE;
 }
 
-/*
- *  If active, clear references to the swap_info references.
- */
-void
-clear_swap_info_cache(void)
+
+static void
+do_kmem_cache_slub(struct meminfo *si)  
 {
-	if (ACTIVE())
-		vt->last_swap_read = 0;
-}
+	int i, n;
+	ulong cpu_slab_ptr, node_ptr;
+	ulong node_nr_partial, node_nr_slabs;
+	ulong *per_cpu;
 
+	per_cpu = (ulong *)GETBUF(sizeof(ulong) * vt->numnodes);
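+	/* per_cpu[n] counts the cpu_slab pages found on node n. */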
 
-/*
- *  Translage a vm_area_struct and virtual address into a filename
- *  and offset string.
- */ 
+        for (i = 0; i < kt->cpus; i++) {
+		cpu_slab_ptr = get_cpu_slab_ptr(si, i, NULL);
 
-#define PAGE_CACHE_SHIFT  (machdep->pageshift) /* This is supposed to change! */
+		fprintf(fp, "CPU %d SLAB:\n%s", i, 
+			cpu_slab_ptr ? "" : "  (empty)\n");
 
-static char *
-vma_file_offset(ulong vma, ulong vaddr, char *buf)
-{
-	ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset;
-	ulong vfsmnt;
-	char file[BUFSIZE];
-	char *vma_buf, *file_buf;
+                if (!cpu_slab_ptr)
+                        continue;
 
-	if (!vma)
-		return NULL;
+                if ((n = page_to_nid(cpu_slab_ptr)) >= 0)
+			per_cpu[n]++;
 
-        vma_buf = fill_vma_cache(vma);
+		si->slab = cpu_slab_ptr;
+		do_slab_slub(si, VERBOSE);
 
-        vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file));
+		if (received_SIGINT())
+			restart(0);
+        }
 
-	if (!vm_file) 
-		goto no_file_offset;
+        for (n = 0; n < vt->numnodes; n++) {
+                if (vt->flags & CONFIG_NUMA)
+                        node_ptr = ULONG(si->cache_buf +
+                                OFFSET(kmem_cache_node) +
+                                (sizeof(void *)*n));
+                else
+                        node_ptr = si->cache +
+                                OFFSET(kmem_cache_local_node);
 
-        file_buf = fill_file_cache(vm_file);
-        dentry = ULONG(file_buf + OFFSET(file_f_dentry));
+	 	if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial),
+		    KVADDR, &node_nr_partial, sizeof(ulong),
+		    "kmem_cache_node nr_partial", RETURN_ON_ERROR))
+			break;
+		if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_slabs),
+		    KVADDR, &node_nr_slabs, sizeof(ulong),
+		    "kmem_cache_node nr_slabs", RETURN_ON_ERROR))
+			break;
 
-	if (!dentry) 
-		goto no_file_offset;
+		fprintf(fp, "KMEM_CACHE_NODE   NODE  SLABS  PARTIAL  PER-CPU\n");
 
-	file[0] = NULLCHAR;
-	if (VALID_MEMBER(file_f_vfsmnt)) {
-        	vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt));
-               	get_pathname(dentry, file, BUFSIZE, 1, vfsmnt);
-	} else 
-               	get_pathname(dentry, file, BUFSIZE, 1, 0);
+		fprintf(fp, "%lx%s", node_ptr, space(VADDR_PRLEN > 8 ? 2 : 10));
+		fprintf(fp, "%4d  %5ld  %7ld  %7ld\n",
+			n, node_nr_slabs, node_nr_partial, per_cpu[n]);
 
-	if (!strlen(file)) 
-		goto no_file_offset;
+		do_node_lists_slub(si, node_ptr, n);
 
-        vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
+		if (!(vt->flags & CONFIG_NUMA))
+			break;
+	}
 
-	vm_offset = vm_pgoff = 0xdeadbeef;
+	fprintf(fp, "\n");
 
-	if (VALID_MEMBER(vm_area_struct_vm_offset)) 
-        	vm_offset = ULONG(vma_buf + 
-			OFFSET(vm_area_struct_vm_offset));
-	else if (VALID_MEMBER(vm_area_struct_vm_pgoff))
-        	vm_pgoff = ULONG(vma_buf + 
-			OFFSET(vm_area_struct_vm_pgoff));
-	else 
-		goto no_file_offset;
+	FREEBUF(per_cpu);
+}
 
-	if (vm_offset != 0xdeadbeef) 
-		offset = VIRTPAGEBASE(vaddr) - vm_start + vm_offset;
-	else if (vm_pgoff != 0xdeadbeef) {
-		offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff;
-		offset <<= PAGE_CACHE_SHIFT;
+#define DUMP_SLAB_INFO_SLUB() \
+      { \
+        char b1[BUFSIZE], b2[BUFSIZE]; \
+        fprintf(fp, "  %s  %s  %4d  %5d  %9d  %4d\n", \
+                mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \
+                mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), \
+		node, objects, inuse, objects - inuse); \
+      }
+
+static void 
+do_slab_slub(struct meminfo *si, int verbose)
+{
+	physaddr_t paddr; 
+	ulong vaddr;
+	ushort inuse, objects; 
+	ulong freelist, cpu_freelist, cpu_slab_ptr;
+	int i, cpu_slab, is_free, node;
+	ulong p, q;
+
+	if (!si->slab) {
+		if (CRASHDEBUG(1))
+			error(INFO, "-S option not supported for CONFIG_SLUB\n");
+		return;
 	}
 
-	sprintf(buf, "%s  OFFSET: %lx", file, offset);
+	if (!page_to_phys(si->slab, &paddr)) {
+		error(WARNING, 
+		    "%lx: cannot tranlate slab page to physical address\n",
+			si->slab);
+		return;
+	} 
 
-	return buf;
+	node = page_to_nid(si->slab);
 
-no_file_offset:
-	return NULL;
-}
+	vaddr = PTOV(paddr);
 
-/*
- *  Translate a PTE into its physical address and flags.
- */
-void
-cmd_pte(void)
-{
-        int c;
-	ulonglong pte;
+	if (verbose)
+		fprintf(fp, "  %s", slab_hdr);
 
-        while ((c = getopt(argcnt, args, "")) != EOF) {
-                switch(c)
-                {
-                default:
-                        argerrs++;
-                        break;
-                }
-        }
+	if (!readmem(si->slab + OFFSET(page_inuse), KVADDR, &inuse,
+	    sizeof(ushort), "page.inuse", RETURN_ON_ERROR))
+		return;
+	if (!readmem(si->slab + OFFSET(page_freelist), KVADDR, &freelist,
+	    sizeof(void *), "page.freelist", RETURN_ON_ERROR))
+		return;
+	/* 
+	 *  Pre-2.6.27, the object count and order were fixed in the
+	 *  kmem_cache structure.  Now they may change, say if a high
+	 *  order slab allocation fails, so the per-slab object count
+	 *  is kept in the slab.
+	 */
+	if (VALID_MEMBER(page_objects)) {
+		if (!readmem(si->slab + OFFSET(page_objects), KVADDR, &objects,
+		    sizeof(ushort), "page.objects", RETURN_ON_ERROR))
+			return;
 
-        if (argerrs)
-                cmd_usage(pc->curcmd, SYNOPSIS);
+		if (CRASHDEBUG(1) && (objects != si->objects))
+			error(NOTE, "%s: slab: %lx oo objects: %ld "
+			    "slab objects: %d\n",
+				si->curname, si->slab, 
+				si->objects, objects);
+	} else
+		objects = (ushort)si->objects;
 
-	while (args[optind]) {
-		pte = htoll(args[optind], FAULT_ON_ERROR, NULL);
-		machdep->translate_pte((ulong)pte, NULL, pte);
-		optind++;
+	if (!verbose) {
+		DUMP_SLAB_INFO_SLUB();
+		return;
 	}
 
-}
+	for (i = 0, cpu_slab = -1; i < kt->cpus; i++) {
+		cpu_slab_ptr = get_cpu_slab_ptr(si, i, &cpu_freelist);
 
-static char *node_zone_hdr = "ZONE  NAME         SIZE";
+		if (!cpu_slab_ptr)
+                        continue;
+		if (cpu_slab_ptr == si->slab) {
+			cpu_slab = i;
+			/*
+			 *  The later SLUB scheme uses the per-cpu freelist
+			 *  and keeps page->inuse maxed out, so count
+			 *  the free objects by hand.
+			 */
+			if (cpu_freelist)
+				freelist = cpu_freelist;
+			if ((si->objects - inuse) == 0)
+				inuse = si->objects - 
+					count_free_objects(si, freelist);
+			break;
+		}
+	}
 
-/*
- *  On systems supporting memory nodes, display the basic per-node data.
- */
-static void
-dump_memory_nodes(int initialize)
-{
-	int i, j;
-	int n, id, flen, slen;
-	ulong node_mem_map;
-        ulong node_start_paddr;
-	ulong node_start_pfn;
-        ulong node_start_mapnr;
-	ulong node_spanned_pages;
-        ulong free_pages, zone_size, node_size;
-	ulong zone_start_paddr, zone_start_mapnr, zone_mem_map;
-	ulong zone_start_pfn;
-	ulong bdata;
-	ulong pgdat;
-	ulong node_zones;
-	ulong value;
-	char buf1[BUFSIZE];
-	char buf2[BUFSIZE];
-	char buf3[BUFSIZE];
-	char buf4[BUFSIZE];
-	char buf5[BUFSIZE];
-	struct node_table *nt;
+	DUMP_SLAB_INFO_SLUB();
 
-        if (!(vt->flags & NODES)) {
-		if (!initialize) 
-			error(FATAL, 
-			    "memory nodes not supported by this kernel\n\n");
-		else {
-			nt = &vt->node_table[0];
-			nt->node_id = 0;
-			if (symbol_exists("contig_page_data"))
-				nt->pgdat = symbol_value("contig_page_data");
-			else
-				nt->pgdat = 0;
-			nt->size = vt->total_pages;
-			nt->mem_map = vt->mem_map;
-			nt->start_paddr = 0;
-			nt->start_mapnr = 0;
-			return;
+	fprintf(fp, "  %s", free_inuse_hdr);
+
+#define PAGE_MAPPING_ANON  1
+
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "< SLUB: free list START: >\n");
+		i = 0;
+		for (q = freelist; q; q = get_freepointer(si, (void *)q)) {
+			if (q & PAGE_MAPPING_ANON) { 
+				fprintf(fp, 
+				    "< SLUB: free list END: %lx (%d found) >\n",
+					q, i); 
+				break;
+			}
+			fprintf(fp, "   %lx\n", q);
+			i++;
 		}
+		if (!q) 
+			fprintf(fp, "< SLUB: free list END (%d found) >\n", i);
 	}
 
-	if (initialize)
-		get_symbol_data("pgdat_list", sizeof(void *), &pgdat);
-	else
-		pgdat = vt->node_table[0].pgdat;
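+	/* Walk every object in the slab: allocated objects are shown in brackets, free ones unbracketed. */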
+	for (p = vaddr; p < vaddr + objects * si->size; p += si->size) {
+		is_free = FALSE;
+		for (is_free = 0, q = freelist; q; 
+			q = get_freepointer(si, (void *)q)) {
+			if (q == BADADDR)
+				return;
+			if (q & PAGE_MAPPING_ANON)
+				break;
+			if (p == q) {
+				is_free = TRUE;
+				break;
+			}
+		}
 
-	for (n = 0; pgdat; n++) {
-		if (n >= vt->numnodes)
-			error(FATAL, "numnodes out of sync with pgdat_list?\n");
+		if (si->flags & ADDRESS_SPECIFIED) {
+			if ((si->spec_addr < p) ||
+			    (si->spec_addr >= (p + si->size))) {
+				if (!(si->flags & VERBOSE))
+					continue;
+			} 
+		}
 
-		nt = &vt->node_table[n];
+		fprintf(fp, "  %s%lx%s", 
+			is_free ? " " : "[",
+			p, is_free ? "  " : "]");
+		if (is_free && (cpu_slab >= 0))
+			fprintf(fp, "(cpu %d cache)", cpu_slab);
+		fprintf(fp, "\n");
 
-		readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id,
-			sizeof(int), "pglist node_id", FAULT_ON_ERROR);
+	}
+}
 
-		readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, 
-			&node_mem_map, sizeof(ulong), 
-			"node_mem_map", FAULT_ON_ERROR);
+static int
+count_free_objects(struct meminfo *si, ulong freelist)
+{
+	int c;
+	ulong q;
 
-		if (VALID_MEMBER(pglist_data_node_start_paddr))
-			readmem(pgdat+OFFSET(pglist_data_node_start_paddr), 
-				KVADDR, &node_start_paddr, sizeof(ulong), 
-				"pglist node_start_paddr", FAULT_ON_ERROR);
-		else if (VALID_MEMBER(pglist_data_node_start_pfn)) {
-			readmem(pgdat+OFFSET(pglist_data_node_start_pfn), 
-				KVADDR, &node_start_pfn, sizeof(ulong), 
-				"pglist node_start_pfn", FAULT_ON_ERROR);
-				node_start_mapnr = node_start_pfn;
-				node_start_paddr = PTOB(node_start_pfn);
-		} else error(INFO, 
-			"cannot determine zone starting physical address\n");
+	c = 0;
+	for (q = freelist; q; q = get_freepointer(si, (void *)q)) {
+                if (q & PAGE_MAPPING_ANON)
+			break;
+                c++;
+	}
 
-		if (VALID_MEMBER(pglist_data_node_start_mapnr))
-			readmem(pgdat+OFFSET(pglist_data_node_start_mapnr), 
-				KVADDR, &node_start_mapnr, sizeof(ulong), 
-				"pglist node_start_mapnr", FAULT_ON_ERROR);
+	return c;
+}
 
-		if (VALID_MEMBER(pglist_data_node_size)) 
-			readmem(pgdat+OFFSET(pglist_data_node_size), 
-				KVADDR, &node_size, sizeof(ulong), 
-				"pglist node_size", FAULT_ON_ERROR);
-		else if (VALID_MEMBER(pglist_data_node_spanned_pages)) {
-			readmem(pgdat+OFFSET(pglist_data_node_spanned_pages), 
-				KVADDR, &node_spanned_pages, sizeof(ulong), 
-				"pglist node_spanned_pages", FAULT_ON_ERROR);
-			node_size = node_spanned_pages;
-		} else error(INFO, "cannot determine zone size\n");
 
-		readmem(pgdat+OFFSET(pglist_data_bdata), KVADDR, &bdata,
-			sizeof(ulong), "pglist bdata", FAULT_ON_ERROR);
+static ulong
+get_freepointer(struct meminfo *si, void *object)
+{
+	ulong vaddr, nextfree;
+	
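+	/* SLUB keeps the next-free pointer inside the free object itself, at slab_offset bytes in. */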
+	vaddr = (ulong)(object + si->slab_offset);
+	if (!readmem(vaddr, KVADDR, &nextfree,
+           sizeof(void *), "get_freepointer", RETURN_ON_ERROR))
+		return BADADDR;
 
-		if (initialize) {
-			nt->node_id = id;
-			nt->pgdat = pgdat;
-			if (VALID_MEMBER(zone_struct_memsize)) 
-				nt->size = 0;  /* initialize below */
-			else 
-				nt->size = node_size;
-			nt->mem_map = node_mem_map;
-			nt->start_paddr = node_start_paddr;
-			nt->start_mapnr = node_start_mapnr;
-		}
+	return nextfree;
+}
 
-		if (!initialize) {
-			if (n) {
-				fprintf(fp, "\n");
-				pad_line(fp, slen, '-');
-			}
-			flen = MAX(VADDR_PRLEN, strlen("BOOTMEM_DATA"));
-			fprintf(fp, "%sNODE  %s  %s  %s  %s\n", 
-			    n ? "\n\n" : "",
-			    mkstring(buf1, 8, CENTER, "SIZE"),
-			    mkstring(buf2, flen, CENTER|LJUST, "PGLIST_DATA"),
-			    mkstring(buf3, flen, CENTER|LJUST, "BOOTMEM_DATA"),
-			    mkstring(buf4, flen, CENTER|LJUST, "NODE_ZONES"));
+static void
+do_node_lists_slub(struct meminfo *si, ulong node_ptr, int node)
+{
+	ulong next, list_head, flags;
+	int first;
 
-			node_zones = pgdat + OFFSET(pglist_data_node_zones);
-			sprintf(buf5, " %2d   %s  %s  %s  %s\n", id, 
-			    mkstring(buf1, 8, CENTER|LJUST|LONG_DEC, 
-				MKSTR(node_size)),
-			    mkstring(buf2, flen, CENTER|LJUST|LONG_HEX, 
-				MKSTR(pgdat)),
-			    mkstring(buf3, flen, CENTER|LONG_HEX, 
-				MKSTR(bdata)),
-			    mkstring(buf4, flen, CENTER|LJUST|LONG_HEX,
-                                MKSTR(node_zones)));
-			fprintf(fp, "%s", buf5);
+	list_head = node_ptr + OFFSET(kmem_cache_node_partial);
+ 	if (!readmem(list_head, KVADDR, &next, sizeof(ulong),
+	    "kmem_cache_node partial", RETURN_ON_ERROR))
+		return;
 
-			j = 12 + strlen(buf1) + strlen(buf2) + strlen(buf3) +
-				count_leading_spaces(buf4);
-                	for (i = 1; i < vt->nr_zones; i++) {
-				node_zones += SIZE_OPTION(zone_struct, zone);
-				INDENT(j);
-				fprintf(fp, "%lx\n", node_zones);
-			}
-	
-	                fprintf(fp, "%s  START_PADDR  START_MAPNR\n",
-	                    mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, 
-				"MEM_MAP"));
-	                fprintf(fp, "%s  %s  %s\n",
-	                    mkstring(buf1, VADDR_PRLEN,
-	                        CENTER|LONG_HEX, MKSTR(node_mem_map)),
-	                    mkstring(buf2, strlen("START_PADDR"),
-	                        CENTER|LONG_HEX|RJUST, MKSTR(node_start_paddr)),
-	                    mkstring(buf3, strlen("START_MAPNR"),
-	                        CENTER|LONG_DEC|RJUST, 
-				    MKSTR(node_start_mapnr)));
+	fprintf(fp, "NODE %d PARTIAL:\n%s", node,
+		next == list_head ? "  (empty)\n" : "");
+	first = 0;
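+	/* Entries on the partial list are page.lru links; back up by page_lru to reach the slab page. */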
+        while (next != list_head) {
+		si->slab = next - OFFSET(page_lru);
+		if (first++ == 0)
+			fprintf(fp, "  %s", slab_hdr);
+		do_slab_slub(si, !VERBOSE);
+		
+		if (received_SIGINT())
+			restart(0);
+
+                if (!readmem(next, KVADDR, &next, sizeof(ulong),
+                    "page.lru.next", RETURN_ON_ERROR))
+                        return;
+        }
+
+#define SLAB_STORE_USER (0x00010000UL)
+	flags = ULONG(si->cache_buf + OFFSET(kmem_cache_flags));
 	
-			sprintf(buf2, "%s  %s  START_PADDR  START_MAPNR", 
-				node_zone_hdr,
-				mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, 
-				    "MEM_MAP"));
-			slen = strlen(buf2);
-			fprintf(fp, "\n%s\n", buf2);
-		}
+	if (INVALID_MEMBER(kmem_cache_node_full) ||
+	    !(flags & SLAB_STORE_USER)) {
+		fprintf(fp, "NODE %d FULL:\n  (not tracked)\n", node);
+		return;
+	}
 
-       		node_zones = pgdat + OFFSET(pglist_data_node_zones);
-		for (i = 0; i < vt->nr_zones; i++) {
-			if (CRASHDEBUG(7))
-				fprintf(fp, "zone at %lx\n", node_zones);
+	list_head = node_ptr + OFFSET(kmem_cache_node_full);
+ 	if (!readmem(list_head, KVADDR, &next, sizeof(ulong),
+	    "kmem_cache_node full", RETURN_ON_ERROR))
+		return;
 
-			if (VALID_MEMBER(zone_struct_size))
-                		readmem(node_zones+OFFSET(zone_struct_size), 
-				    	KVADDR, &zone_size, sizeof(ulong),
-                        		"zone_struct size", FAULT_ON_ERROR);
-			else if (VALID_MEMBER(zone_struct_memsize)) {
-                		readmem(node_zones+OFFSET(zone_struct_memsize), 
-				    	KVADDR, &zone_size, sizeof(ulong),
-                        		"zone_struct memsize", FAULT_ON_ERROR);
-				nt->size += zone_size;
-			} else if (VALID_MEMBER(zone_spanned_pages)) {
-                		readmem(node_zones+ OFFSET(zone_spanned_pages), 
-				    	KVADDR, &zone_size, sizeof(ulong),
-                        		"zone spanned_pages", FAULT_ON_ERROR);
-			} else error(FATAL, 
-			    "zone_struct has neither size nor memsize field\n");
-                	readmem(node_zones+ 
-				OFFSET_OPTION(zone_struct_free_pages,
-				zone_free_pages), KVADDR, &free_pages, 
-				sizeof(ulong), "zone[_struct] free_pages", 
-				FAULT_ON_ERROR);
-                	readmem(node_zones+OFFSET_OPTION(zone_struct_name,
-				zone_name), KVADDR, &value, sizeof(void *),
-                        	"zone[_struct] name", FAULT_ON_ERROR);
-                	if (!read_string(value, buf1, BUFSIZE-1))
-                        	sprintf(buf1, "(unknown) ");
-			if (VALID_STRUCT(zone_struct)) {
-                        	readmem(node_zones+
-					OFFSET(zone_struct_zone_start_paddr),
-                                	KVADDR, &zone_start_paddr, 
-					sizeof(ulong), 
-					"node_zones zone_start_paddr", 
-					FAULT_ON_ERROR);
-                        	readmem(node_zones+
-					OFFSET(zone_struct_zone_start_mapnr),
-                                	KVADDR, &zone_start_mapnr, 
-					sizeof(ulong), 
-					"node_zones zone_start_mapnr", 
-					FAULT_ON_ERROR);
-			} else {
-                                readmem(node_zones+
-                                        OFFSET(zone_zone_start_pfn),
-                                        KVADDR, &zone_start_pfn,
-                                        sizeof(ulong),
-                                        "node_zones zone_start_pfn",
-                                        FAULT_ON_ERROR);
-				zone_start_paddr = PTOB(zone_start_pfn);
-                                readmem(node_zones+
-                                        OFFSET(zone_zone_mem_map),
-                                        KVADDR, &zone_mem_map,
-                                        sizeof(ulong),
-                                        "node_zones zone_mem_map",
-                                        FAULT_ON_ERROR);
-				if (zone_mem_map)
-					zone_start_mapnr = 
-				    	    (zone_mem_map - node_mem_map) / 
-					    SIZE(page);
-				else
-					zone_start_mapnr = 0;
-			}
-                        readmem(node_zones+
-				OFFSET_OPTION(zone_struct_zone_mem_map,
-				zone_zone_mem_map), KVADDR, &zone_mem_map, 
-				sizeof(ulong), "node_zones zone_mem_map", 
-				FAULT_ON_ERROR);
+	fprintf(fp, "NODE %d FULL:\n%s", node, 
+		next == list_head ? "  (empty)\n" : "");
+	first = 0;
+        while (next != list_head) {
+		si->slab = next - OFFSET(page_lru);
+		if (first++ == 0)
+			fprintf(fp, "  %s", slab_hdr);
+		do_slab_slub(si, !VERBOSE);
 
-			if (!initialize) {
-				fprintf(fp, " %2d   %-9s %7ld  ", 
-					i, buf1, zone_size);
-				fprintf(fp, "%s  %s  %s\n",
-	                    	    mkstring(buf1, VADDR_PRLEN,
-	                        	RJUST|LONG_HEX,MKSTR(zone_mem_map)),
-	                            mkstring(buf2, strlen("START_PADDR"),
-	                        	LONG_HEX|RJUST,MKSTR(zone_start_paddr)),
-	                    	    mkstring(buf3, strlen("START_MAPNR"),
-	                        	LONG_DEC|RJUST,
-					MKSTR(zone_start_mapnr)));
-			}
+		if (received_SIGINT())
+			restart(0);
+
+                if (!readmem(next, KVADDR, &next, sizeof(ulong),
+                    "page.lru.next", RETURN_ON_ERROR))
+                        return;
+        }
+}
 
-			node_zones += SIZE_OPTION(zone_struct, zone);
-		}
 
-		if (initialize)
-			readmem(pgdat + OFFSET_OPTION(pglist_data_node_next,
-				pglist_data_pgdat_next), KVADDR,
-				&pgdat, sizeof(void *), "pglist_data node_next",
-				FAULT_ON_ERROR);
-		else {
-			if ((n+1) < vt->numnodes)
-				pgdat = vt->node_table[n+1].pgdat;
-			else
-				pgdat = 0;
-		}
-	} 
+static char *
+is_kmem_cache_addr_slub(ulong vaddr, char *kbuf)
+{
+        int i, cnt;
+        ulong *cache_list;
+        ulong name;
+        char *cache_buf;
+        int found;
+
+        cnt = get_kmem_cache_list(&cache_list);
+        cache_buf = GETBUF(SIZE(kmem_cache));
+	
+        for (i = 0, found = FALSE; i < cnt; i++) {
+		if (cache_list[i] != vaddr)
+			continue;
+
+                if (!readmem(cache_list[i], KVADDR, cache_buf,
+		    SIZE(kmem_cache), "kmem_cache buffer",
+		    RETURN_ON_ERROR))
+			break;
+
+                name = ULONG(cache_buf + OFFSET(kmem_cache_name));
+                if (!read_string(name, kbuf, BUFSIZE-1))
+			sprintf(kbuf, "(unknown)");
+
+		found = TRUE;
+		break;
+        }
+
+        FREEBUF(cache_list);
+        FREEBUF(cache_buf);
 
-	if (n != vt->numnodes)
-		error(FATAL, "numnodes out of sync with pgdat_list?\n");
+	return (found ? kbuf : NULL);
 }
 
 /*
- *  Gather essential information regarding each memory node.
+ *  Kernel-config-neutral page-to-node evaluator.
  */
-static void
-node_table_init(void)
+static int 
+page_to_nid(ulong page)
 {
-	int n;
-	ulong pgdat;
-
-	/*
-	 *  Override numnodes -- some kernels may leave it at 1 on a system
-	 *  with multiple memory nodes.
-	 */
-        get_symbol_data("pgdat_list", sizeof(void *), &pgdat);
+        int i;
+	physaddr_t paddr;
+        struct node_table *nt;
+        physaddr_t end_paddr;
 
-        for (n = 0; pgdat; n++) {
-                readmem(pgdat + OFFSET_OPTION(pglist_data_node_next,
-                        pglist_data_pgdat_next), KVADDR,
-                        &pgdat, sizeof(void *), "pglist_data node_next",
-                        FAULT_ON_ERROR);
-	}
-	if (n != vt->numnodes) {
-		if (CRASHDEBUG(2))
-			error(NOTE, "changing numnodes from %d to %d\n",
-				vt->numnodes, n);
-		vt->numnodes = n;
+	if (!page_to_phys(page, &paddr)) {
+		error(INFO, "page_to_nid: invalid page: %lx\n", page);
+		return -1;
 	}
 
-       	if (!(vt->node_table = (struct node_table *)
-	    malloc(sizeof(struct node_table) * vt->numnodes)))
-		error(FATAL, "cannot malloc node_table %s(%d nodes)",
-			vt->numnodes > 1 ? "array " : "", vt->numnodes);
-
-	BZERO(vt->node_table, sizeof(struct node_table) * vt->numnodes);
+        for (i = 0; i < vt->numnodes; i++) {
+                nt = &vt->node_table[i];
 
-	dump_memory_nodes(MEMORY_NODES_INITIALIZE);
+		end_paddr = nt->start_paddr +
+			((physaddr_t)nt->size * (physaddr_t)PAGESIZE());
+	
+		if ((paddr >= nt->start_paddr) && (paddr < end_paddr))
+			return i;
+        }
 
-        qsort((void *)vt->node_table, (size_t)vt->numnodes,
-                sizeof(struct node_table), compare_node_data);
+	error(INFO, "page_to_nid: cannot determine node for pages: %lx\n", 
+		page);
 
-	if (CRASHDEBUG(2))
-		dump_memory_nodes(MEMORY_NODES_DUMP);
+	return -1; 
 }
 
 /*
- *  The comparison function must return an integer less  than,
- *  equal  to,  or  greater than zero if the first argument is
- *  considered to be respectively  less  than,  equal  to,  or
- *  greater than the second.  If two members compare as equal,
- *  their order in the sorted array is undefined.
+ *  Allocate a buffer, fill it with the addresses of the current
+ *  kmem_cache structures, and return it via the passed-in pointer.
  */
-
 static int
-compare_node_data(const void *v1, const void *v2)
+get_kmem_cache_list(ulong **cache_buf)
 {
-        struct node_table *t1, *t2;
+	int cnt;
+	ulong vaddr;
+	struct list_data list_data, *ld;
 
-        t1 = (struct node_table *)v1;
-        t2 = (struct node_table *)v2;
+	get_symbol_data("slab_caches", sizeof(void *), &vaddr);
 
-        return (t1->node_id < t2->node_id ? -1 :
-                t1->node_id == t2->node_id ? 0 : 1);
+	ld = &list_data;
+	BZERO(ld, sizeof(struct list_data));
+	ld->start = vaddr;
+	ld->list_head_offset = OFFSET(kmem_cache_list);
+	ld->end = symbol_value("slab_caches");
+	if (CRASHDEBUG(3))
+		ld->flags |= VERBOSE;
+
+	hq_open();
+	cnt = do_list(ld);
+	*cache_buf = (ulong *)GETBUF(cnt * sizeof(ulong));
+	cnt = retrieve_list(*cache_buf, cnt);
+	hq_close();
+
+	return cnt;
 }
 
 
 /*
- *  Depending upon the processor, and whether we're running live or on a 
- *  dumpfile, get the system page size.
+ *  Get the address of the head page of a compound page.
  */
-uint
-memory_page_size(void)
+static ulong
+compound_head(ulong page)
 {
-	uint psz;
+	ulong flags, first_page;
 
-	if (REMOTE_MEMSRC()) 
-		return remote_page_size();
+	first_page = page;
 
-	switch (pc->flags & MEMORY_SOURCES)
-	{
-	case DISKDUMP:
-		psz = diskdump_page_size();
-		break;
+	if (!readmem(page+OFFSET(page_flags), KVADDR, &flags, sizeof(ulong),
+	    "page.flags", RETURN_ON_ERROR))
+		return first_page;
 
-	case NETDUMP:
-		psz = netdump_page_size();
-		break;
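+	/* All head/tail flag bits set marks a compound tail page; page.first_page then points at the head. */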
+	if ((flags & vt->PG_head_tail_mask) == vt->PG_head_tail_mask)
+		readmem(page+OFFSET(page_first_page), KVADDR, &first_page, 
+			sizeof(ulong), "page.first_page", RETURN_ON_ERROR);
+		
+	return first_page;
+}
 
-	case MCLXCD:
-		psz = (uint)mclx_page_size();
-		break;
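+/*
+ *  Sum page.inuse over all slabs on a node's partial list, much like
+ *  the kernel's count_partial() helper in slub.c.
+ */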
+long 
+count_partial(ulong node)
+{
+	ulong list_head, next;
+	short inuse;
+	ulong total_inuse;
+
+	total_inuse = 0;
+	list_head = node + OFFSET(kmem_cache_node_partial);
+	if (!readmem(list_head, KVADDR, &next, sizeof(ulong),
+	    "kmem_cache_node.partial", RETURN_ON_ERROR))
+		return -1;
+
+	while (next != list_head) {
+		if (!readmem(next - OFFSET(page_lru) + OFFSET(page_inuse), KVADDR, &inuse, 
+		    sizeof(ushort), "page.inuse", RETURN_ON_ERROR))
+			return -1;
+		total_inuse += inuse;
+		if (!readmem(next, KVADDR, &next, sizeof(ulong),
+		    "page.lru.next", RETURN_ON_ERROR))
+			return -1;
+	}
+	return total_inuse;
+}
 
-	case LKCD:
-#if 0							/* REMIND: */
-		psz = lkcd_page_size();			/* dh_dump_page_size is HW page size; should add dh_page_size */
-#else
-		psz = (uint)getpagesize();
-#endif
-		break;
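+/*
+ *  If the address is the page struct of a PG_slab page, return the
+ *  name of the kmem_cache that the page belongs to.
+ */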
+char *
+is_slab_page(struct meminfo *si, char *buf)
+{
+	int i, cnt;
+	ulong page_slab, page_flags, name;
+        ulong *cache_list;
+        char *cache_buf, *retval;
 
-	case DEVMEM:                      
-	case MEMMOD:
-		psz = (uint)getpagesize();  
-		break;
+	if (!(vt->flags & KMALLOC_SLUB))
+		return NULL;
 
-	case S390D:
-		psz = s390_page_size();
-		break;
+	if (!is_page_ptr((ulong)si->spec_addr, NULL))
+		return NULL;
 
-	default:
-		error(FATAL, "memory_page_size: invalid pc->flags: %lx\n", 
-			pc->flags & MEMORY_SOURCES); 
-	}
+	if (!readmem(si->spec_addr + OFFSET(page_flags), KVADDR, 
+	    &page_flags, sizeof(ulong), "page.flags", 
+	    RETURN_ON_ERROR|QUIET))
+		return NULL;
 
-	return psz;
-}
+	if (!(page_flags & (1 << vt->PG_slab)))
+		return NULL;
 
-/*
- *  Return the vmalloc address referenced by the first vm_struct
- *  on the vmlist.  This can normally be used by the machine-specific
- *  xxx_vmalloc_start() routines.
- */
+	if (!readmem(si->spec_addr + OFFSET(page_slab), KVADDR, 
+	    &page_slab, sizeof(ulong), "page.slab", 
+	    RETURN_ON_ERROR|QUIET))
+		return NULL;
 
-ulong
-first_vmalloc_address(void)
-{
-        ulong vmlist, addr;
+	retval = NULL;
+        cnt = get_kmem_cache_list(&cache_list);
+        cache_buf = GETBUF(SIZE(kmem_cache));
+
+	for (i = 0; i < cnt; i++) {
+		if (page_slab == cache_list[i]) {
+			if (!readmem(cache_list[i], KVADDR, cache_buf,
+			    SIZE(kmem_cache), "kmem_cache buffer",
+			    QUIET|RETURN_ON_ERROR))
+				goto bailout;
+
+			name = ULONG(cache_buf + OFFSET(kmem_cache_name));
+			if (!read_string(name, buf, BUFSIZE-1))
+				goto bailout;
 
-        get_symbol_data("vmlist", sizeof(void *), &vmlist);
+			retval = buf;
+			break;
+		}
+	} 
 
-        if (!readmem(vmlist+OFFSET(vm_struct_addr), KVADDR, &addr, 
-	    sizeof(void *), "first vmlist addr", RETURN_ON_ERROR)) 
-		non_matching_kernel();
+bailout:
+	FREEBUF(cache_list);
+	FREEBUF(cache_buf);
 
-        return addr;
+	return retval;
 }
 
 /*
- *  Return the L1 cache size in bytes, which can be found stored in the
- *  cache_cache.
+ *  Figure out which of the kmem_cache.cpu_slab declarations
+ *  is used by this kernel, and return a pointer to the slab
+ *  page being used.  Return the kmem_cache_cpu.freelist pointer
+ *  if requested.
  */
-
-int
-l1_cache_size(void)
+static ulong
+get_cpu_slab_ptr(struct meminfo *si, int cpu, ulong *cpu_freelist)
 {
-	ulong cache_cache;
-	ulong c_align;
-	int colour_off;
-	int retval;
+	ulong cpu_slab_ptr, page, freelist;
 
-        cache_cache = symbol_value("cache_cache");
+	if (cpu_freelist)
+		*cpu_freelist = 0;
 
-	retval = -1;
+	switch (vt->cpu_slab_type)
+	{
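+	/* cpu_slab is an embedded kmem_cache_cpu structure: read its page and freelist in place. */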
+	case TYPE_CODE_STRUCT:
+		cpu_slab_ptr = ULONG(si->cache_buf +
+                        OFFSET(kmem_cache_cpu_slab) +
+			OFFSET(kmem_cache_cpu_page));
+		if (cpu_freelist && VALID_MEMBER(kmem_cache_cpu_freelist))
+			*cpu_freelist = ULONG(si->cache_buf +
+                        	OFFSET(kmem_cache_cpu_slab) +
+                        	OFFSET(kmem_cache_cpu_freelist));
+		break;
+
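+	/* cpu_slab is a per-cpu array: entries are page pointers or kmem_cache_cpu pointers, depending on the kernel. */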
+	case TYPE_CODE_ARRAY:
+		cpu_slab_ptr = ULONG(si->cache_buf +
+			OFFSET(kmem_cache_cpu_slab) + (sizeof(void *)*cpu));
+
+		if (cpu_slab_ptr && cpu_freelist &&
+		    VALID_MEMBER(kmem_cache_cpu_freelist)) {
+			if (readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_freelist),
+			    KVADDR, &freelist, sizeof(void *),
+			    "kmem_cache_cpu.freelist", RETURN_ON_ERROR))
+				*cpu_freelist = freelist;
+		}
+	
+		if (cpu_slab_ptr && VALID_MEMBER(kmem_cache_cpu_page)) {
+			if (!readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_page),
+			    KVADDR, &page, sizeof(void *),
+			    "kmem_cache_cpu.page", RETURN_ON_ERROR))
+				cpu_slab_ptr = 0;
+			else
+				cpu_slab_ptr = page;
+		}
+		break;
 
-	if (VALID_MEMBER(kmem_cache_s_c_align)) {
-                readmem(cache_cache+OFFSET(kmem_cache_s_c_align),
-                	KVADDR, &c_align, sizeof(ulong),
-                        "c_align", FAULT_ON_ERROR);
-		retval = (int)c_align;
-	} else if (VALID_MEMBER(kmem_cache_s_colour_off)) {
-                readmem(cache_cache+OFFSET(kmem_cache_s_colour_off),
-                	KVADDR, &colour_off, sizeof(int),
-                        "colour_off", FAULT_ON_ERROR);
-		retval = colour_off;
+	default:
+		error(FATAL, "cannot determine location of kmem_cache.cpu_slab page\n");
 	}
 
-	return retval;
+	return cpu_slab_ptr;
 }
 
 /*
- *  Multi-purpose routine used to query/control dumpfile memory usage.
+ *  In 2.6.27 kmem_cache.order and kmem_cache.objects were merged
+ *  into kmem_cache.oo, a kmem_cache_order_objects structure.
+ *  oo_order() and oo_objects() emulate the kernel functions
+ *  of the same name.
  */
-int
-dumpfile_memory(int cmd)
+static unsigned int oo_order(ulong oo)
 {
-	int retval;
+        return (oo >> 16);
+}
 
-	retval = 0;
+static unsigned int oo_objects(ulong oo)
+{
+        return (oo & ((1 << 16) - 1));
+}
 
-	if (!DUMPFILE())
-		return retval;
+#ifdef NOT_USED
+ulong
+slab_to_kmem_cache_node(struct meminfo *si, ulong slab_page)
+{
+	int node;
+	ulong node_ptr;
 
-	switch (cmd)
-	{
-	case DUMPFILE_MEM_USED:
-                if (REMOTE_DUMPFILE()) 
-                        retval = remote_memory_used();
-		else if (pc->flags & NETDUMP)
-        		retval = netdump_memory_used();
-		else if (pc->flags & DISKDUMP)
-        		retval = diskdump_memory_used();
-		else if (pc->flags & LKCD)
-        		retval = lkcd_memory_used();
-		else if (pc->flags & MCLXCD)
-                        retval = vas_memory_used();
-		else if (pc->flags & S390D)
-			retval = s390_memory_used();
-		break;
+	if (vt->flags & CONFIG_NUMA) {
+		node = page_to_nid(slab_page);
+		node_ptr = ULONG(si->cache_buf +
+			OFFSET(kmem_cache_node) +
+			(sizeof(void *)*node));
+	} else
+		node_ptr = si->cache + OFFSET(kmem_cache_local_node);
 
-	case DUMPFILE_FREE_MEM:
-                if (REMOTE_DUMPFILE())
-                        retval = remote_free_memory();
-                else if (pc->flags & NETDUMP)
-			retval = netdump_free_memory();
-                else if (pc->flags & DISKDUMP)
-			retval = diskdump_free_memory();
-                else if (pc->flags & LKCD)
-                        retval = lkcd_free_memory();
-                else if (pc->flags & MCLXCD)
-                        retval = vas_free_memory(NULL);
-                else if (pc->flags & S390D)
-                        retval = s390_free_memory();
-		break;
+	return node_ptr;
+}
 
-	case DUMPFILE_MEM_DUMP:
-		if (REMOTE_DUMPFILE())
-                        retval = remote_memory_dump(0);
-                else if (pc->flags & NETDUMP) 
-                        retval = netdump_memory_dump(fp);
-                else if (pc->flags & DISKDUMP) 
-                        retval = diskdump_memory_dump(fp);
-                else if (pc->flags & LKCD) 
-                        retval = lkcd_memory_dump(set_lkcd_fp(fp));
-                else if (pc->flags & MCLXCD)
-                        retval = vas_memory_dump(fp);
-                else if (pc->flags & S390D)
-                        retval = s390_memory_dump(fp);
-		break;
-	
-	case DUMPFILE_ENVIRONMENT:
-                if (pc->flags & LKCD) {
-                        set_lkcd_fp(fp);
-                        dump_lkcd_environment(0);
-		} else if (pc->flags & REM_LKCD) 
-                        retval = remote_memory_dump(VERBOSE);
-		break;
-	}
+ulong
+get_kmem_cache_by_name(char *request)
+{
+        int i, cnt;
+        ulong *cache_list;
+        ulong name;
+        char *cache_buf;
+        char buf[BUFSIZE];
+        ulong found;
+
+        cnt = get_kmem_cache_list(&cache_list);
+        cache_buf = GETBUF(SIZE(kmem_cache));
+        found = 0;
+
+        for (i = 0; i < cnt; i++) {
+                readmem(cache_list[i], KVADDR, cache_buf,
+                        SIZE(kmem_cache), "kmem_cache buffer",
+                        FAULT_ON_ERROR);
 
-	return retval;
-}
+                name = ULONG(cache_buf + OFFSET(kmem_cache_name));
+                if (!read_string(name, buf, BUFSIZE-1))
+			continue;
+
+                if (STREQ(buf, request)) {
+                        found = cache_list[i];
+                        break;
+                }
+        }
 
+        FREEBUF(cache_list);
+        FREEBUF(cache_buf);
+
+        return found;
+}
+#endif  /* NOT_USED */
--- crash/lkcd_fix_mem.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_fix_mem.c	2007-11-15 10:44:38.000000000 -0500
@@ -20,21 +20,13 @@
 
 #define LKCD_COMMON
 #include "defs.h"
-#include "lkcd_fix_mem.h"
+#include "lkcd_dump_v8.h"
 
 static int fix_addr(dump_header_asm_t *); 
     
 int
-fix_addr_v8(int fd)
+fix_addr_v8(dump_header_asm_t *dha)
 {
-    static dump_header_asm_t dump_header_asm_v8 = { 0 };
-    dump_header_asm_t *dha;
-    dha = &dump_header_asm_v8;
-    
-    if (read(lkcd->fd, dha, sizeof(dump_header_asm_t)) !=
-	    sizeof(dump_header_asm_t))
-	return -1;
-    
     fix_addr(dha);
 
     return 0;
@@ -59,14 +51,6 @@
 static int
 fix_addr(dump_header_asm_t *dha)  
 {
-    
-
-    if (dha->dha_header_size != sizeof(dump_header_asm_t)) {
-	error(INFO, "LKCD machine specific dump header doesn't match crash version\n");
-	error(INFO, "traceback of currently executing threads may not work\n\n");
-    }
-    
-
     lkcd->dump_header_asm = dha;
     
 
@@ -83,7 +67,7 @@
 		if (dha->dha_stack[i] && dha->dha_smp_current_task[i]) {
 		    lkcd->fix_addr[i].task = (ulong)dha->dha_smp_current_task[i];
 		    lkcd->fix_addr[i].saddr = (ulong)dha->dha_stack[i]; 
-		    lkcd->fix_addr[i].sw = (ulong)dha->dha_switch_stack[i];
+		    lkcd->fix_addr[i].sw = (ulong)dha->dha_stack_ptr[i];
 		    /* remember the highest non-zero entry */
 		    lkcd->fix_addr_num = i + 1;
 		} else {
@@ -113,4 +97,14 @@
 	return 0;
 }
 
+int lkcd_get_kernel_start_v8(ulong *addr)
+{
+	if (!addr)
+		return 0;
+
+	*addr = ((dump_header_asm_t *)lkcd->dump_header_asm)->dha_kernel_addr;
+
+	return 1;
+}
+
 #endif // IA64
--- crash/tools.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/tools.c	2009-01-26 14:54:54.000000000 -0500
@@ -1,8 +1,8 @@
 /* tools.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -18,12 +18,12 @@
 #include "defs.h"
 #include <ctype.h>
 
-static int calculate(char *, ulong *, ulonglong *, ulong);
 static void print_number(struct number_option *, int, int);
 static long alloc_hq_entry(void);
 struct hq_entry;
 static void dealloc_hq_entry(struct hq_entry *);
 static void show_options(void);
+static void dump_struct_members(struct list_data *, int, ulong);
 
 /*
  *  General purpose error reporting routine.  Type INFO prints the message
@@ -41,11 +41,11 @@
 {
 	int end_of_line, new_line;
         char buf[BUFSIZE];
-        ulong retaddr[4] = { 0 };
+        ulong retaddr[NUMBER_STACKFRAMES] = { 0 };
 	va_list ap;
 
 	if (CRASHDEBUG(1) || (pc->flags & DROP_CORE)) {
-		save_return_address(retaddr);
+		SAVE_RETURN_ADDRESS(retaddr);
 		console("error() trace: %lx => %lx => %lx => %lx\n",
 			retaddr[3], retaddr[2], retaddr[1], retaddr[0]);
 	}
@@ -63,6 +63,8 @@
 
 	if ((new_line = (buf[0] == '\n')))
 		shift_string_left(buf, 1);
+	else if (pc->flags & PLEASE_WAIT)
+		new_line = TRUE;
 
 	if (pc->stdpipe) {
 		fprintf(pc->stdpipe, "%s%s: %s%s", 
@@ -1532,7 +1534,7 @@
 	int right;
 	char buf[BUFSIZE];
 
-	switch (flags & (LONG_DEC|LONG_HEX|INT_HEX|INT_DEC|LONGLONG_HEX)) 
+	switch (flags & (LONG_DEC|LONG_HEX|INT_HEX|INT_DEC|LONGLONG_HEX|ZERO_FILL)) 
 	{
 	case LONG_DEC:
 		sprintf(s, "%lu", (ulong)opt);
@@ -1540,6 +1542,12 @@
 	case LONG_HEX:
 		sprintf(s, "%lx", (ulong)opt);
 		break;
+	case (LONG_HEX|ZERO_FILL):
+		if (VADDR_PRLEN == 8)
+			sprintf(s, "%08lx", (ulong)opt);
+		else if (VADDR_PRLEN == 16)
+			sprintf(s, "%016lx", (ulong)opt);
+		break;
 	case INT_DEC:
 		sprintf(s, "%u", (uint)((ulong)opt));
 		break;
@@ -1671,6 +1679,9 @@
                 switch(c)
 		{
 		case 'c':
+			if (XEN_HYPER_MODE())
+				option_not_supported(c);
+
 			if (!runtime) {
 				error(INFO, 
 				    "cpu setting not allowed from .%src\n",
@@ -1687,6 +1698,9 @@
 			return;
 
 		case 'p':
+			if (XEN_HYPER_MODE())
+				option_not_supported(c);
+
 			if (!runtime)
 				return;
 
@@ -1721,7 +1735,10 @@
 	}
 
 	if (!args[optind]) {
-		if (runtime)
+		if (XEN_HYPER_MODE())
+			error(INFO, 
+			    "requires an option with the Xen hypervisor\n");
+		else if (runtime)
 			show_context(CURRENT_CONTEXT());
 		return;
 	}
@@ -1770,6 +1787,42 @@
                                 	pc->flags & HASH ? "on" : "off");
 			return;
 
+                } else if (STREQ(args[optind], "unwind")) {
+                        if (args[optind+1]) {
+                                optind++;
+                                if (STREQ(args[optind], "on")) {
+				    	if ((kt->flags & DWARF_UNWIND_CAPABLE) ||
+					    !runtime) {
+                                        	kt->flags |= DWARF_UNWIND;
+						kt->flags &= ~NO_DWARF_UNWIND;
+					}
+                                } else if (STREQ(args[optind], "off")) {
+                                        kt->flags &= ~DWARF_UNWIND;
+					if (!runtime)
+						kt->flags |= NO_DWARF_UNWIND;
+				} else if (IS_A_NUMBER(args[optind])) {
+					value = stol(args[optind],
+                                    		FAULT_ON_ERROR, NULL);
+					if (value) {
+				    		if ((kt->flags & DWARF_UNWIND_CAPABLE) ||
+						    !runtime) {
+							kt->flags |= DWARF_UNWIND;
+							kt->flags &= ~NO_DWARF_UNWIND;
+						}
+					} else {
+						kt->flags &= ~DWARF_UNWIND;
+						if (!runtime)
+							kt->flags |= NO_DWARF_UNWIND;
+					}
+				} else
+					goto invalid_set_command;
+                        }
+
+			if (runtime)
+                        	fprintf(fp, "unwind: %s\n",
+                                	kt->flags & DWARF_UNWIND ? "on" : "off");
+			return;
+
                } else if (STREQ(args[optind], "refresh")) {
                         if (args[optind+1]) {
                                 optind++;
@@ -1806,7 +1859,14 @@
                                         pc->flags |= SCROLL;
                                 else if (STREQ(args[optind], "off"))
                                         pc->flags &= ~SCROLL;
-                                else if (IS_A_NUMBER(args[optind])) {
+				else if (STREQ(args[optind], "more"))
+					pc->scroll_command = SCROLL_MORE;
+				else if (STREQ(args[optind], "less"))
+					pc->scroll_command = SCROLL_LESS;
+				else if (STREQ(args[optind], "CRASHPAGER")) {
+					if (CRASHPAGER_valid())
+						pc->scroll_command = SCROLL_CRASHPAGER;
+				} else if (IS_A_NUMBER(args[optind])) {
                                         value = stol(args[optind],
                                                 FAULT_ON_ERROR, NULL);
                                         if (value)
@@ -1817,9 +1877,25 @@
 					goto invalid_set_command;
                         }
 
-			if (runtime)
-                        	fprintf(fp, "scroll: %s\n",
-                                	pc->flags & SCROLL ? "on" : "off");
+			if (runtime) {
+				fprintf(fp, "scroll: %s ",
+					pc->flags & SCROLL ? "on" : "off");
+				switch (pc->scroll_command)
+				{
+				case SCROLL_LESS:
+					fprintf(fp, "(/usr/bin/less)\n");
+					break;
+				case SCROLL_MORE:
+					fprintf(fp, "(/bin/more)\n");
+					break;
+				case SCROLL_NONE:
+					fprintf(fp, "(none)\n");
+					break;
+				case SCROLL_CRASHPAGER:
+					fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER"));
+					break;
+				}
+			}
 
 			return;
 
@@ -1998,12 +2074,38 @@
 				fprintf(fp, "print_max: %d\n", print_max);
 			return;
 
+                } else if (STREQ(args[optind], "null-stop")) {
+			optind++;
+			if (args[optind]) {
+				if (STREQ(args[optind], "on"))
+					stop_print_at_null = 1;
+				else if (STREQ(args[optind], "off"))
+					stop_print_at_null = 0;
+				else if (IS_A_NUMBER(args[optind])) {
+					value = stol(args[optind],
+						FAULT_ON_ERROR, NULL);
+					if (value)
+						stop_print_at_null = 1;
+					else
+						stop_print_at_null = 0;
+				} else
+					goto invalid_set_command;
+			}
+			if (runtime)
+				fprintf(fp, "null-stop: %s\n", 
+					stop_print_at_null ? "on" : "off");
+			return;
+
                 } else if (STREQ(args[optind], "dumpfile")) {
 			optind++;
                         if (!runtime && args[optind]) {
 				pc->flags &= ~(DUMPFILE_TYPES);
 				if (is_netdump(args[optind], NETDUMP_LOCAL))
 					pc->flags |= NETDUMP;
+				else if (is_kdump(args[optind], KDUMP_LOCAL))
+					pc->flags |= KDUMP;
+				else if (is_xendump(args[optind]))
+					pc->flags |= XENDUMP;
 				else if (is_diskdump(args[optind]))
 					pc->flags |= DISKDUMP;
 				else if (is_lkcd_compressed_dump(args[optind])) 
@@ -2054,6 +2156,33 @@
 			pc->flags |= DATADEBUG;
 			return;
 
+                } else if (STREQ(args[optind], "zero_excluded")) {
+
+                        if (args[optind+1]) {
+                                optind++;
+                                if (STREQ(args[optind], "on"))
+                                        *diskdump_flags |= ZERO_EXCLUDED;
+                                else if (STREQ(args[optind], "off"))
+                                        *diskdump_flags &= ~ZERO_EXCLUDED;
+				else if (IS_A_NUMBER(args[optind])) {
+					value = stol(args[optind],
+                                    		FAULT_ON_ERROR, NULL);
+					if (value)
+                                        	*diskdump_flags |= ZERO_EXCLUDED;
+					else
+                                        	*diskdump_flags &= ~ZERO_EXCLUDED;
+				} else
+					goto invalid_set_command;
+                        }
+
+			if (runtime)
+                        	fprintf(fp, "zero_excluded: %s\n",
+                               	    *diskdump_flags & ZERO_EXCLUDED ? 
+					"on" : "off");
+			return;
+
+		} else if (XEN_HYPER_MODE()) {
+			error(FATAL, "invalid argument for the Xen hypervisor\n");
 		} else if (runtime) {
 			ulong pid, task;
 
@@ -2106,7 +2235,23 @@
 static void
 show_options(void)
 {
-	fprintf(fp, "        scroll: %s\n", pc->flags & SCROLL ? "on" : "off"); 
+	fprintf(fp, "        scroll: %s ",
+		pc->flags & SCROLL ? "on" : "off");
+	switch (pc->scroll_command)
+	{
+	case SCROLL_LESS:
+		fprintf(fp, "(/usr/bin/less)\n");
+		break;
+	case SCROLL_MORE:
+		fprintf(fp, "(/bin/more)\n");
+		break;
+	case SCROLL_NONE:
+		fprintf(fp, "(none)\n");
+		break;
+	case SCROLL_CRASHPAGER:
+		fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER"));
+		break;
+	}
         fprintf(fp, "         radix: %d (%s)\n", pc->output_radix,
                 pc->output_radix == 10 ? "decimal" :
                 pc->output_radix == 16 ? "hexadecimal" : "unknown");
@@ -2121,6 +2266,9 @@
 	fprintf(fp, "          edit: %s\n", pc->editing_mode);
 	fprintf(fp, "      namelist: %s\n", pc->namelist);
 	fprintf(fp, "      dumpfile: %s\n", pc->dumpfile);
+	fprintf(fp, "        unwind: %s\n", kt->flags & DWARF_UNWIND ? "on" : "off");
+	fprintf(fp, " zero_excluded: %s\n", *diskdump_flags & ZERO_EXCLUDED ? "on" : "off");
+	fprintf(fp, "     null-stop: %s\n", stop_print_at_null ? "on" : "off");
 }
 
 
@@ -2336,6 +2484,7 @@
 	char *element2;
 	struct syment *sp;
 
+	opcode = 0;
 	value1 = value2 = 0;
 	ll_value1 = ll_value2 = 0;
 
@@ -2550,7 +2699,7 @@
  *  its real value.  The allowable multipliers are k, K, m, M, g and G, for
  *  kilobytes, megabytes and gigabytes.
  */
-static int
+int
 calculate(char *s, ulong *value, ulonglong *llvalue, ulong flags)
 {
 	ulong factor, bias;
@@ -2832,7 +2981,9 @@
 			break;
 
 		case 's':
-			ld->structname = optarg;
+			if (ld->structname_args++ == 0) 
+				hq_open();
+			hq_enter((ulong)optarg);
 			break;
 
 		case 'o':
@@ -2871,6 +3022,12 @@
 		cmd_usage(pc->curcmd, SYNOPSIS);
 	}
 
+	if (ld->structname_args) {
+		ld->structname = (char **)GETBUF(sizeof(char *) * ld->structname_args);
+		retrieve_list((ulong *)ld->structname, ld->structname_args); 
+		hq_close();
+	}
+
 	while (args[optind]) {
 		if (strstr(args[optind], ".") &&
 		    arg_to_datatype(args[optind], sm, RETURN_ON_ERROR) > 1) {
@@ -2896,11 +3053,25 @@
 			}
 
 			/*
-			 *  If it's not a symbol nor a number, bail out.
+			 *  If it's not a symbol nor a number, bail out if it
+			 *  cannot be evaluated as a start address.
 			 */
-			if (!IS_A_NUMBER(args[optind]))	
+			if (!IS_A_NUMBER(args[optind])) {	
+				if (can_eval(args[optind])) {
+                        		value = eval(args[optind], FAULT_ON_ERROR, NULL);
+					if (IS_KVADDR(value)) {
+                               			if (ld->flags & LIST_START_ENTERED)
+                                        		error(FATAL,
+                                            		    "list start already entered\n");
+                                		ld->start = value;
+                                		ld->flags |= LIST_START_ENTERED;
+						goto next_arg;
+					}
+				}
+				
 				error(FATAL, "invalid argument: %s\n",
                                 	args[optind]);
+			}
 
 			/*
 			 *  If the start is known, it's got to be an offset.
@@ -2941,7 +3112,8 @@
                                 ld->member_offset = value;
                                 ld->flags |= LIST_OFFSET_ENTERED;
                                 goto next_arg;
-			} else if (!IS_A_NUMBER(args[optind+1]) &&
+			} else if ((!IS_A_NUMBER(args[optind+1]) &&
+				!can_eval(args[optind+1])) &&
 				!strstr(args[optind+1], "."))
 				error(FATAL, "symbol not found: %s\n",
                                         args[optind+1]);
@@ -3002,8 +3174,12 @@
 	hq_open();
 	c = do_list(ld);
 	hq_close();
+
+        if (ld->structname_args)
+		FREEBUF(ld->structname);
 }
 
+
 /*
  *  Does the work for cmd_list() and any other function that requires the
  *  contents of a linked list.  See cmd_list description above for details.
@@ -3013,7 +3189,7 @@
 {
 	ulong next, last, first;
 	ulong searchfor, readflag;
-	int count, others;
+	int i, count, others;
 
 	if (CRASHDEBUG(1)) {
 		others = 0;
@@ -3038,7 +3214,11 @@
 		console("list_head_offset: %ld\n", ld->list_head_offset);
 		console("             end: %lx\n", ld->end);
 		console("       searchfor: %lx\n", ld->searchfor);
-		console("      structname: %s\n", ld->structname);
+		console(" structname_args: %lx\n", ld->structname_args);
+		if (!ld->structname_args)
+			console("      structname: (unused)\n");
+		for (i = 0; i < ld->structname_args; i++)	
+			console("   structname[%d]: %s\n", i, ld->structname[i]);
 		console("          header: %s\n", ld->header);
 	}
 
@@ -3065,20 +3245,21 @@
 			fprintf(fp, "%lx\n", next - ld->list_head_offset);
 
 			if (ld->structname) {
-				switch (count_chars(ld->structname, '.'))
-				{
-				case 0:
-					dump_struct(ld->structname, 
-						next - ld->list_head_offset, 0);
-					break;
-				case 1:
-					dump_struct_member(ld->structname, 
-						next - ld->list_head_offset, 0);
-					break;
-				default:
-					error(FATAL, 
-					    "invalid structure reference: %s\n",
-						ld->structname);
+				for (i = 0; i < ld->structname_args; i++) {
+					switch (count_chars(ld->structname[i], '.'))
+					{
+					case 0:
+						dump_struct(ld->structname[i], 
+							next - ld->list_head_offset, 0);
+						break;
+					case 1:
+						dump_struct_members(ld, i, next);
+						break;
+					default:
+						error(FATAL, 
+						    "invalid structure reference: %s\n",
+							ld->structname[i]);
+					}
 				}
 			}
 		}
@@ -3148,6 +3329,42 @@
 }
 
 /*
+ *  Issue a dump_struct_member() call for one or more structure
+ *  members.  Multiple members are passed in a comma-separated
+ *  list using the format:  
+ *
+ *            struct.member1,member2,member3
+ */
+void
+dump_struct_members(struct list_data *ld, int idx, ulong next)
+{
+	int i, argc;
+	char *p1, *p2;
+	char *structname, *members;
+	char *arglist[MAXARGS];
+
+	structname = GETBUF(strlen(ld->structname[idx])+1);
+	members = GETBUF(strlen(ld->structname[idx])+1);
+
+	strcpy(structname, ld->structname[idx]);
+	p1 = strstr(structname, ".") + 1;
+
+	p2 = strstr(ld->structname[idx], ".") + 1;
+	strcpy(members, p2);
+	replace_string(members, ",", ' ');
+	argc = parse_line(members, arglist);
+
+	for (i = 0; i < argc; i++) {
+		*p1 = NULLCHAR;
+		strcat(structname, arglist[i]);
+ 		dump_struct_member(structname, next - ld->list_head_offset, 0);
+	}
+
+	FREEBUF(structname);
+	FREEBUF(members);
+}
+
+/*
  *  The next set of functions are a general purpose hashing tool used to
  *  identify duplicate entries in a set of passed-in data, and if found, 
  *  to fail the entry attempt.  When a command wishes to verify a list
@@ -3552,6 +3769,52 @@
         return(-1);
 }
 
+/*
+ *  For a given value, check to see if a hash queue entry exists.  If an
+ *  entry is found, return TRUE; for all other possibilities return FALSE.
+ */
+int
+hq_entry_exists(ulong value)
+{
+	struct hash_table *ht;
+	struct hq_entry *list_entry;
+	long hqi;
+
+	if (!(pc->flags & HASH))
+		return FALSE;
+
+	ht = &hash_table;
+
+	if (ht->flags & (HASH_QUEUE_NONE))
+		return FALSE;
+
+	if (!(ht->flags & HASH_QUEUE_OPEN))
+		return FALSE;
+
+	hqi = HQ_INDEX(value);
+	list_entry = ht->memptr + ht->queue_heads[hqi].next;
+
+	while (TRUE) {
+		if (list_entry->value == value)
+			return TRUE;
+
+		if (list_entry->next >= ht->count) {
+			error(INFO, corrupt_hq,
+				list_entry->value, 
+				list_entry->next,
+ 				list_entry->order);
+			ht->flags |= HASH_QUEUE_NONE;
+			return FALSE;
+		}
+
+		if (list_entry->next == 0)
+			break;
+
+		list_entry = ht->memptr + list_entry->next;
+	}
+
+	return FALSE;
+}
 
 /*
  *  K&R power function for integers
@@ -4210,6 +4473,14 @@
 {
 	ulonglong total, days, hours, minutes, seconds;
 
+	if (CRASHDEBUG(2))
+		error(INFO, "convert_time: %lld (%llx)\n", count, count);
+
+	if (!machdep->hz) {
+		sprintf(buf, "(cannot calculate: unknown HZ value)");
+		return buf;
+	}
+
         total = (count)/(ulonglong)machdep->hz;
 
         days = total / SEC_DAYS;
@@ -4297,15 +4568,142 @@
 	return STREQ(MACHINE_TYPE, type);
 }
 
+int 
+machine_type_mismatch(char *file, char *e_machine, char *alt, ulong query)
+{
+	if (machine_type(e_machine) || machine_type(alt))
+		return FALSE;
+
+	if (query == KDUMP_LOCAL)  /* already printed by NETDUMP_LOCAL */
+		return TRUE;
+
+	error(WARNING, "machine type mismatch:\n");
+
+	fprintf(fp, "         crash utility: %s\n", MACHINE_TYPE);
+	fprintf(fp, "         %s: %s%s%s\n\n", file, e_machine,
+		alt ? " or " : "", alt ? alt : "");
+		
+	return TRUE;
+}
 void
 command_not_supported()
 {
-	error(FATAL, "command not supported on this architecture\n");
+	error(FATAL, 
+	    "command not supported or applicable on this architecture or kernel\n");
 }
 
 void
 option_not_supported(int c)
 {
-	error(FATAL, "-%c option not supported on this architecture\n", 
+	error(FATAL, 
+	    "-%c option not supported or applicable on this architecture or kernel\n", 
 		(char)c);
 }
+
+void
+please_wait(char *s)
+{
+	if ((pc->flags & SILENT) || !(pc->flags & TTY) || 
+	    !DUMPFILE() || (pc->flags & RUNTIME))
+		return;
+
+	pc->flags |= PLEASE_WAIT;
+
+        fprintf(fp, "\rplease wait... (%s)", s);
+        fflush(fp);
+}
+
+void
+please_wait_done(void)
+{
+	if ((pc->flags & SILENT) || !(pc->flags & TTY) || 
+	    !DUMPFILE() || (pc->flags & RUNTIME))
+		return;
+
+	pc->flags &= ~PLEASE_WAIT;
+
+	fprintf(fp, "\r                                                \r");
+	fflush(fp);
+}
+
+/*
+ *  Compare two pathnames.
+ */
+int
+pathcmp(char *p1, char *p2)
+{
+        char c1, c2;
+
+        do {
+                if ((c1 = *p1++) == '/')
+                        while (*p1 == '/') { p1++; }
+                if ((c2 = *p2++) == '/')
+                        while (*p2 == '/') { p2++; }
+                if (c1 == '\0')
+                        return ((c2 == '/') && (*p2 == '\0')) ? 0 : c1 - c2;
+        } while (c1 == c2);
+
+        return ((c2 == '\0') && (c1 == '/') && (*p1 == '\0')) ? 0 : c1 - c2;
+}
+
+#include <elf.h>
+
+/*
+ *  Check the byte-order of an ELF file vs. the host byte order.
+ */
+int
+endian_mismatch(char *file, char dumpfile_endian, ulong query)
+{
+	char *endian;
+
+	switch (dumpfile_endian)
+	{
+	case ELFDATA2LSB:
+		if (__BYTE_ORDER == __LITTLE_ENDIAN)
+			return FALSE;
+		endian = "big-endian";
+		break;
+	case ELFDATA2MSB:
+		if (__BYTE_ORDER == __BIG_ENDIAN)	
+			return FALSE;
+		endian = "little-endian";
+		break;
+	default:
+		endian = "unknown";	
+		break;
+	}
+
+	if (query == KDUMP_LOCAL)  /* already printed by NETDUMP_LOCAL */
+		return TRUE;
+
+        error(WARNING, "endian mismatch:\n");
+
+        fprintf(fp, "         crash utility: %s\n", 
+		(__BYTE_ORDER == __LITTLE_ENDIAN) ?
+		"little-endian" : "big-endian");
+        fprintf(fp, "         %s: %s\n\n", file, endian);
+
+	return TRUE;	
+}
+
+uint16_t
+swap16(uint16_t val, int swap)
+{
+	if (swap) 
+        	return (((val & 0x00ff) << 8) |
+                	((val & 0xff00) >> 8));
+	else
+		return val;
+}
+
+uint32_t
+swap32(uint32_t val, int swap)
+{
+	if (swap)
+        	return (((val & 0x000000ffU) << 24) |
+                	((val & 0x0000ff00U) <<  8) |
+                	((val & 0x00ff0000U) >>  8) |
+                	((val & 0xff000000U) >> 24));
+	else
+		return val;
+}
--- crash/unwind_x86.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/unwind_x86.h	2006-10-20 14:58:14.000000000 -0400
@@ -0,0 +1,2 @@
+
+
--- crash/ia64.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/ia64.c	2008-09-25 14:56:20.000000000 -0400
@@ -1,8 +1,8 @@
 /* ia64.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,6 +16,8 @@
  */ 
 #ifdef IA64 
 #include "defs.h"
+#include "xen_hyper_defs.h"
+#include <sys/prctl.h>
 
 static int ia64_verify_symbol(const char *, ulong, char);
 static int ia64_eframe_search(struct bt_info *);
@@ -25,6 +27,8 @@
 static void try_old_unwind(struct bt_info *);
 static void ia64_dump_irq(int);
 static ulong ia64_processor_speed(void);
+static int ia64_vtop_4l(ulong, physaddr_t *paddr, ulong *pgd, int, int);
+static int ia64_vtop(ulong, physaddr_t *paddr, ulong *pgd, int, int);
 static int ia64_uvtop(struct task_context *, ulong, physaddr_t *, int);
 static int ia64_kvtop(struct task_context *, ulong, physaddr_t *, int);
 static ulong ia64_get_task_pgd(ulong);
@@ -47,10 +51,12 @@
 static int ia64_verify_paddr(uint64_t);
 static int ia64_available_memory(struct efi_memory_desc_t *);
 static void ia64_post_init(void);
+static ulong ia64_in_per_cpu_mca_stack(void);
 static struct line_number_hook ia64_line_number_hooks[];
 static ulong ia64_get_stackbase(ulong);
 static ulong ia64_get_stacktop(ulong);
 static void parse_cmdline_arg(void);
+static void ia64_calc_phys_start(void);
 
 struct unw_frame_info;
 static void dump_unw_frame_info(struct unw_frame_info *);
@@ -62,6 +68,17 @@
 static ulong rse_read_reg(struct unw_frame_info *, int, int *);
 static void rse_function_params(struct unw_frame_info *, char *);
 
+static int ia64_vtop_4l_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int);
+static int ia64_vtop_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int);
+static int ia64_xen_kdump_p2m_create(struct xen_kdump_data *);
+static int ia64_xendump_p2m_create(struct xendump_data *);
+static void ia64_debug_dump_page(FILE *, char *, char *);
+static char *ia64_xendump_load_page(ulong, struct xendump_data *);
+static int ia64_xendump_page_index(ulong, struct xendump_data *);
+static ulong ia64_xendump_panic_task(struct xendump_data *);
+static void ia64_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *);
+
+static void ia64_init_hyper(int);
 
 struct machine_specific ia64_machine_specific = { 0 };
 
@@ -70,8 +87,22 @@
 {
 	struct syment *sp, *spn;
 
+	if (XEN_HYPER_MODE()) {
+		ia64_init_hyper(when);
+		return;
+	}
+
         switch (when)
         {
+	case SETUP_ENV:
+#if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT)
+		prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0);
+#endif
+#if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT)
+		prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0);
+#endif
+		break;
+
         case PRE_SYMTAB:
                 machdep->verify_symbol = ia64_verify_symbol;
 		machdep->machspec = &ia64_machine_specific;
@@ -92,17 +123,23 @@
 		case 16384:
 			machdep->stacksize = (power(2, 1) * PAGESIZE());
 			break;
+		case 65536:
+			machdep->stacksize = (power(2, 0) * PAGESIZE());
+			break;
 		default:
 			machdep->stacksize = 32*1024;
 			break;
 		}
                 if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
                         error(FATAL, "cannot malloc pgd space.");
+		if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL)
+			error(FATAL, "cannot malloc pud space.");
                 if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
                         error(FATAL, "cannot malloc pmd space.");
                 if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
                         error(FATAL, "cannot malloc ptbl space.");
                 machdep->last_pgd_read = 0;
+                machdep->last_pud_read = 0;
                 machdep->last_pmd_read = 0;
                 machdep->last_ptbl_read = 0;
 		machdep->verify_paddr = ia64_verify_paddr;
@@ -115,14 +152,17 @@
                 break;     
 
         case PRE_GDB:
+
 		if (pc->flags & KERNEL_DEBUG_QUERY)
 			return;
+		
 		/*
 		 * Until the kernel core dump and va_server library code
 		 * do the right thing with respect to the configured page size,
 		 * try to recognize a fatal inequity between the compiled-in 
 		 * page size and the page size used by the kernel.
 		 */ 
+		
 
 		if ((sp = symbol_search("empty_zero_page")) &&
 		    (spn = next_symbol(NULL, sp)) && 
@@ -169,10 +209,14 @@
 				machdep->machspec->kernel_start +
 				GIGABYTES((ulong)(4));
 			if (machdep->machspec->phys_start == UNKNOWN_PHYS_START)
-				machdep->machspec->phys_start = 
-					DEFAULT_PHYS_START;
+				ia64_calc_phys_start();
 		} else
                		machdep->machspec->vmalloc_start = KERNEL_VMALLOC_BASE;
+
+		machdep->xen_kdump_p2m_create = ia64_xen_kdump_p2m_create;
+		machdep->xendump_p2m_create = ia64_xendump_p2m_create;
+		machdep->xendump_panic_task = ia64_xendump_panic_task;
+		machdep->get_xendump_regs = ia64_get_xendump_regs;
                 break;
 
         case POST_GDB:
@@ -202,7 +246,10 @@
 		else if (symbol_exists("_irq_desc"))
 			ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, 
 				"_irq_desc", NULL, 0);
-		machdep->hz = 1024;
+		if (!machdep->hz)
+			machdep->hz = 1024;
+		machdep->section_size_bits = _SECTION_SIZE_BITS;
+		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
 		ia64_create_memmap();
                 break;
 
@@ -228,8 +275,10 @@
 	char *arglist[MAXARGS];
 	ulong value;
         struct machine_specific *ms;
+	int vm_flag;
 
         ms = &ia64_machine_specific;
+	vm_flag = 0;
 
 	if (!strstr(machdep->cmdline_arg, "=")) {
 		errflag = 0;
@@ -284,11 +333,37 @@
 					continue;
 				}
 			}
+		} else if (STRNEQ(arglist[i], "vm=")) {
+			vm_flag++;
+			p = arglist[i] + strlen("vm=");
+			if (strlen(p)) {
+				if (STREQ(p, "4l")) {
+					machdep->flags |= VM_4_LEVEL;
+					continue;
+				}
+			}
 		}
 
 		error(WARNING, "ignoring --machdep option: %s\n", arglist[i]);
 	} 
 
+	if (vm_flag) {
+		switch (machdep->flags & (VM_4_LEVEL))
+		{
+			case VM_4_LEVEL:
+				error(NOTE, "using 4-level pagetable\n");
+				c++;
+				break;
+				
+			default:
+				error(WARNING, "invalid vm= option\n");
+				c++;
+				machdep->flags &= ~(VM_4_LEVEL);
+				break;
+		} 
+	}
+
+
 	if (c)
 		fprintf(fp, "\n");
 }
@@ -314,6 +389,58 @@
 	return TRUE;
 }
 
+
+static ulong
+ia64_in_per_cpu_mca_stack(void)
+{
+	int plen, i;
+	ulong flag;
+	ulong vaddr, paddr, stackbase, stacktop;
+	ulong *__per_cpu_mca;
+	struct task_context *tc;
+
+	tc = CURRENT_CONTEXT();
+
+	if (STRNEQ(CURRENT_COMM(), "INIT"))
+		flag = INIT;
+	else if (STRNEQ(CURRENT_COMM(), "MCA"))
+		flag = MCA;
+	else
+		return 0;
+
+	if (!symbol_exists("__per_cpu_mca") ||
+	    !(plen = get_array_length("__per_cpu_mca", NULL, 0)) ||
+	    (plen < kt->cpus))
+		return 0;
+
+	vaddr = SWITCH_STACK_ADDR(CURRENT_TASK());
+	if (VADDR_REGION(vaddr) != KERNEL_CACHED_REGION)
+		return 0;
+	paddr = ia64_VTOP(vaddr);
+
+	__per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * kt->cpus);
+
+	if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca,
+	    sizeof(ulong) * kt->cpus, "__per_cpu_mca", RETURN_ON_ERROR|QUIET))
+		return 0;
+
+	if (CRASHDEBUG(1)) {
+		for (i = 0; i < kt->cpus; i++) {
+			fprintf(fp, "__per_cpu_mca[%d]: %lx\n", 
+		 		i, __per_cpu_mca[i]);
+		}
+	}
+
+	stackbase = __per_cpu_mca[tc->processor];
+	stacktop = stackbase + (STACKSIZE() * 2);
+	FREEBUF(__per_cpu_mca);
+
+	if ((paddr >= stackbase) && (paddr < stacktop))
+		return flag;
+	else
+		return 0;
+}
+
 void
 ia64_dump_machdep_table(ulong arg)
 {
@@ -401,12 +528,14 @@
 		fprintf(fp, "%sUNW_R0", others++ ? "|" : "");
 	if (machdep->flags & MEM_LIMIT)
 		fprintf(fp, "%sMEM_LIMIT", others++ ? "|" : "");
-	if (machdep->flags & SYSRQ)
-		fprintf(fp, "%sSYSRQ", others++ ? "|" : "");
 	if (machdep->flags & DEVMEMRD)
 		fprintf(fp, "%sDEVMEMRD", others++ ? "|" : "");
 	if (machdep->flags & INIT)
 		fprintf(fp, "%sINIT", others++ ? "|" : "");
+	if (machdep->flags & MCA)
+		fprintf(fp, "%sMCA", others++ ? "|" : "");
+	if (machdep->flags & VM_4_LEVEL)
+		fprintf(fp, "%sVM_4_LEVEL", others++ ? "|" : "");
         fprintf(fp, ")\n");
         fprintf(fp, "             kvbase: %lx\n", machdep->kvbase);
 	fprintf(fp, "  identity_map_base: %lx\n", machdep->identity_map_base);
@@ -445,16 +574,25 @@
 		(machdep->verify_paddr == ia64_verify_paddr) ?
 		"ia64_verify_paddr" : "generic_verify_paddr");
         fprintf(fp, "    init_kernel_pgd: NULL\n");
+	fprintf(fp, "xen_kdump_p2m_create: ia64_xen_kdump_p2m_create()\n");
+        fprintf(fp, " xendump_p2m_create: ia64_xendump_p2m_create()\n");
+	fprintf(fp, " xendump_panic_task: ia64_xendump_panic_task()\n");
+	fprintf(fp, "   get_xendump_regs: ia64_get_xendump_regs()\n");
 	fprintf(fp, "    value_to_symbol: generic_machdep_value_to_symbol()\n");
         fprintf(fp, "  line_number_hooks: ia64_line_number_hooks\n");
         fprintf(fp, "      last_pgd_read: %lx\n", machdep->last_pgd_read);
+        fprintf(fp, "      last_pud_read: %lx\n", machdep->last_pud_read);
         fprintf(fp, "      last_pmd_read: %lx\n", machdep->last_pmd_read);
         fprintf(fp, "     last_ptbl_read: %lx\n", machdep->last_ptbl_read);
         fprintf(fp, "                pgd: %lx\n", (ulong)machdep->pgd);
+        fprintf(fp, "                pud: %lx\n", (ulong)machdep->pud);
         fprintf(fp, "                pmd: %lx\n", (ulong)machdep->pmd);
         fprintf(fp, "               ptbl: %lx\n", (ulong)machdep->ptbl);
 	fprintf(fp, "       ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd);
 	fprintf(fp, "        cmdline_arg: %s\n", machdep->cmdline_arg);
+        fprintf(fp, "  section_size_bits: %ld\n", machdep->section_size_bits);
+        fprintf(fp, "   max_physmem_bits: %ld\n", machdep->max_physmem_bits);
+        fprintf(fp, "  sections_per_root: %ld\n", machdep->sections_per_root);
         fprintf(fp, "           machspec: ia64_machine_specific\n");
 	fprintf(fp, "                   cpu_data_address: %lx\n", 
 			machdep->machspec->cpu_data_address);
@@ -565,9 +703,9 @@
         if (CRASHDEBUG(8))
                 fprintf(fp, "%016lx %s\n", value, name);
 
-	if (STREQ(name, "phys_start") && type == 'A')
-		if (machdep->machspec->phys_start == UNKNOWN_PHYS_START)
-			machdep->machspec->phys_start = value;
+//	if (STREQ(name, "phys_start") && type == 'A')
+//		if (machdep->machspec->phys_start == UNKNOWN_PHYS_START)
+//			machdep->machspec->phys_start = value;
 
 	region = VADDR_REGION(value);
 
@@ -665,74 +803,148 @@
 	return (machdep->mhz = mhz);
 }
 
-
-/*
- *  Translates a user virtual address to its physical address.  cmd_vtop()
- *  sets the verbose flag so that the pte translation gets displayed; all
- *  other callers quietly accept the translation.
- *
- *  This routine can also take mapped kernel virtual addresses if the -u flag
- *  was passed to cmd_vtop().  If so, it makes the translation using the
- *  swapper_pg_dir, making it irrelevant in this processor's case.
+/* Generic abstraction to translate user or kernel virtual
+ * addresses to physical using a 4 level page table.
  */
 static int
-ia64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose)
+ia64_vtop_4l(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr)
 {
-	ulong mm;
-	ulong *pgd;
 	ulong *page_dir;
+	ulong *page_upper;
 	ulong *page_middle;
 	ulong *page_table;
 	ulong pgd_pte;
+	ulong pud_pte;
 	ulong pmd_pte;
 	ulong pte;
 	ulong region, offset;
 
-	if (!tc)
-		error(FATAL, "current context invalid\n");
-
-	*paddr = 0;
-       	region = VADDR_REGION(uvaddr);
+	if (usr) {
+		region = VADDR_REGION(vaddr);
+		offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
+		offset |= (region << (PAGESHIFT() - 6));
+		page_dir = pgd + offset;
+	} else {
+		if (!(pgd = (ulong *)vt->kernel_pgd[0]))
+			error(FATAL, "cannot determine kernel pgd pointer\n");
+		page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));
+	}
 
-	if (IS_KVADDR(uvaddr))
-		return ia64_kvtop(tc, uvaddr, paddr, verbose);
+	if (verbose) 
+		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
 
-	if ((mm = task_mm(tc->task, TRUE)))
-        	pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd));
-	else
-		readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd,
-			sizeof(long), "mm_struct pgd", FAULT_ON_ERROR);
+	FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE());
+	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));
+	
+        if (verbose) 
+                fprintf(fp, "   PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte);
 
+        if (!(pgd_pte))
+		return FALSE;
+	
+	offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+	page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; 
+	
+	FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE());
+	pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper));
+        
 	if (verbose) 
-		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+                fprintf(fp, "   PUD: %lx => %lx\n", (ulong)page_upper, pud_pte);
+        
+	if (!(pud_pte))
+		return FALSE;
+
+	offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+	page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset; 
+
+	FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE());
+	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));
+
+        if (verbose)
+                fprintf(fp, "   PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte);
+
+        if (!(pmd_pte))
+		return FALSE;
+
+        offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1);
+        page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset;
+
+	FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE());
+	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));
 
-        offset = (uvaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
-        offset |= (region << (PAGESHIFT() - 6));
-        page_dir = pgd + offset;
+        if (verbose)
+                fprintf(fp, "   PTE: %lx => %lx\n", (ulong)page_table, pte);
+
+        if (!(pte & (_PAGE_P))) {
+		if (usr)
+		  	*paddr = pte;
+		if (pte && verbose) {
+			fprintf(fp, "\n");
+			ia64_translate_pte(pte, 0, 0);
+		}
+		return FALSE;
+        }
+
+        *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr);
+
+        if (verbose) {
+                fprintf(fp, "  PAGE: %lx\n\n", PAGEBASE(*paddr));
+		ia64_translate_pte(pte, 0, 0);
+	}
+
+	return TRUE;
+}
+
+/* Generic abstraction to translate user or kernel virtual
+ * addresses to physical using a 3 level page table.
+ */
+static int
+ia64_vtop(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr)
+{
+	ulong *page_dir;
+	ulong *page_middle;
+	ulong *page_table;
+	ulong pgd_pte;
+	ulong pmd_pte;
+	ulong pte;
+	ulong region, offset;
+
+	if (usr) {
+		region = VADDR_REGION(vaddr);
+		offset = (vaddr >> PGDIR_SHIFT_3L) & ((PTRS_PER_PGD >> 3) - 1);
+		offset |= (region << (PAGESHIFT() - 6));
+		page_dir = pgd + offset;
+	} else {
+		if (!(pgd = (ulong *)vt->kernel_pgd[0]))
+			error(FATAL, "cannot determine kernel pgd pointer\n");
+		page_dir = pgd + ((vaddr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1));
+	}
 
+	if (verbose)
+		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+	
 	FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE());
 	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));
 	
-        if (verbose) {
+        if (verbose) 
                 fprintf(fp, "   PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte);
-        }
 
         if (!(pgd_pte))
-                goto no_upage;
+		return FALSE;
 
-	offset = (uvaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+	offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 	page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; 
 
 	FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE());
 	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));
 
         if (verbose)
-                fprintf(fp, "   PMD: %lx => %lx\n", (ulong)page_middle,pmd_pte);
+                fprintf(fp, "   PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte);
 
         if (!(pmd_pte))
-                goto no_upage;
+		return FALSE;
 
-        offset = (uvaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1);
+        offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1);
         page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset;
 
 	FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE());
@@ -742,15 +954,16 @@
                 fprintf(fp, "   PTE: %lx => %lx\n", (ulong)page_table, pte);
 
         if (!(pte & (_PAGE_P))) {
-		*paddr = pte;
+		if (usr)
+		  	*paddr = pte;
 		if (pte && verbose) {
 			fprintf(fp, "\n");
 			ia64_translate_pte(pte, 0, 0);
 		}
-                goto no_upage;
+		return FALSE;
         }
 
-        *paddr = (pte & _PFN_MASK) + PAGEOFFSET(uvaddr);
+        *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr);
 
         if (verbose) {
                 fprintf(fp, "  PAGE: %lx\n\n", PAGEBASE(*paddr));
@@ -758,10 +971,50 @@
 	}
 
 	return TRUE;
+}
 
-no_upage:
 
-	return FALSE;
+/*
+ *  Translates a user virtual address to its physical address.  cmd_vtop()
+ *  sets the verbose flag so that the pte translation gets displayed; all
+ *  other callers quietly accept the translation.
+ *
+ *  This routine can also take mapped kernel virtual addresses if the -u flag
+ *  was passed to cmd_vtop().  If so, it makes the translation using the
+ *  swapper_pg_dir, making it irrelevant in this processor's case.
+ */
+static int
+ia64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose)
+{
+	ulong mm;
+	ulong *pgd;
+
+	if (!tc)
+		error(FATAL, "current context invalid\n");
+
+	*paddr = 0;
+
+	if (IS_KVADDR(uvaddr))
+		return ia64_kvtop(tc, uvaddr, paddr, verbose);
+
+	if ((mm = task_mm(tc->task, TRUE)))
+        	pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd));
+	else
+		readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd,
+			sizeof(long), "mm_struct pgd", FAULT_ON_ERROR);
+
+	if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) {
+                if (machdep->flags & VM_4_LEVEL)
+                        return ia64_vtop_4l_xen_wpt(uvaddr, paddr, pgd, verbose, 1);
+                else
+                        return ia64_vtop_xen_wpt(uvaddr, paddr, pgd, verbose, 1);
+	} else {
+		if (machdep->flags & VM_4_LEVEL)
+			return ia64_vtop_4l(uvaddr, paddr, pgd, verbose, 1);
+		else
+			return ia64_vtop(uvaddr, paddr, pgd, verbose, 1);
+	}
+	
 }
 
 
@@ -774,13 +1027,6 @@
 ia64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
 {
         ulong *pgd;
-        ulong *page_dir;
-        ulong *page_middle;
-        ulong *page_table;
-        ulong pgd_pte;
-        ulong pmd_pte;
-        ulong pte;
-	ulong offset;
 
         if (!IS_KVADDR(kvaddr))
                 return FALSE;
@@ -807,72 +1053,33 @@
 	case KERNEL_VMALLOC_REGION:
 		if (ia64_IS_VMALLOC_ADDR(kvaddr))
 			break;
+		if ((kvaddr < machdep->machspec->kernel_start) &&
+		    (machdep->machspec->kernel_region == 
+		    KERNEL_VMALLOC_REGION)) {
+			*paddr = PADDR_NOT_AVAILABLE;
+			return FALSE;
+		}
                 *paddr = ia64_VTOP(kvaddr);
 		if (verbose)
 			fprintf(fp, "[MAPPED IN TRANSLATION REGISTER]\n");
                 return TRUE;
         }
 
-        pgd = (ulong *)vt->kernel_pgd[0];
-
-        if (verbose) {
-                fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
-	}
-
-	page_dir = pgd + ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); 
-
-        FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE());
-        pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));
-
-        if (verbose) {
-                fprintf(fp, "   PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte);
-	}
-
-        if (!(pgd_pte))
-                goto no_kpage;
-
-	offset = (kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
-	page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; 
-
-        FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE());
-        pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));
-
-        if (verbose)
-                fprintf(fp, "   PMD: %lx => %lx\n", (ulong)page_middle, 
-			pmd_pte);
-
-        if (!(pmd_pte))
-                goto no_kpage;
-
-        offset = (kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1);
-        page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset;
-
-        FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE());
-        pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));
-
-        if (verbose)
-                fprintf(fp, "   PTE: %lx => %lx\n", (ulong)page_table, pte);
-
-        if (!(pte & (_PAGE_P))) {
-		if (pte && verbose) {
-			fprintf(fp, "\n");
-			ia64_translate_pte(pte, 0, 0);
-		}
-                goto no_kpage;
-        }
-
-        *paddr = (pte & _PFN_MASK) + PAGEOFFSET(kvaddr);
+        if (!(pgd = (ulong *)vt->kernel_pgd[0]))
+		error(FATAL, "cannot determine kernel pgd pointer\n");
 
-        if (verbose) {
-                fprintf(fp, "  PAGE: %lx\n\n", PAGEBASE(*paddr));
-		ia64_translate_pte(pte, 0, 0);
+	if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) {
+                if (machdep->flags & VM_4_LEVEL)
+                        return ia64_vtop_4l_xen_wpt(kvaddr, paddr, pgd, verbose, 0);
+                else
+                        return ia64_vtop_xen_wpt(kvaddr, paddr, pgd, verbose, 0);
+	} else {
+		if (machdep->flags & VM_4_LEVEL)
+			return ia64_vtop_4l(kvaddr, paddr, pgd, verbose, 0);
+		else
+			return ia64_vtop(kvaddr, paddr, pgd, verbose, 0);
 	}
 
-	return TRUE;
-
-no_kpage:
-
-	return FALSE;
 }
 
 /*
@@ -958,9 +1165,15 @@
 {
         ulong ksp;
 
-        readmem(task + OFFSET(task_struct_thread_ksp), KVADDR,
-                &ksp, sizeof(void *),
-                "thread_struct ksp", FAULT_ON_ERROR);
+	if (XEN_HYPER_MODE()) {
+        	readmem(task + XEN_HYPER_OFFSET(vcpu_thread_ksp), KVADDR,
+                	&ksp, sizeof(void *),
+                	"vcpu thread ksp", FAULT_ON_ERROR);
+	} else {
+        	readmem(task + OFFSET(task_struct_thread_ksp), KVADDR,
+                	&ksp, sizeof(void *),
+                	"thread_struct ksp", FAULT_ON_ERROR);
+	}
 
         return ksp;
 }
@@ -1315,7 +1528,10 @@
         BZERO(&eframe, sizeof(ulong) * NUM_PT_REGS);
 
         open_tmpfile();
-        dump_struct("pt_regs", addr, RADIX(16));
+	if (XEN_HYPER_MODE())
+        	dump_struct("cpu_user_regs", addr, RADIX(16));
+	else
+        	dump_struct("pt_regs", addr, RADIX(16));
         rewind(pc->tmpfile);
 
 	fval = 0;
@@ -1571,6 +1787,12 @@
 
 	fprintf(fp, "  EFRAME: %lx\n", addr);
 
+	if (bt->flags & BT_INCOMPLETE_USER_EFRAME) {
+		fprintf(fp, 
+    "  [exception frame incomplete -- check salinfo for complete context]\n");
+		bt->flags &= ~BT_INCOMPLETE_USER_EFRAME;
+	}
+
 	fprintf(fp, "      B0: %016lx      CR_IIP: %016lx\n", 
 		eframe[P_b0], eframe[P_cr_iip]);
 /**
@@ -2099,7 +2321,7 @@
                 fprintf(fp, "(unknown)\n");
         fprintf(fp, "                        HZ: %d\n", machdep->hz);
         fprintf(fp, "                 PAGE SIZE: %d\n", PAGESIZE());
-        fprintf(fp, "             L1 CACHE SIZE: %d\n", l1_cache_size());
+//      fprintf(fp, "             L1 CACHE SIZE: %d\n", l1_cache_size());
         fprintf(fp, "         KERNEL STACK SIZE: %ld\n", STACKSIZE());
         fprintf(fp, "      KERNEL CACHED REGION: %lx\n",
 		(ulong)KERNEL_CACHED_REGION << REGION_SHIFT);
@@ -2371,9 +2593,10 @@
             !readmem(ia64_boot_param+
 	    MEMBER_OFFSET("ia64_boot_param", "efi_memmap"),
             KVADDR, &efi_memmap, sizeof(uint64_t), "efi_memmap", 
-	    RETURN_ON_ERROR)) {
-		error(WARNING, "cannot read ia64_boot_param: " 
-			"memory verification will not be performed\n\n");
+	    QUIET|RETURN_ON_ERROR)) {
+		if (!XEN() || CRASHDEBUG(1))
+			error(WARNING, "cannot read ia64_boot_param: " 
+			    "memory verification will not be performed\n\n");
 		return;
 	}
 
@@ -2391,9 +2614,11 @@
 
 	if ((ms->mem_limit && (efi_memmap >= ms->mem_limit)) ||
             !readmem(PTOV(efi_memmap), KVADDR, memmap,
-	    ms->efi_memmap_size, "efi_mmap contents", RETURN_ON_ERROR)) {
-		error(WARNING, "cannot read efi_mmap: " 
-			"memory verification will not be performed\n");
+	    ms->efi_memmap_size, "efi_mmap contents", 
+	    QUIET|RETURN_ON_ERROR)) {
+		if (!XEN() || (XEN() && CRASHDEBUG(1)))
+			error(WARNING, "cannot read efi_mmap: " 
+			    "EFI memory verification will not be performed\n\n");
 		free(memmap);
 		return;
 	}
@@ -2605,6 +2830,8 @@
 ia64_post_init(void)
 {
 	struct machine_specific *ms;
+	struct gnu_request req;
+	ulong flag;
 
 	ms = &ia64_machine_specific;
 
@@ -2677,12 +2904,16 @@
 		}
 	}
 
-        if (symbol_exists("ia64_init_stack") && !ms->ia64_init_stack_size) 
-		ms->ia64_init_stack_size = get_array_length("ia64_init_stack", 
-			NULL, 0);
+        if (symbol_exists("ia64_init_stack") && !ms->ia64_init_stack_size) { 
+		get_symbol_type("ia64_init_stack", NULL, &req);
+		ms->ia64_init_stack_size = req.length;
+	}
 
 	if (DUMPFILE() && ia64_in_init_stack(SWITCH_STACK_ADDR(CURRENT_TASK())))
 		machdep->flags |= INIT;
+
+	if (DUMPFILE() && (flag = ia64_in_per_cpu_mca_stack()))
+		machdep->flags |= flag;
 }
 
 /*
@@ -3326,4 +3557,775 @@
         	(vaddr < (ulong)KERNEL_UNCACHED_BASE));
 }
 
+/* Generic abstraction to translate user or kernel virtual
+ * addresses to physical using a 4 level page table.
+ */
+static int
+ia64_vtop_4l_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr)
+{
+	error(FATAL, "ia64_vtop_4l_xen_wpt: TBD\n");
+	return FALSE;
+#ifdef TBD
+	ulong *page_dir;
+	ulong *page_upper;
+	ulong *page_middle;
+	ulong *page_table;
+	ulong pgd_pte;
+	ulong pud_pte;
+	ulong pmd_pte;
+	ulong pte;
+	ulong region, offset;
+
+
+	if (usr) {
+		region = VADDR_REGION(vaddr);
+		offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
+		offset |= (region << (PAGESHIFT() - 6));
+		page_dir = pgd + offset;
+	} else {
+		if (!(pgd = (ulong *)vt->kernel_pgd[0]))
+			error(FATAL, "cannot determine kernel pgd pointer\n");
+		page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));
+	}
+
+	if (verbose) 
+		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+
+	FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE());
+	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));
+	
+        if (verbose) 
+                fprintf(fp, "   PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte);
+
+        if (!(pgd_pte))
+		return FALSE;
+	
+	offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+	page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; 
+	
+	FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE());
+	pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper));
+        
+	if (verbose) 
+                fprintf(fp, "   PUD: %lx => %lx\n", (ulong)page_upper, pud_pte);
+        
+	if (!(pud_pte))
+		return FALSE;
+
+	offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+	page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset; 
+
+	FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE());
+	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));
+
+        if (verbose)
+                fprintf(fp, "   PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte);
+
+        if (!(pmd_pte))
+		return FALSE;
+
+        offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1);
+        page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset;
+
+	FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE());
+	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));
+
+        if (verbose)
+                fprintf(fp, "   PTE: %lx => %lx\n", (ulong)page_table, pte);
+
+        if (!(pte & (_PAGE_P))) {
+		if (usr)
+		  	*paddr = pte;
+		if (pte && verbose) {
+			fprintf(fp, "\n");
+			ia64_translate_pte(pte, 0, 0);
+		}
+		return FALSE;
+        }
+
+        *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr);
+
+        if (verbose) {
+                fprintf(fp, "  PAGE: %lx\n\n", PAGEBASE(*paddr));
+		ia64_translate_pte(pte, 0, 0);
+	}
+
+	return TRUE;
+#endif
+}
+
+/* Generic abstraction to translate user or kernel virtual
+ * addresses to physical using a 3 level page table.
+ */
+static int
+ia64_vtop_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr)
+{
+	error(FATAL, "ia64_vtop_xen_wpt: TBD\n");
+	return FALSE;
+#ifdef TBD
+	ulong *page_dir;
+	ulong *page_middle;
+	ulong *page_table;
+	ulong pgd_pte;
+	ulong pmd_pte;
+	ulong pte;
+	ulong region, offset;
+
+
+	if (usr) {
+		region = VADDR_REGION(vaddr);
+		offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
+		offset |= (region << (PAGESHIFT() - 6));
+		page_dir = pgd + offset;
+	} else {
+		if (!(pgd = (ulong *)vt->kernel_pgd[0]))
+			error(FATAL, "cannot determine kernel pgd pointer\n");
+		page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));
+	}
+
+	if (verbose)
+		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+
+	FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE());
+	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));
+	
+        if (verbose) 
+                fprintf(fp, "   PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte);
+
+        if (!(pgd_pte))
+		return FALSE;
+
+	offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+	page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; 
+
+	FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE());
+	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));
+
+        if (verbose)
+                fprintf(fp, "   PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte);
+
+        if (!(pmd_pte))
+		return FALSE;
+
+        offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1);
+        page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset;
+
+	FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE());
+	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));
+
+        if (verbose)
+                fprintf(fp, "   PTE: %lx => %lx\n", (ulong)page_table, pte);
+
+        if (!(pte & (_PAGE_P))) {
+		if (usr)
+		  	*paddr = pte;
+		if (pte && verbose) {
+			fprintf(fp, "\n");
+			ia64_translate_pte(pte, 0, 0);
+		}
+		return FALSE;
+        }
+
+        *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr);
+
+        if (verbose) {
+                fprintf(fp, "  PAGE: %lx\n\n", PAGEBASE(*paddr));
+		ia64_translate_pte(pte, 0, 0);
+	}
+
+	return TRUE;
+#endif
+}
+
+#include "netdump.h"
+
+/*
+ *  Determine the relocatable physical address base.
+ */
+static void
+ia64_calc_phys_start(void)
+{
+	FILE *iomem;
+	int i, found, errflag;
+	char buf[BUFSIZE];
+	char *p1;
+	ulong kernel_code_start;
+	struct vmcore_data *vd;
+	Elf64_Phdr *phdr;
+	ulong phys_start, text_start;
+
+	/*
+	 *  Default to 64MB.
+	 */
+	machdep->machspec->phys_start = DEFAULT_PHYS_START;
+
+	text_start = symbol_exists("_text") ? symbol_value("_text") : BADADDR;
+
+	if (ACTIVE()) {
+	        if ((iomem = fopen("/proc/iomem", "r")) == NULL)
+	                return;
+	
+		errflag = 1;
+	        while (fgets(buf, BUFSIZE, iomem)) {
+			if (strstr(buf, ": Kernel code")) {
+				clean_line(buf);
+				errflag = 0;
+				break;
+			}
+		}
+	        fclose(iomem);
+	
+		if (errflag)
+			return;
+	
+		if (!(p1 = strstr(buf, "-")))
+			return;
+		else
+			*p1 = NULLCHAR;
+	
+		errflag = 0;
+		kernel_code_start = htol(buf, RETURN_ON_ERROR|QUIET, &errflag);
+	        if (errflag)
+			return;
+	
+		machdep->machspec->phys_start = kernel_code_start;
+	
+		if (CRASHDEBUG(1)) {
+			if (text_start == BADADDR)
+				fprintf(fp, "_text: (unknown)  ");
+			else
+				fprintf(fp, "_text: %lx  ", text_start);
+			fprintf(fp, "Kernel code: %lx -> ", kernel_code_start);
+			fprintf(fp, "phys_start: %lx\n\n", 
+				machdep->machspec->phys_start);
+		}
+
+		return;
+	}
+
+	/*
+	 *  Get relocation value from whatever dumpfile format is being used.
+	 */
+
+        if (DISKDUMP_DUMPFILE()) {
+                if (diskdump_phys_base(&phys_start)) {
+                        machdep->machspec->phys_start = phys_start;
+			if (CRASHDEBUG(1))
+				fprintf(fp, 
+				    "compressed kdump: phys_start: %lx\n",
+					phys_start);
+		}
+                return;
+        } else if (LKCD_DUMPFILE()) {
+
+		if (lkcd_get_kernel_start(&phys_start)) {
+                        machdep->machspec->phys_start = phys_start;
+			if (CRASHDEBUG(1))
+				fprintf(fp,
+				    "LKCD dump: phys_start: %lx\n",
+					phys_start);
+		}
+	}
+
+	if ((vd = get_kdump_vmcore_data())) {
+		/*
+		 *  There should be at most one region 5 region, and it
+		 *  should be equal to "_text".  If not, take whatever
+		 *  region 5 address comes first and hope for the best.
+		 */
+                for (i = found = 0; i < vd->num_pt_load_segments; i++) {
+			phdr = vd->load64 + i;
+			if (phdr->p_vaddr == text_start) {
+				machdep->machspec->phys_start = phdr->p_paddr;
+				found++;
+				break;
+			}
+		}
+
+                for (i = 0; !found && (i < vd->num_pt_load_segments); i++) {
+			phdr = vd->load64 + i;
+			if (VADDR_REGION(phdr->p_vaddr) == KERNEL_VMALLOC_REGION) {
+				machdep->machspec->phys_start = phdr->p_paddr;
+				found++;
+				break;
+			}
+		}
+
+		if (found && CRASHDEBUG(1)) {
+			if (text_start == BADADDR)
+				fprintf(fp, "_text: (unknown)  ");
+			else
+				fprintf(fp, "_text: %lx  ", text_start);
+			fprintf(fp, "p_vaddr: %lx  p_paddr: %lx\n", 
+				phdr->p_vaddr, phdr->p_paddr);
+		}
+
+		return;
+	}
+}
+
+/*
+ *  From the xen vmcore, create an index of mfns for each page that makes
+ *  up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array.
+ */
+static int
+ia64_xen_kdump_p2m_create(struct xen_kdump_data *xkd)
+{
+	/*
+	 *  Temporarily read physical (machine) addresses from vmcore by
+	 *  going directly to read_netdump() instead of via read_kdump().
+	 */
+	pc->readmem = read_netdump;
+
+	if (CRASHDEBUG(1))
+		fprintf(fp, "ia64_xen_kdump_p2m_create: p2m_mfn: %lx\n", xkd->p2m_mfn);
+
+	if ((xkd->p2m_mfn_frame_list = (ulong *)malloc(PAGESIZE())) == NULL)
+		error(FATAL, "cannot malloc p2m_frame_list");
+
+	if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->p2m_mfn_frame_list, PAGESIZE(), 
+	    "xen kdump p2m mfn page", RETURN_ON_ERROR))
+		error(FATAL, "cannot read xen kdump p2m mfn page\n");
+
+	xkd->p2m_frames = PAGESIZE()/sizeof(ulong);
+
+	pc->readmem = read_kdump;
+
+	return TRUE;
+}
+
+physaddr_t
+ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo)
+{
+	ulong pgd_idx, pte_idx;
+	ulong pmd, pte;
+	physaddr_t paddr;
+
+	/*
+	 *  Temporarily read physical (machine) addresses from vmcore by
+	 *  going directly to read_netdump() instead of via read_kdump().
+	 */
+	pc->readmem = read_netdump;
+
+	xkd->accesses += 2;
+
+	pgd_idx = (pseudo >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1);
+	pmd = xkd->p2m_mfn_frame_list[pgd_idx] & _PFN_MASK;
+	if (!pmd) {
+		paddr = P2M_FAILURE;
+		goto out;
+	}
+
+	pmd += ((pseudo >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong);
+	if (pmd != xkd->last_pmd_read) {
+		if (!readmem(pmd, PHYSADDR, &pte, sizeof(ulong), 
+		    "ia64_xen_kdump_p2m pmd", RETURN_ON_ERROR)) {
+			xkd->last_pmd_read = BADADDR;
+			xkd->last_mfn_read = BADADDR;
+			paddr = P2M_FAILURE;
+			goto out;
+		}
+		xkd->last_pmd_read = pmd;
+	} else {
+		pte = xkd->last_mfn_read;
+		xkd->cache_hits++;
+	}
+	pte = pte & _PFN_MASK;
+	if (!pte) {
+		paddr = P2M_FAILURE;
+		goto out;
+	}
+
+	if (pte != xkd->last_mfn_read) {
+		if (!readmem(pte, PHYSADDR, xkd->page, PAGESIZE(), 
+		    "ia64_xen_kdump_p2m pte page", RETURN_ON_ERROR)) {
+			xkd->last_pmd_read = BADADDR;
+			xkd->last_mfn_read = BADADDR;
+			paddr = P2M_FAILURE;
+			goto out;
+		}
+		xkd->last_mfn_read = pte;
+	} else
+		xkd->cache_hits++;
+
+	pte_idx = (pseudo >> PAGESHIFT()) & (PTRS_PER_PTE - 1);
+	paddr = *(((ulong *)xkd->page) + pte_idx);
+	if (!(paddr & _PAGE_P)) {
+		paddr = P2M_FAILURE;
+		goto out;
+	}
+	paddr = (paddr & _PFN_MASK) | PAGEOFFSET(pseudo);
+
+out:
+	pc->readmem = read_kdump;
+	return paddr;
+}
+
+#include "xendump.h"
+
+/*
+ *  Create an index of mfns for each page that makes up the
+ *  kernel's complete phys_to_machine_mapping[max_pfn] array.
+ */
+static int
+ia64_xendump_p2m_create(struct xendump_data *xd)
+{
+	if (!symbol_exists("phys_to_machine_mapping")) {
+		xd->flags |= XC_CORE_NO_P2M;
+		return TRUE;
+	}
+
+	error(FATAL, "ia64_xendump_p2m_create: TBD\n");
+
+	/* dummy calls for clean "make [wW]arn" */
+	ia64_debug_dump_page(NULL, NULL, NULL);
+	ia64_xendump_load_page(0, xd);
+	ia64_xendump_page_index(0, xd);
+	ia64_xendump_panic_task(xd);  /* externally called */
+	ia64_get_xendump_regs(xd, NULL, NULL, NULL);  /* externally called */
+
+	return FALSE;
+}
+
+static void
+ia64_debug_dump_page(FILE *ofp, char *page, char *name)
+{
+        int i;
+        ulong *up;
+
+        fprintf(ofp, "%s\n", name);
+
+        up = (ulong *)page;
+        for (i = 0; i < 1024; i++) {
+                fprintf(ofp, "%016lx: %016lx %016lx\n",
+                        (ulong)((i * 2) * sizeof(ulong)),
+                        *up, *(up+1));
+                up += 2;
+        }
+}
+
+/*
+ *  Find the page associated with the kvaddr, and read its contents
+ *  into the passed-in buffer.
+ */
+static char *
+ia64_xendump_load_page(ulong kvaddr, struct xendump_data *xd)
+{
+	error(FATAL, "ia64_xendump_load_page: TBD\n");
+
+	return NULL;
+}
+
+/*
+ *  Find the dumpfile page index associated with the kvaddr.
+ */
+static int
+ia64_xendump_page_index(ulong kvaddr, struct xendump_data *xd)
+{
+	error(FATAL, "ia64_xendump_page_index: TBD\n");
+
+	return 0;
+}
+
+static ulong
+ia64_xendump_panic_task(struct xendump_data *xd)
+{
+	if (CRASHDEBUG(1))
+		error(INFO, "ia64_xendump_panic_task: TBD\n");
+
+	return NO_TASK;
+}
+
+static void
+ia64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip, ulong *rsp)
+{
+        machdep->get_stack_frame(bt, rip, rsp);
+
+	if (is_task_active(bt->task) &&
+            !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS)) &&
+	    STREQ(closest_symbol(*rip), "schedule"))
+		error(INFO, 
+		    "xendump: switch_stack possibly not saved -- try \"bt -t\"\n");
+}
+
+/* for XEN Hypervisor analysis */
+
+static int
+ia64_is_kvaddr_hyper(ulong addr)
+{
+	return (addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END);
+}
+
+static int
+ia64_kvtop_hyper(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
+{
+	ulong virt_percpu_start, phys_percpu_start;
+	ulong addr, dirp, entry;
+
+	if (!IS_KVADDR(kvaddr))
+		return FALSE;
+
+	if (PERCPU_VIRT_ADDR(kvaddr)) {
+		virt_percpu_start = symbol_value("__phys_per_cpu_start");
+		phys_percpu_start = virt_percpu_start - DIRECTMAP_VIRT_START;
+		*paddr = kvaddr - PERCPU_ADDR + phys_percpu_start;
+		return TRUE;
+	} else if (DIRECTMAP_VIRT_ADDR(kvaddr)) {
+		*paddr = kvaddr - DIRECTMAP_VIRT_START;
+		return TRUE;
+	} else if (!FRAME_TABLE_VIRT_ADDR(kvaddr)) {
+		return FALSE;
+	}
+
+	/* frametable virtual address */
+	addr = kvaddr - xhmachdep->frame_table;
+
+	dirp = symbol_value("frametable_pg_dir");
+	dirp += ((addr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1)) * sizeof(ulong);
+	readmem(dirp, KVADDR, &entry, sizeof(ulong), 
+		"frametable_pg_dir", FAULT_ON_ERROR);
+
+	dirp = entry & _PFN_MASK;
+	if (!dirp)
+		return FALSE;
+	dirp += ((addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong);
+	readmem(dirp, PHYSADDR, &entry, sizeof(ulong), 
+		"frametable pmd", FAULT_ON_ERROR);
+
+	dirp = entry & _PFN_MASK;
+	if (!dirp)
+		return FALSE;
+	dirp += ((addr >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) * sizeof(ulong);
+	readmem(dirp, PHYSADDR, &entry, sizeof(ulong), 
+		"frametable pte", FAULT_ON_ERROR);
+
+	if (!(entry & _PAGE_P))
+		return FALSE;
+
+	*paddr = (entry & _PFN_MASK) + (kvaddr & (PAGESIZE() - 1));
+	return TRUE;
+}
+
+static void
+ia64_post_init_hyper(void)
+{
+	struct machine_specific *ms;
+	ulong frame_table;
+
+	ms = &ia64_machine_specific;
+
+	if (symbol_exists("unw_init_frame_info")) {
+		machdep->flags |= NEW_UNWIND;
+		if (MEMBER_EXISTS("unw_frame_info", "pt")) {
+			if (MEMBER_EXISTS("cpu_user_regs", "ar_csd")) {
+				machdep->flags |= NEW_UNW_V3;
+				ms->unwind_init = unwind_init_v3;
+				ms->unwind = unwind_v3;
+				ms->unwind_debug = unwind_debug_v3;
+				ms->dump_unwind_stats = dump_unwind_stats_v3;
+			} else {
+				machdep->flags |= NEW_UNW_V2;
+				ms->unwind_init = unwind_init_v2;
+				ms->unwind = unwind_v2;
+				ms->unwind_debug = unwind_debug_v2;
+				ms->dump_unwind_stats = dump_unwind_stats_v2;
+			}
+		} else {
+			machdep->flags |= NEW_UNW_V1;
+			ms->unwind_init = unwind_init_v1;
+			ms->unwind = unwind_v1;
+			ms->unwind_debug = unwind_debug_v1;
+			ms->dump_unwind_stats = dump_unwind_stats_v1;
+		}
+	} else {
+		machdep->flags |= OLD_UNWIND;
+		ms->unwind_init = ia64_old_unwind_init;
+		ms->unwind = ia64_old_unwind;
+	}
+	ms->unwind_init();
+
+	if (symbol_exists("frame_table")) {
+		frame_table = symbol_value("frame_table");
+		readmem(frame_table, KVADDR, &xhmachdep->frame_table, sizeof(ulong),
+			"frame_table virtual address", FAULT_ON_ERROR);
+	} else {
+		error(FATAL, "cannot find frame_table virtual address.");
+	}
+}
+
+int
+ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt)
+{
+	int plen, i;
+	ulong paddr, stackbase, stacktop;
+	ulong *__per_cpu_mca;
+	struct xen_hyper_vcpu_context *vcc;
+
+	vcc = xen_hyper_vcpu_to_vcpu_context(bt->task);
+	if (!vcc)
+		return 0;
+
+	if (!symbol_exists("__per_cpu_mca") ||
+	    !(plen = get_array_length("__per_cpu_mca", NULL, 0)) ||
+	    (plen < xht->pcpus))
+		return 0;
+
+	if (!machdep->kvtop(NULL, addr, &paddr, 0))
+		return 0;
+
+	__per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * plen);
+
+	if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca,
+	    sizeof(ulong) * plen, "__per_cpu_mca", RETURN_ON_ERROR|QUIET))
+		return 0;
+
+	if (CRASHDEBUG(1)) {
+		for (i = 0; i < plen; i++) {
+			fprintf(fp, "__per_cpu_mca[%d]: %lx\n", 
+		 		i, __per_cpu_mca[i]);
+		}
+	}
+
+	stackbase = __per_cpu_mca[vcc->processor];
+	stacktop = stackbase + (STACKSIZE() * 2);
+	FREEBUF(__per_cpu_mca);
+
+	if ((paddr >= stackbase) && (paddr < stacktop))
+		return 1;
+	else
+		return 0;
+}
+
+static void
+ia64_init_hyper(int when)
+{
+	struct syment *sp;
+
+        switch (when)
+        {
+	case SETUP_ENV:
+#if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT)
+		prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0);
+#endif
+#if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT)
+		prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0);
+#endif
+		break;
+
+        case PRE_SYMTAB:
+                machdep->verify_symbol = ia64_verify_symbol;
+		machdep->machspec = &ia64_machine_specific;
+		if (pc->flags & KERNEL_DEBUG_QUERY)
+			return;
+                machdep->pagesize = memory_page_size();
+                machdep->pageshift = ffs(machdep->pagesize) - 1;
+                machdep->pageoffset = machdep->pagesize - 1;
+                machdep->pagemask = ~(machdep->pageoffset);
+		switch (machdep->pagesize)
+		{
+		case 4096:
+			machdep->stacksize = (power(2, 3) * PAGESIZE());
+			break;
+		case 8192:
+			machdep->stacksize = (power(2, 2) * PAGESIZE());
+			break;
+		case 16384:
+			machdep->stacksize = (power(2, 1) * PAGESIZE());
+			break;
+		case 65536:
+			machdep->stacksize = (power(2, 0) * PAGESIZE());
+			break;
+		default:
+			machdep->stacksize = 32*1024;
+			break;
+		}
+                if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
+                        error(FATAL, "cannot malloc pgd space.");
+		if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL)
+			error(FATAL, "cannot malloc pud space.");
+                if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
+                        error(FATAL, "cannot malloc pmd space.");
+                if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
+                        error(FATAL, "cannot malloc ptbl space.");
+                machdep->last_pgd_read = 0;
+                machdep->last_pud_read = 0;
+                machdep->last_pmd_read = 0;
+                machdep->last_ptbl_read = 0;
+		machdep->verify_paddr = ia64_verify_paddr;
+		machdep->ptrs_per_pgd = PTRS_PER_PGD;
+                machdep->machspec->phys_start = UNKNOWN_PHYS_START;
+		/* ODA: enable if a hypervisor version is ever needed
+                if (machdep->cmdline_arg) 
+			parse_cmdline_arg(); */
+                break;     
+
+        case PRE_GDB:
+
+		if (pc->flags & KERNEL_DEBUG_QUERY)
+			return;
+		
+                machdep->kvbase = HYPERVISOR_VIRT_START;
+		machdep->identity_map_base = HYPERVISOR_VIRT_START;
+                machdep->is_kvaddr = ia64_is_kvaddr_hyper;
+                machdep->is_uvaddr = generic_is_uvaddr;
+                machdep->eframe_search = ia64_eframe_search;
+                machdep->back_trace = ia64_back_trace_cmd;
+                machdep->processor_speed = xen_hyper_ia64_processor_speed;
+                machdep->uvtop = ia64_uvtop;
+                machdep->kvtop = ia64_kvtop_hyper;
+		machdep->get_stack_frame = ia64_get_stack_frame;
+		machdep->get_stackbase = ia64_get_stackbase;
+		machdep->get_stacktop = ia64_get_stacktop;
+                machdep->translate_pte = ia64_translate_pte;
+                machdep->memory_size = xen_hyper_ia64_memory_size;
+                machdep->dis_filter = ia64_dis_filter;
+		machdep->cmd_mach = ia64_cmd_mach;
+		machdep->get_smp_cpus = xen_hyper_ia64_get_smp_cpus;
+		machdep->line_number_hooks = ia64_line_number_hooks;
+		machdep->value_to_symbol = generic_machdep_value_to_symbol;
+                machdep->init_kernel_pgd = NULL;
+
+		if ((sp = symbol_search("_stext"))) {
+			machdep->machspec->kernel_region = 
+				VADDR_REGION(sp->value);
+			machdep->machspec->kernel_start = sp->value;
+		} else {
+//			machdep->machspec->kernel_region = KERNEL_CACHED_REGION;
+//			machdep->machspec->kernel_start = KERNEL_CACHED_BASE;
+		}
+
+		/* machdep table for Xen Hypervisor */
+		xhmachdep->pcpu_init = xen_hyper_ia64_pcpu_init;
+                break;
+
+        case POST_GDB:
+		STRUCT_SIZE_INIT(switch_stack, "switch_stack");
+		MEMBER_OFFSET_INIT(thread_struct_fph, "thread_struct", "fph");
+		MEMBER_OFFSET_INIT(switch_stack_b0, "switch_stack", "b0");
+		MEMBER_OFFSET_INIT(switch_stack_ar_bspstore,  
+			"switch_stack", "ar_bspstore");
+		MEMBER_OFFSET_INIT(switch_stack_ar_pfs,  
+			"switch_stack", "ar_pfs");
+		MEMBER_OFFSET_INIT(switch_stack_ar_rnat, 
+			"switch_stack", "ar_rnat");
+		MEMBER_OFFSET_INIT(switch_stack_pr, 
+			"switch_stack", "pr");
+
+		XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_ia64, "cpuinfo_ia64");
+		XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_proc_freq, "cpuinfo_ia64", "proc_freq");
+		XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_vendor, "cpuinfo_ia64", "vendor");
+		if (symbol_exists("per_cpu__cpu_info")) {
+			xht->cpu_data_address = symbol_value("per_cpu__cpu_info");
+		}
+		/* kakuma Can this be calculated? */
+		if (!machdep->hz) {
+			machdep->hz = XEN_HYPER_HZ;
+		}
+                break;
+
+	case POST_INIT:
+		ia64_post_init_hyper();
+		break;
+	}
+}
 #endif
--- crash/alpha.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/alpha.c	2006-10-11 09:14:35.000000000 -0400
@@ -1,8 +1,8 @@
 /* alpha.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -186,7 +186,8 @@
 				"irq_desc", NULL, 0);
         	else
                 	machdep->nr_irqs = 0;
-		machdep->hz = HZ;
+		if (!machdep->hz)
+			machdep->hz = HZ;
 		break;
 
 	case POST_INIT:
@@ -1858,8 +1859,6 @@
 	fprintf(fp, "              flags: %lx (", machdep->flags);
         if (machdep->flags & HWRESET)
                 fprintf(fp, "%sHWRESET", others++ ? "|" : "");
-        if (machdep->flags & SYSRQ)
-                fprintf(fp, "%sSYSRQ", others++ ? "|" : "");
         fprintf(fp, ")\n");
 	fprintf(fp, "             kvbase: %lx\n", machdep->kvbase);
 	fprintf(fp, "  identity_map_base: %lx\n", machdep->identity_map_base);
--- crash/Makefile.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/Makefile	2009-02-12 09:31:03.000000000 -0500
@@ -3,8 +3,8 @@
 # Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 #       www.missioncriticallinux.com, info@missioncriticallinux.com
 #
-# Copyright (C) 2002, 2003, 2004, 2005 David Anderson
-# Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson
+# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -35,10 +35,12 @@
 #
 # GDB, GDB_FILES and GDB_OFILES will be configured automatically by configure 
 #
-GDB=gdb-6.1
-GDB_FILES=${GDB_6.1_FILES}
+GDB=
+GDB_FILES=
 GDB_OFILES=
 
+GDB_PATCH_FILES=gdb-6.1.patch
+
 #
 # Default installation directory
 #
@@ -60,34 +62,81 @@
 # (2) Or invoke make like so:
 #    make LDFLAGS=-static NAT_CLIBS="-lc -lresolv" GDBSERVER_LIBS="-lc -lresolv"
 
-GENERIC_HFILES=defs.h 
+GENERIC_HFILES=defs.h xen_hyper_defs.h
 MCORE_HFILES=va_server.h vas_crash.h
-REDHAT_HFILES=netdump.h diskdump.h
+REDHAT_HFILES=netdump.h diskdump.h xendump.h
 LKCD_DUMP_HFILES=lkcd_vmdump_v1.h lkcd_vmdump_v2_v3.h lkcd_dump_v5.h \
-        lkcd_dump_v7.h lkcd_dump_v8.h lkcd_fix_mem.h
+        lkcd_dump_v7.h lkcd_dump_v8.h
+LKCD_OBSOLETE_HFILES=lkcd_fix_mem.h
 LKCD_TRACE_HFILES=lkcd_x86_trace.h
 IBM_HFILES=ibm_common.h
-UNWIND_HFILES=unwind.h unwind_i.h rse.h
+UNWIND_HFILES=unwind.h unwind_i.h rse.h unwind_x86.h unwind_x86_64.h
 
 CFILES=main.c tools.c global_data.c memory.c filesys.c help.c task.c \
 	kernel.c test.c gdb_interface.c configure.c net.c dev.c \
-	alpha.c x86.c ppc.c ia64.c s390.c s390x.c ppc64.c x86_64.c \
+	alpha.c x86.c ppc.c ia64.c s390.c s390x.c s390dbf.c ppc64.c x86_64.c \
 	extensions.c remote.c va_server.c va_server_v1.c symbols.c cmdline.c \
 	lkcd_common.c lkcd_v1.c lkcd_v2_v3.c lkcd_v5.c lkcd_v7.c lkcd_v8.c\
 	lkcd_fix_mem.c s390_dump.c lkcd_x86_trace.c \
-	netdump.c diskdump.c unwind.c unwind_decoder.c
+	netdump.c diskdump.c xendump.c unwind.c unwind_decoder.c \
+	unwind_x86_32_64.c \
+ 	xen_hyper.c xen_hyper_command.c xen_hyper_global_data.c \
+	xen_hyper_dump_tables.c
 
 SOURCE_FILES=${CFILES} ${GENERIC_HFILES} ${MCORE_HFILES} \
 	${REDHAT_CFILES} ${REDHAT_HFILES} ${UNWIND_HFILES} \
-	${LKCD_DUMP_HFILES} ${LKCD_TRACE_HFILES} ${IBM_HFILES} 
+	${LKCD_DUMP_HFILES} ${LKCD_TRACE_HFILES} ${LKCD_OBSOLETE_HFILES}\
+	${IBM_HFILES} 
 
 OBJECT_FILES=main.o tools.o global_data.o memory.o filesys.o help.o task.o \
 	build_data.o kernel.o test.o gdb_interface.o net.o dev.o \
-	alpha.o x86.o ppc.o ia64.o s390.o s390x.o ppc64.o x86_64.o \
+	alpha.o x86.o ppc.o ia64.o s390.o s390x.o s390dbf.o ppc64.o x86_64.o \
 	extensions.o remote.o va_server.o va_server_v1.o symbols.o cmdline.o \
 	lkcd_common.o lkcd_v1.o lkcd_v2_v3.o lkcd_v5.o lkcd_v7.o lkcd_v8.o \
-	lkcd_fix_mem.o s390_dump.o netdump.o diskdump.o \
-	lkcd_x86_trace.o unwind_v1.o unwind_v2.o unwind_v3.o
+	lkcd_fix_mem.o s390_dump.o netdump.o diskdump.o xendump.o \
+	lkcd_x86_trace.o unwind_v1.o unwind_v2.o unwind_v3.o \
+	unwind_x86_32_64.o \
+ 	xen_hyper.o xen_hyper_command.o xen_hyper_global_data.o \
+	xen_hyper_dump_tables.o
+
+# This is the current set of crash extensions sources.  They are not built
+# by default unless the third command line of the "all:" stanza is uncommented.
+# Alternatively, they can be built by entering "make extensions" from this
+# directory.
+
+EXTENSIONS=extensions
+EXTENSION_SOURCE_FILES=${EXTENSIONS}/Makefile ${EXTENSIONS}/echo.c ${EXTENSIONS}/dminfo.c \
+        ${EXTENSIONS}/libsial/Makefile \
+        ${EXTENSIONS}/libsial/mkbaseop.c \
+        ${EXTENSIONS}/libsial/README \
+        ${EXTENSIONS}/libsial/README.sial \
+        ${EXTENSIONS}/libsial/sial_alloc.c \
+        ${EXTENSIONS}/libsial/sial_api.c \
+        ${EXTENSIONS}/libsial/sial_api.h \
+        ${EXTENSIONS}/libsial/sial_builtin.c \
+        ${EXTENSIONS}/libsial/sial_case.c \
+        ${EXTENSIONS}/libsial/sial_define.c \
+        ${EXTENSIONS}/libsial/sial_func.c \
+        ${EXTENSIONS}/libsial/sial.h \
+        ${EXTENSIONS}/libsial/sial_input.c \
+        ${EXTENSIONS}/libsial/sial.l \
+        ${EXTENSIONS}/libsial/sial-lsed \
+        ${EXTENSIONS}/libsial/sial_member.c \
+        ${EXTENSIONS}/libsial/sial_node.c \
+        ${EXTENSIONS}/libsial/sial_num.c \
+        ${EXTENSIONS}/libsial/sial_op.c \
+        ${EXTENSIONS}/libsial/sialpp.l \
+        ${EXTENSIONS}/libsial/sialpp-lsed \
+        ${EXTENSIONS}/libsial/sialpp.y \
+        ${EXTENSIONS}/libsial/sial_print.c \
+        ${EXTENSIONS}/libsial/sial_stat.c \
+        ${EXTENSIONS}/libsial/sial_str.c \
+        ${EXTENSIONS}/libsial/sial_type.c \
+        ${EXTENSIONS}/libsial/sial_util.c \
+        ${EXTENSIONS}/libsial/sial_var.c \
+        ${EXTENSIONS}/libsial/sial.y \
+        ${EXTENSIONS}/sial.c \
+        ${EXTENSIONS}/sial.mk
 
 DAEMON_OBJECT_FILES=remote_daemon.o va_server.o va_server_v1.o \
 	lkcd_common.o lkcd_v1.o lkcd_v2_v3.o lkcd_v5.o lkcd_v7.o lkcd_v8.o \
@@ -150,10 +199,11 @@
           ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \
           ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \
           ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/dwarf2read.c \
-	  ${GDB}/include/obstack.h
+          ${GDB}/include/obstack.h ${GDB}/gdb/ppc-linux-tdep.c
 GDB_6.1_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o \
           ${GDB}/gdb/target.o ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \
-          ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/dwarf2read.o
+          ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/dwarf2read.o \
+          ${GDB}/gdb/ppc-linux-tdep.o
 
 # 
 # GDB_FLAGS is passed up from the gdb Makefile.
@@ -175,7 +225,8 @@
 
 CFLAGS=-g -D${TARGET} ${TARGET_CFLAGS}
 
-TAR_FILES=${SOURCE_FILES} Makefile COPYING README .rh_rpm_package crash.8
+TAR_FILES=${SOURCE_FILES} Makefile COPYING README .rh_rpm_package crash.8 \
+	${EXTENSION_SOURCE_FILES}
 CSCOPE_FILES=${SOURCE_FILES}
 
 READLINE_DIRECTORY=./${GDB}/readline
@@ -184,9 +235,13 @@
 
 REDHATFLAGS=-DREDHAT
 
+# To build the extensions library by default, uncomment the third command
+# line below.  Otherwise they can be built by entering "make extensions".
+
 all: make_configure
 	@./configure -p "RPMPKG=${RPMPKG}" -b
 	@make --no-print-directory gdb_merge
+#	@make --no-print-directory extensions
 
 gdb_merge: force
 	@if [ ! -f ${GDB}/README ]; then \
@@ -206,6 +261,11 @@
 	@for FILE in ${GDB_FILES}; do\
 	  echo $$FILE >> gdb.files; done
 	@tar --exclude-from gdb.files -xvzmf ${GDB}.tar.gz
+	@make --no-print-directory gdb_patch
+
+gdb_patch:
+	if [ -f ${GDB}.patch ] && [ -s ${GDB}.patch ]; then \
+		patch -p0 < ${GDB}.patch; fi
 
 library: make_build_data ${OBJECT_FILES}
 	ar -rs ${PROGRAM}lib.a ${OBJECT_FILES}
@@ -223,6 +283,7 @@
 
 clean:
 	rm -f ${OBJECT_FILES} ${DAEMON_OBJECT_FILES} ${PROGRAM} ${PROGRAM}lib.a ${GDB_OFILES}
+	@(cd extensions; make --no-print-directory -i clean)
 
 make_build_data: force
 	cc -c ${CFLAGS} build_data.c ${WARNING_OPTIONS} ${WARNING_ERROR}
@@ -318,7 +379,7 @@
 remote_daemon.o: ${GENERIC_HFILES} remote.c
 	cc -c ${CFLAGS} -DDAEMON remote.c -o remote_daemon.o ${WARNING_OPTIONS} ${WARNING_ERROR}
 
-x86.o: ${GENERIC_HFILES} x86.c
+x86.o: ${GENERIC_HFILES} ${REDHAT_HFILES} x86.c
 	cc -c ${CFLAGS} -DMCLX x86.c ${WARNING_OPTIONS} ${WARNING_ERROR}
 
 alpha.o: ${GENERIC_HFILES} alpha.c
@@ -327,13 +388,13 @@
 ppc.o: ${GENERIC_HFILES} ppc.c
 	cc -c ${CFLAGS} ppc.c ${WARNING_OPTIONS} ${WARNING_ERROR}
 
-ia64.o: ${GENERIC_HFILES} ia64.c
+ia64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} ia64.c
 	cc -c ${CFLAGS} ia64.c ${WARNING_OPTIONS} ${WARNING_ERROR}
 
 ppc64.o: ${GENERIC_HFILES} ppc64.c
 	cc -c ${CFLAGS} ppc64.c ${WARNING_OPTIONS} ${WARNING_ERROR}
 
-x86_64.o: ${GENERIC_HFILES} x86_64.c
+x86_64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} x86_64.c
 	cc -c ${CFLAGS} x86_64.c ${WARNING_OPTIONS} ${WARNING_ERROR}
 
 s390.o: ${GENERIC_HFILES} ${IBM_HFILES} s390.c
@@ -342,6 +403,9 @@
 s390x.o: ${GENERIC_HFILES} ${IBM_HFILES} s390x.c
 	cc -c ${CFLAGS} s390x.c ${WARNING_OPTIONS} ${WARNING_ERROR}
 
+s390dbf.o: ${GENERIC_HFILES} ${IBM_HFILES} s390dbf.c
+	cc -c ${CFLAGS} s390dbf.c ${WARNING_OPTIONS} ${WARNING_ERROR}
+
 s390_dump.o: ${GENERIC_HFILES} ${IBM_HFILES} s390_dump.c
 	cc -c ${CFLAGS} s390_dump.c ${WARNING_OPTIONS} ${WARNING_ERROR}
 
@@ -353,12 +417,18 @@
 diskdump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} diskdump.c
 	cc -c ${CFLAGS} diskdump.c ${WARNING_OPTIONS} ${WARNING_ERROR}
 
+xendump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} xendump.c
+	cc -c ${CFLAGS} xendump.c ${WARNING_OPTIONS} ${WARNING_ERROR}
+
 extensions.o: ${GENERIC_HFILES} extensions.c
 	cc -c ${CFLAGS} extensions.c ${WARNING_OPTIONS} ${WARNING_ERROR}
 
 lkcd_x86_trace.o: ${GENERIC_HFILES} ${LKCD_TRACE_HFILES} lkcd_x86_trace.c 
 	cc -c ${CFLAGS} -DREDHAT lkcd_x86_trace.c ${WARNING_OPTIONS} ${WARNING_ERROR}
 
+unwind_x86_32_64.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind_x86_32_64.c
+	cc -c ${CFLAGS} unwind_x86_32_64.c -o unwind_x86_32_64.o ${WARNING_OPTIONS} ${WARNING_ERROR}
+
 unwind_v1.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind.c unwind_decoder.c
 	cc -c ${CFLAGS} -DREDHAT -DUNWIND_V1 unwind.c -o unwind_v1.o ${WARNING_OPTIONS} ${WARNING_ERROR}
 
@@ -369,7 +439,19 @@
 	cc -c ${CFLAGS} -DREDHAT -DUNWIND_V3 unwind.c -o unwind_v3.o ${WARNING_OPTIONS} ${WARNING_ERROR}
 
 lkcd_fix_mem.o: ${GENERIC_HFILES} ${LKCD_HFILES} lkcd_fix_mem.c
-	cc -c ${CFLAGS} lkcd_fix_mem.c ${WARNING_OPTIONS} ${WARNING_ERROR}
+	cc -c ${CFLAGS} -DMCLX lkcd_fix_mem.c ${WARNING_OPTIONS} ${WARNING_ERROR}
+
+xen_hyper.o: ${GENERIC_HFILES} xen_hyper.c
+	cc -c ${CFLAGS} xen_hyper.c ${WARNING_OPTIONS} ${WARNING_ERROR}
+
+xen_hyper_command.o: ${GENERIC_HFILES} xen_hyper_command.c
+	cc -c ${CFLAGS} xen_hyper_command.c ${WARNING_OPTIONS} ${WARNING_ERROR}
+
+xen_hyper_global_data.o: ${GENERIC_HFILES} xen_hyper_global_data.c
+	cc -c ${CFLAGS} xen_hyper_global_data.c ${WARNING_OPTIONS} ${WARNING_ERROR}
+
+xen_hyper_dump_tables.o: ${GENERIC_HFILES} xen_hyper_dump_tables.c
+	cc -c ${CFLAGS} xen_hyper_dump_tables.c ${WARNING_OPTIONS} ${WARNING_ERROR}
 
 ${PROGRAM}: force
 	@make --no-print-directory all
@@ -393,13 +475,13 @@
 
 gdb_files: make_configure
 	@./configure -q -b
-	@echo ${GDB_FILES}
+	@echo ${GDB_FILES} ${GDB_PATCH_FILES}
 
 show_files:
 	@if [ -f ${PROGRAM}  ]; then \
-		./${PROGRAM} --no_crashrc -h README > README; fi
-	@echo ${SOURCE_FILES} Makefile ${GDB_FILES} COPYING README \
-	.rh_rpm_package crash.8
+		./${PROGRAM} --no_scroll --no_crashrc -h README > README; echo $?; fi
+	@echo ${SOURCE_FILES} Makefile ${GDB_FILES} ${GDB_PATCH_FILES} COPYING README \
+	.rh_rpm_package crash.8 ${EXTENSION_SOURCE_FILES}
 
 ctags:
 	ctags ${SOURCE_FILES}
@@ -410,8 +492,8 @@
 
 do_tar:
 	@if [ -f ${PROGRAM}  ]; then \
-		./${PROGRAM} --no_crashrc -h README > README; fi
-	tar cvzf ${PROGRAM}.tar.gz ${TAR_FILES} ${GDB_FILES}
+		./${PROGRAM} --no_scroll --no_crashrc -h README > README; fi
+	tar cvzf ${PROGRAM}.tar.gz ${TAR_FILES} ${GDB_FILES} ${GDB_PATCH_FILES}
 	@echo; ls -l ${PROGRAM}.tar.gz
 
 # To create a base tar file for Red Hat RPM packaging, pass the base RPM
@@ -421,12 +503,12 @@
 # spec file will have its own release number, which will in turn get passed 
 # to the "all" target upon the initial build.
 
-RELEASE=4.0
+RELEASE=
 
 release: make_configure
 	@if [ "`id --user`" != "0" ]; then \
 		echo "make release: must be super-user"; exit 1; fi
-	@./configure -p "RPMPKG=${RPMPKG}" -u -g
+	@./configure -P "RPMPKG=${RPMPKG}" -u -g
 	@make --no-print-directory release_configure
 	@echo 
 	@echo "cvs tag this release if necessary"
@@ -446,10 +528,10 @@
 	@rm -f ${PROGRAM}-${RELEASE}.tar.gz 
 	@rm -f ${PROGRAM}-${RELEASE}.src.rpm
 	@chown root ./RELDIR/${PROGRAM}-${RELEASE}
-	@tar cf - ${SOURCE_FILES} Makefile ${GDB_FILES} COPYING \
-	.rh_rpm_package crash.8 | (cd ./RELDIR/${PROGRAM}-${RELEASE}; tar xf -)
+	@tar cf - ${SOURCE_FILES} Makefile ${GDB_FILES} ${GDB_PATCH_FILES} COPYING \
+	.rh_rpm_package crash.8 ${EXTENSION_SOURCE_FILES} | (cd ./RELDIR/${PROGRAM}-${RELEASE}; tar xf -)
 	@cp ${GDB}.tar.gz ./RELDIR/${PROGRAM}-${RELEASE}
-	@./${PROGRAM} --no_crashrc -h README > ./RELDIR/${PROGRAM}-${RELEASE}/README
+	@./${PROGRAM} --no_scroll --no_crashrc -h README > ./RELDIR/${PROGRAM}-${RELEASE}/README
 	@(cd ./RELDIR; find . -exec chown root {} ";")
 	@(cd ./RELDIR; find . -exec chgrp root {} ";")
 	@(cd ./RELDIR; find . -exec touch {} ";")
@@ -464,7 +546,7 @@
 	  cp ${PROGRAM}-${RELEASE}.tar.gz /usr/src/redhat/SOURCES; \
 	  /usr/bin/rpmbuild -bs ${PROGRAM}.spec > /dev/null; \
 	  rm -f /usr/src/redhat/SOURCES/${PROGRAM}-${RELEASE}.tar.gz; \
-	  cp /usr/src/redhat/SRPMS/${PROGRAM}-${RELEASE}.src.rpm . ; \
+	  mv /usr/src/redhat/SRPMS/${PROGRAM}-${RELEASE}.src.rpm . ; \
 	  ls -l ${PROGRAM}-${RELEASE}.src.rpm; \
 	exit 0; fi
 
@@ -488,3 +570,10 @@
 
 dis:
 	objdump --disassemble --line-numbers ${PROGRAM} > ${PROGRAM}.dis
+
+extensions: make_configure
+	@./configure -q -b
+	@make --no-print-directory do_extensions
+
+do_extensions:
+	@(cd extensions; make -i TARGET=$(TARGET) TARGET_CFLAGS=$(TARGET_CFLAGS))
--- crash/extensions/sial.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/sial.c	2008-10-28 14:53:10.000000000 -0400
@@ -0,0 +1,1021 @@
+/*
+ * $Id$
+ *
+ * This file is part of lcrash, an analysis tool for Linux memory dumps.
+ *
+ * Created by Silicon Graphics, Inc.
+ * Contributions by IBM, and others
+ *
+ * Copyright (C) 1999 - 2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version. See the file COPYING for more
+ * information.
+ */
+
+#include "gdb-6.1/gdb/defs.h"
+#include "target.h"
+#include "symtab.h"
+#include "gdbtypes.h"
+#include "gdbcore.h"
+#include "frame.h"
+#include "value.h"
+#include "symfile.h"
+#include "objfiles.h"
+#include "gdbcmd.h"
+#include "call-cmds.h"
+#include "gdb_regex.h"
+#include "expression.h"
+#include "language.h"
+#include "demangle.h"
+#include "inferior.h"
+#include "linespec.h"
+#include "source.h"
+#include "filenames.h"		/* for FILENAME_CMP */
+#include "objc-lang.h"
+
+#include "hashtab.h"
+
+#include "gdb_obstack.h"
+#include "block.h"
+#include "dictionary.h"
+
+#include <sys/types.h>
+#include <fcntl.h>
+#include "gdb_string.h"
+#include "gdb_stat.h"
+#include <ctype.h>
+#include "cp-abi.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sial_api.h>
+
+/////////////////////////////////////////////////////////////////////////
+// some definitions from crash's defs.h, a file which cannot be included here.
+// Hate to do this, but this is a quick port.
+// If anyone cares to work on the include and defs structure to make
+// this work cleanly...
+//
+/*
+ *  Global data (global_data.c) 
+ */
+extern char *args[];      
+extern int argcnt;            
+extern int argerrs;
+#define SYNOPSIS      (0x1)
+#define COMPLETE_HELP (0x2)
+#define PIPE_TO_LESS  (0x4)
+#define KVADDR (0x1)
+#define QUIET (0x4)
+
+typedef void (*cmd_func_t)(void);
+
+struct command_table_entry {               /* one for each command in menu */
+	char *name;
+	cmd_func_t func;
+	char **help_data;
+	ulong flags;
+};
+extern FILE *fp; 
+extern char *crash_global_cmd();
+
+//
+/////////////////////////////////////////////////////////////////////////
+/*
+	This is the glue between the sial interpreter and crash.
+*/
+
+static int
+apigetmem(ull iaddr, void *p, int nbytes)
+{
+	return readmem(iaddr, KVADDR, p, nbytes, NULL, QUIET);
+}
+
+// Since crash is target dependent (built for the target architecture), these helpers read fixed-width unsigned integers directly from the dump.
+static uint8_t apigetuint8(void* ptr)
+{
+uint8_t val;
+    if(!readmem((unsigned long)ptr, KVADDR, (char*)&val, sizeof val, NULL, QUIET)) return (uint8_t)-1;
+    return val;
+}
+
+static uint16_t apigetuint16(void* ptr)
+{
+uint16_t val;
+    if(!readmem((unsigned long)ptr, KVADDR, (char*)&val, sizeof val, NULL, QUIET)) return (uint16_t)-1;
+    return val;
+}
+
+static uint32_t apigetuint32(void* ptr)
+{
+uint32_t val;
+    if(!readmem((unsigned long)ptr, KVADDR, (char*)&val, sizeof val, NULL, QUIET)) return (uint32_t)-1;
+    return val;
+}
+
+static uint64_t apigetuint64(void* ptr)
+{
+uint64_t val;
+    if(!readmem((unsigned long)ptr, KVADDR, (char*)&val, sizeof val, NULL, QUIET)) return (uint64_t)-1;
+    return val;
+}
+
+static int
+apiputmem(ull iaddr, void *p, int nbytes)
+{
+	return 1;
+}
+
+/* extract a complex type (struct, union and enum) */
+static int
+apigetctype(int ctype, char *name, TYPE_S *tout)
+{
+    struct symbol *sym;
+    struct type *type;
+    int v=0;
+    
+    sial_dbg_named(DBG_TYPE, name, 2, "Looking for type %d name [%s] in struct domain...", ctype, name);
+    sym = lookup_symbol(name, 0, STRUCT_DOMAIN, 0, (struct symtab **) NULL);
+    if(!sym) {
+            sial_dbg_named(DBG_TYPE, name, 2, "Not found.\nLooking for type %d name [%s] in var domain...", ctype, name);
+            sym = lookup_symbol(name, 0, VAR_DOMAIN, 0, (struct symtab **) NULL);
+            if(sym) {
+                sial_dbg_named(DBG_TYPE, name, 2, "found class=%d\n", sym->aclass);
+                if(sym->aclass == LOC_TYPEDEF) v=1;
+            }
+    }
+        
+    if (sym) {
+        type=sym->type;
+        if(sial_is_typedef(ctype) && v) goto match;
+        switch(TYPE_CODE(type)) {
+            case TYPE_CODE_TYPEDEF: case TYPE_CODE_INT: 
+                                    if(sial_is_typedef(ctype))  goto match; break;
+            case TYPE_CODE_ENUM:    if(sial_is_enum(ctype))     goto match; break;
+            case TYPE_CODE_STRUCT:  if(sial_is_struct(ctype))   goto match; break;
+            case TYPE_CODE_UNION:   if(sial_is_union(ctype))    goto match; break;
+        }
+        sial_dbg_named(DBG_TYPE, name, 2, "Found but no match.\n");
+    }
+    else sial_dbg_named(DBG_TYPE, name, 2, "Not Found.\n");
+
+    return 0;
+
+match:
+    sial_dbg_named(DBG_TYPE, name, 2, "Found.\n");
+    /* populate */
+    sial_type_settype(tout, ctype);
+    sial_type_setsize(tout, TYPE_LENGTH(type));
+    sial_type_setidx(tout, (ull)(unsigned long)type);
+    sial_pushref(tout, 0);
+    return 1;
+}
+
+/* set idx value to actual array indexes from specified size */
+static void
+sial_setupidx(TYPE_S*t, int ref, int nidx, int *idxlst)
+{
+        /* put the idxlst in index size format */
+        if(nidx) {
+
+                int i;
+
+                for(i=0;i<nidx-1;i++) {
+			/* kludge for array dimensions of [1] */
+			if (idxlst[i+1] == 0) {
+				idxlst[i+1] = 1;
+			}
+			idxlst[i]=idxlst[i]/idxlst[i+1];
+		}
+
+                /* divide by element size for last element bound */
+                if(ref) idxlst[i] /= sial_defbsize();
+                else idxlst[i] /= sial_type_getsize(t);
+                sial_type_setidxlst(t, idxlst);
+        }
+}
+/*
+	This function needs to drill down a typedef and
+	return the corresponding type.
+	If the typedef is from a basetype sial_parsetype() will be
+	called back to build the type 
+*/
+static char *
+drilldowntype(struct type *type, TYPE_S *t)
+{
+char *tstr=0;
+int fctflg=0, ref=0;
+int *idxlst=0;
+int nidx=0;
+
+	while(type)
+	{
+            check_typedef(type);
+
+            // check for stub types and pull in the full definition instead
+            if(TYPE_STUB(type) && TYPE_TAG_NAME(type)) {
+
+                struct symbol *sym=lookup_symbol(TYPE_TAG_NAME(type), 0, STRUCT_DOMAIN, 0, (struct symtab **) NULL);
+                if(sym) {
+                    type=sym->type;
+                } 
+            }
+
+            switch(TYPE_CODE(type)) {
+
+		/* a pointer adds a level of reference to the actual type */
+		case TYPE_CODE_PTR:
+	
+			ref++;
+                        type=TYPE_TARGET_TYPE(type);
+       			/* this could be a void*, in which case the drill down stops here */
+			if(!type) {
+
+				/* make it a char* */
+				sial_parsetype("char", t, ref);
+				return 0;
+
+			}
+		break;
+
+		/* handle pointer to functions */
+		case TYPE_CODE_FUNC:
+
+			fctflg=1;
+                        type=TYPE_TARGET_TYPE(type);
+		break;
+
+		/* Is this an array? If so, skip this type info;
+		   we only need information on the elements themselves */
+		case TYPE_CODE_ARRAY:
+                        if(!idxlst) idxlst=sial_calloc(sizeof(int)*(MAXIDX+1));
+                        if(nidx >= MAXIDX) sial_error("Too many indexes! max=%d\n", MAXIDX);
+                        if (TYPE_LENGTH (type) > 0 && TYPE_LENGTH (TYPE_TARGET_TYPE (type)) > 0)
+	                {
+                            idxlst[nidx++]=TYPE_LENGTH (type) / TYPE_LENGTH (check_typedef(TYPE_TARGET_TYPE (type)));
+                        }
+                        type=TYPE_TARGET_TYPE(type);
+		break;
+
+		/* the typedef points to another typedef; keep drilling down */
+		case TYPE_CODE_TYPEDEF:
+                        type=TYPE_TARGET_TYPE(type);
+		break;
+
+		case TYPE_CODE_INT:
+
+			sial_parsetype(tstr=TYPE_NAME(type), t, 0);
+			type=0;
+		break;
+
+		case TYPE_CODE_UNION: 
+			sial_type_mkunion(t);
+			goto label;
+
+		case TYPE_CODE_ENUM:
+			sial_type_mkenum(t);
+			goto label;
+
+		case TYPE_CODE_STRUCT:
+		{
+			sial_type_mkstruct(t);
+
+label:
+			sial_type_setsize(t, TYPE_LENGTH(type));
+			sial_type_setidx(t, (ull)(unsigned long)type);
+			tstr=TYPE_TAG_NAME(type);
+                        type=0;
+		}
+		break;
+
+		/* we don't have all the info about it */
+		case TYPE_CODE_VOID:
+			sial_parsetype("int", t, 0);
+                        type=0;
+		break;
+
+
+		default: 
+			sial_error("Oops drilldowntype");
+		break;
+		}
+
+
+	}
+	sial_setupidx(t, ref, nidx, idxlst);
+	if(fctflg) sial_type_setfct(t, 1);
+	sial_pushref(t, ref+(nidx?1:0));
+	if(tstr) return sial_strdup(tstr);
+	return sial_strdup("");
+}
+
+static char *
+apigetrtype(ull idx, TYPE_S *t)
+{
+	return drilldowntype((struct type*)(unsigned long)(idx), t);
+}
+
+/*
+   	Return the name of a symbol at an address (if any)
+*/
+static char*
+apifindsym(char *p)
+{
+    return NULL;
+}
+
+
+/* 
+	Get the type, size and position information for a member of a structure.
+*/
+static char*
+apimember(char *mname,  ull tnum, TYPE_S *tm, MEMBER_S *m, ull *lnum)
+{
+struct type *type=(struct type*)(unsigned long)tnum;
+int midx;
+#define LASTNUM (*lnum)
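+/* getfirst/getnext protocol: an empty mname together with a non-zero *lnum
+   (saved by the previous call) asks for the next member; a non-empty mname
+   looks up that specific member instead */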
+
+	/* if we're being asked for the next member in a getfirst/getnext sequence */
+	if(mname && !mname[0] && LASTNUM) {
+
+		midx = LASTNUM;
+
+	} else {
+
+		if (TYPE_CODE(type) == TYPE_CODE_TYPEDEF) {
+			return 0;
+		}
+		if ((TYPE_CODE(type) != TYPE_CODE_STRUCT) && (TYPE_CODE(type) != TYPE_CODE_UNION)) {
+			return 0;
+		}
+                midx=0;
+	}
+	while(midx < TYPE_NFIELDS(type)) {
+        
+		if (!mname || !mname[0] || !strcmp(mname, TYPE_FIELD_NAME(type, midx))) {
+
+                        check_typedef(TYPE_FIELD_TYPE(type, midx));
+			sial_member_soffset(m, TYPE_FIELD_BITPOS(type, midx)/8);
+			sial_member_ssize(m, TYPE_FIELD_TYPE(type, midx)->length);
+			sial_member_snbits(m, TYPE_FIELD_BITSIZE(type, midx));
+			sial_member_sfbit(m, TYPE_FIELD_BITPOS(type, midx)%8);
+			sial_member_sname(m, TYPE_FIELD_NAME(type, midx));
+			LASTNUM=midx+1;
+			return drilldowntype(TYPE_FIELD_TYPE(type, midx), tm);
+		}
+		midx++;
+	}
+	return 0;
+}
+
+/*
+	This function gets the proper alignment value for a type.
+*/
+static int
+apialignment(ull idx)
+{
+struct type *type=(struct type *)(unsigned long)idx;
+
+    while(1)
+    {
+	switch(TYPE_CODE(type)) {
+
+	    case TYPE_CODE_ARRAY: case TYPE_CODE_TYPEDEF:
+		    type=TYPE_TARGET_TYPE(type);
+	    break;
+
+	    case TYPE_CODE_STRUCT:
+	    case TYPE_CODE_UNION:
+	    {
+		int max=0, cur;
+		int midx=0;
+
+		while(midx < TYPE_NFIELDS(type)) {
+                    cur=apialignment((ull)(unsigned long)TYPE_FIELD_TYPE(type, midx));
+	            if(cur > max) max=cur;
+	            midx++;
+		}
+		return max;
+            }
+            
+
+	    case TYPE_CODE_PTR:
+	    case TYPE_CODE_ENUM:
+	    case TYPE_CODE_INT:
+
+		    return TYPE_LENGTH (type);
+
+	    default:
+
+		    sial_error("Oops apialignment");
+	}
+    }
+}
+
+/* get the value of a symbol */
+static int
+apigetval(char *name, ull *val)
+{
+    if (symbol_exists(name)) {
+        *val=symbol_value(name);
+        return 1;
+    }
+    return 0;
+}
+
+/*
+	Get the list of enum symbols.
+*/
+ENUM_S*
+apigetenum(char *name)
+{
+    struct symbol *sym;
+
+    sym = lookup_symbol(name, 0, STRUCT_DOMAIN, 0, (struct symtab **) NULL);
+    if (sym && TYPE_CODE(sym->type)==TYPE_CODE_ENUM) {
+	ENUM_S *et=0;
+        struct type *type=sym->type;
+        int n=0;
+	while(n < TYPE_NFIELDS (type)) {
+      	    et=sial_add_enum(et, sial_strdup(TYPE_FIELD_NAME(type, n)), TYPE_FIELD_BITPOS(type, n));
+            n++;
+	}
+        return et;
+    }
+    return 0;
+}
+
+/*
+	Return the list of preprocessor defines.
+	For Irix we have to get the DIE for a startup.c file,
+	of dwarf type DW_TAG_compile_unit;
+	its DW_AT_producer will contain the compile line.
+
+	We then need to parse that line to get all the -Dname[=value]
+*/
+DEF_S *
+apigetdefs(void)
+{
+DEF_S *dt=0;
+int i;
+static struct linuxdefs_s {
+
+	char *name;
+	char *value;
+
+} linuxdefs[] = {
+
+	{"crash",		"1"},
+	{"linux",		"1"},
+	{"__linux",		"1"},
+	{"__linux__",		"1"},
+	{"unix",		"1"},
+	{"__unix",		"1"},
+	{"__unix__",		"1"},
+	// helper macros
+	{"LINUX_2_2_16",	"(LINUX_RELEASE==0x020210)"},
+	{"LINUX_2_2_17",	"(LINUX_RELEASE==0x020211)"},
+	{"LINUX_2_4_0",		"(LINUX_RELEASE==0x020400)"},
+	{"LINUX_2_2_X",		"(((LINUX_RELEASE) & 0xffff00) == 0x020200)"},
+	{"LINUX_2_4_X",		"(((LINUX_RELEASE) & 0xffff00) == 0x020400)"},
+	{"LINUX_2_6_X",		"(((LINUX_RELEASE) & 0xffff00) == 0x020600)"},
+#ifdef i386
+	{"i386",         "1"},
+	{"__i386",       "1"},
+	{"__i386__",     "1"},
+#endif
+#ifdef s390
+	{"s390",         "1"},
+	{"__s390",       "1"},
+	{"__s390__",     "1"},
+#endif
+#ifdef s390x
+	 {"s390x",       "1"},
+	 {"__s390x",     "1"},
+	 {"__s390x__",   "1"},
+#endif
+#ifdef __ia64__
+	{"ia64",         "1"},
+	{"__ia64",       "1"},
+	{"__ia64__",     "1"},
+	{"__LP64__",     "1"},
+	{"_LONGLONG",    "1"},
+	{"__LONG_MAX__", "9223372036854775807L"},
+#endif
+#ifdef ppc64
+	 {"ppc64",       "1"},
+	 {"__ppc64",     "1"},
+	 {"__ppc64__",   "1"},
+#endif
+	};
+        
+static char *untdef[] = { 
+    "clock",  
+    "mode",  
+    "pid",  
+    "uid",  
+    "xtime",  
+    "init_task", 
+    "size", 
+    "type",
+    "level",
+    0 
+};
+
+#if 0
+How to extract a basic set of -D flags from the kernel image
+
+	prod=sial_strdup(kl_getproducer());
+	for(p=prod; *p; p++) {
+
+		if(*p=='-' && *(p+1)=='D') {
+
+			char *def=p+2;
+
+			while(*p && *p != '=' && *p != ' ') p++;
+
+			if(!*p || *p == ' ') {
+
+				*p='\0';
+				dt=sial_add_def(dt, sial_strdup(def), sial_strdup("1"));
+
+			} else {
+
+				char *val=p+1;
+
+				*p++='\0';
+				while(*p && *p != ' ') p++;
+				*p='\0';
+
+				dt=sial_add_def(dt, sial_strdup(def), sial_strdup(val));
+			}
+		}
+	}
+#endif	
+
+        /* Remove some typedefs with very common identifiers.
+           There are also cases where the kernel defines a type and a variable with the same name, e.g. xtime.
+           The same can be accomplished in source using #undef <tdefname> or by forcing the evaluation of
+           an identifier as a variable name, e.g. __var(xtime).
+
+           I tried to make the grammar as unambiguous as I could.
+
+           If this becomes too much of a problem I might disable usage of all image typedefs in sial!
+        */
+        {
+            char **tdefname=untdef;
+            while(*tdefname) sial_addneg(*tdefname++);
+            
+        }
+        
+	/* insert constant defines from list above */
+	for(i=0;i<sizeof(linuxdefs)/sizeof(linuxdefs[0]);i++) {
+
+		dt=sial_add_def(dt, sial_strdup(linuxdefs[i].name), 
+			sial_strdup(linuxdefs[i].value));
+	}
+
+#if 1
+        {
+            ull addr;
+            char banner[200];
+            if(apigetval("linux_banner", &addr)) {
+                if(apigetmem(addr, banner, sizeof banner-1)) {
+                
+                    // parse the banner string and set up release macros
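+                    // e.g. a "Linux version 2.6.18-..." banner gives two=2, major=6,
+                    // minor=18, so LINUX_RELEASE becomes 0x020612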
+                    banner[sizeof banner -1]='\0';
+                    char *tok=strtok(banner, " \t");
+                    if(tok) tok=strtok(NULL, " \t");
+                    if(tok) tok=strtok(NULL, " \t");
+                    if(tok) {
+                        int two, major, minor, ret;
+                        ret = sscanf(tok, "%d.%d.%d-", &two, &major, &minor);
+                        if( ret == 3) {
+                            sprintf(banner, "0x%02x%02x%02x", two, major, minor);
+		            dt=sial_add_def(dt, sial_strdup("LINUX_RELEASE"), sial_strdup(banner));
+                            sial_msg("Core LINUX_RELEASE == '%s'\n", tok);
+                        }
+                    }
+                }
+                else sial_msg("Sial init: could not read symbol 'linux_banner' from corefile.\n");
+            }
+            else sial_msg("Sial init: could not find symbol 'linux_banner' in corefile.\n");
+        }
+#endif
+	return dt;
+}
+
+apiops icops= {
+	apigetmem, 
+	apiputmem, 
+	apimember, 
+	apigetctype, 
+	apigetrtype, 
+	apialignment, 
+	apigetval, 
+	apigetenum, 
+	apigetdefs,
+	apigetuint8,
+	apigetuint16,
+	apigetuint32,
+	apigetuint64,
+	apifindsym
+};
+
+void
+sial_version(void)
+{
+	sial_msg("< Sial interpreter version %d.%d >\n"
+		, S_MAJOR, S_MINOR);
+}
+
+static void
+run_callback(void)
+{
+extern char *crash_global_cmd();
+FILE *ofp = NULL;
+
+	if (fp) {
+		ofp = sial_getofile();
+		sial_setofile(fp);
+	}
+
+	sial_cmd(crash_global_cmd(), args, argcnt);
+
+	if (ofp) 
+		sial_setofile(ofp);
+}
+
+
+void
+edit_cmd(void)
+{
+int c, file=0;
+        while ((c = getopt(argcnt, args, "lf")) != EOF) {
+                switch(c)
+                {
+                case 'l':
+                    sial_vilast();
+                    return;
+                break;
+                case 'f':
+                    file++;
+                break;
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(crash_global_cmd(), SYNOPSIS);
+
+        else if(args[optind]) {
+            while(args[optind]) {
+	        sial_vi(args[optind++], file);
+            }
+	}
+        else cmd_usage(crash_global_cmd(), SYNOPSIS);
+}
+
+char *edit_help[]={
+		"edit",
+                "Start a $EDITOR session on a sial function or file",
+                "<-f fileName>|<function name>",
+                "This command can be used during a tight development cycle",
+                "where frequent editing->run->editing sequences are executed.",
+                "To edit a known sial macro file use the -f option. To edit the file",
+                "at the location of a known function's declaration omit the -f option.",
+                "Use a single -l option to be brought to the last compile error location.",
+                "",
+                "EXAMPLES:",
+                "  %s> edit -f ps",
+                "  %s> edit ps",
+                "  %s> edit ps_opt",
+                "  %s> edit -l",
+                NULL
+};
+
+
+// these control debug mode when parsing (pre-processor and compile)
+int sialdebug=0, sialppdebug=0;
+
+void
+load_cmd(void)
+{
+	if(argcnt< 2) cmd_usage(crash_global_cmd(), SYNOPSIS);
+	else {
+            sial_setofile(fp);
+            sial_loadunload(1, args[1], 0);
+        }
+}
+
+char *load_help[]={
+		"load",
+                "Load a sial file",
+                "<fileName>|<Directory>",
+                "  Load a file or a directory. In the case of a directory",
+		"  all files in that directory will be loaded.",
+                NULL
+                
+};
+
+void
+unload_cmd(void)
+{
+	if(argcnt < 2) cmd_usage(crash_global_cmd(), SYNOPSIS);
+	else sial_loadunload(0, args[1], 0);
+}
+
+char *unload_help[]={
+		"unload",
+                "Unload a sial file",
+                "<fileName>|<Directory>",
+                "  Unload a file or a directory. In the case of a directory",
+		"  all files in that directory will be unloaded.",
+                NULL
+};
+
+void
+sdebug_cmd(void)
+{
+	if(argcnt < 2) sial_msg("Current sial debug level is %d\n", sial_getdbg());
+	else sial_setdbg(atoi(args[1]));
+}
+
+char *sdebug_help[]={
+		"sdebug",
+                "Print or set sial debug level",
+                "<Debug level 0..9>",
+                "  Set the debug level of sial. Without any parameter, show the current debug level.",
+                NULL
+};
+
+void
+sname_cmd(void)
+{
+	if(argcnt < 2) {
+            if(sial_getname()) sial_msg("Current sial name match is '%s'\n", sial_getname());
+            else sial_msg("No name match specified yet.\n");
+	} else sial_setname(args[1]);
+}
+
+char *sname_help[]={
+		"sname",
+                "Print or set sial name match.",
+                "<name>",
+                "  Set sial name string for matches. Debug messages that are object oriented",
+                "  will only be displayed if the object name (struct, type, ...) matches this",
+		"  value.",
+                NULL
+};
+
+void
+sclass_cmd(void)
+{
+	if(argcnt < 2) {
+            char **classes=sial_getclass();
+            sial_msg("Current sial classes are :");
+            while(*classes) sial_msg("'%s' ", *classes++);
+            sial_msg("\n");
+	
+        }
+        else {
+            int i;
+            for(i=1; i<argcnt; i++) sial_setclass(args[i]);
+        }
+}
+
+char *sclass_help[]={
+		"sclass",
+                "Print or set sial debug message class(es).",
+                "<className>[, <className>]",
+                "  Set sial debug classes. Only debug messages that are in the specified classes",
+                "  will be displayed.",
+                NULL
+};
+
+#define NCMDS 200
+static struct command_table_entry command_table[NCMDS] =  {
+
+	{"edit", edit_cmd, edit_help},
+	{"load", load_cmd, load_help},
+	{"unload", unload_cmd, unload_help},
+	{"sdebug", sdebug_cmd, sdebug_help},
+	{"sname", sname_cmd, sname_help},
+	{"sclass", sclass_cmd, sclass_help},
+	{(char *)0 }
+};
+
+static void
+add_sial_cmd(char *name, void (*cmd)(void), char **help, int flags)
+{
+struct command_table_entry *cp;
+struct command_table_entry *crash_cmd_table();
+
+    // check for a clash with native commands
+    for (cp = crash_cmd_table(); cp->name; cp++) {
+        if (!strcmp(cp->name, name)) {
+            sial_msg("Sial command name '%s' conflicts with native crash command.\n", name);
+            return;
+        }
+    }
+
+    // make sure we have enough space for the new command
+    if(!command_table[NCMDS-2].name) {
+        for (cp = command_table; cp->name; cp++);
+        cp->name=sial_strdup(name);
+        cp->func=cmd;
+        cp->help_data=help;
+        cp->flags=flags;
+    }
+}
+
+static void
+rm_sial_cmd(char *name)
+{
+struct command_table_entry *cp, *end;
+
+    for (cp = command_table; cp->name; cp++) {
+        if (!strcmp(cp->name, name)) {
+            sial_free(cp->name);
+            memmove(cp, cp+1, sizeof *cp *(NCMDS-(cp-command_table)-1));
+            break;
+        }
+    }
+}
+
+/*
+	This function is called for every new function
+	generated by a load command. This enables us to
+	register new commands.
+
+	We check here if the functions:
+
+	fname_help()
+	fname_opt()
+	and
+	fname_usage()
+
+	exist, and if so then we have a new command.
+	Then we associate (register) a function with
+	the standard sial callbacks.
+*/
+void
+reg_callback(char *name, int load)
+{
+char fname[MAX_SYMNAMELEN+sizeof("_usage")+1];
+char *help_str, *opt_str;
+char **help=malloc(sizeof *help * 5);
+
+    if(!help) return;
+    snprintf(fname, sizeof(fname), "%s_help", name);
+    if(sial_chkfname(fname, 0)) {
+        help_str=sial_strdup((char*)(unsigned long)sial_exefunc(fname, 0));
+        snprintf(fname, sizeof(fname), "%s_usage", name);
+        if(sial_chkfname(fname, 0)) {
+            if(load) {
+                opt_str=sial_strdup((char*)(unsigned long)sial_exefunc(fname, 0));
+                help[0]=sial_strdup(name);
+                help[1]="";
+                help[2]=sial_strdup(opt_str);
+                help[3]=sial_strdup(help_str);
+                help[4]=0;
+                add_sial_cmd(name, run_callback, help, 0);
+                return;
+            }
+            else rm_sial_cmd(name);
+        }
+        sial_free(help_str);
+    }
+    free(help);
+    return;
+}
+
+/* 
+ *  The _fini() function is called if the shared object is unloaded. 
+ *  If desired, perform any cleanups here. 
+ */
+void _fini() 
+{ 
+    // need to unload any files we have loaded
+    
+}
+
+VALUE_S *curtask(VALUE_S *v, ...)
+{
+unsigned long get_curtask();
+    return sial_makebtype((ull)get_curtask());
+}
+
+int _init() /* Register the command set. */
+{ 
+#define LCDIR "/usr/share/sial/crash"
+#define LCIDIR "include"
+#define LCUDIR ".sial"
+
+
+	if(sial_open() >= 0) {
+
+		char *path, *ipath;
+		char *homepath=0;
+               	char *home=getenv("HOME");
+
+		/* set api, default size, and default sign for types */
+#ifdef i386
+#define SIAL_ABI  ABI_INTEL_X86
+#else 
+#ifdef __ia64__
+#define SIAL_ABI  ABI_INTEL_IA
+#else
+#ifdef __x86_64__
+#define SIAL_ABI  ABI_INTEL_IA
+#else
+#ifdef __s390__
+#define SIAL_ABI  ABI_S390
+#else
+#ifdef __s390x__
+#define SIAL_ABI  ABI_S390X
+#else
+#ifdef PPC64
+#define SIAL_ABI  ABI_PPC64
+#else
+#error sial: Unknown ABI 
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+		sial_apiset(&icops, SIAL_ABI, sizeof(long), 0);
+
+		sial_version();
+
+        	/* set the macro search path */
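+        	/* the default is "/usr/share/sial/crash:$HOME/.sial" when SIAL_MPATH is not set */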
+        	if(!(path=getenv("SIAL_MPATH"))) {
+
+                	if(home) {
+
+                        	path=sial_alloc(strlen(home)+sizeof(LCUDIR)+sizeof(LCDIR)+4);
+				homepath=sial_alloc(strlen(home)+sizeof(LCUDIR)+2);
+
+				/* build a path for call to sial_load() */
+				strcpy(homepath, home);
+				strcat(homepath, "/");
+				strcat(homepath, LCUDIR);
+
+				/* build the official path */
+                        	strcpy(path, LCDIR);
+                        	strcat(path, ":");
+                        	strcat(path, home);
+                        	strcat(path, "/");
+				strcat(path, LCUDIR);
+                	}
+                	else path=LCDIR;
+		}
+		sial_setmpath(path);
+
+		fprintf(fp, "\tLoading sial commands from %s .... ",
+                                         path);
+
+		/* include path */
+		if(!(ipath=getenv("SIAL_IPATH"))) {
+
+                	if(home) {
+
+                        	ipath=sial_alloc(strlen(home)+sizeof(LCDIR)+sizeof(LCUDIR)+(sizeof(LCIDIR)*2)+(sizeof("/usr/include")+2)+6);
+
+				/* build the official path */
+                        	strcpy(ipath, LCDIR);
+                        	strcat(ipath, "/"LCIDIR":");
+                        	strcat(ipath, home);
+                        	strcat(ipath, "/");
+				strcat(ipath, LCUDIR);
+				strcat(ipath, "/"LCIDIR);
+				strcat(ipath, ":/usr/include");
+                	}
+                	else ipath=LCDIR"/"LCIDIR;
+		}
+		sial_setipath(ipath);
+
+		/* set the new function callback */
+		sial_setcallback(reg_callback);
+
+		/* load the default macros */
+		sial_loadall();
+
+		/* load some sial specific commands */
+                register_extension(command_table);
+                
+                /* some builtins */
+        	sial_builtin("int curtask()", curtask);
+                
+                fprintf(fp, "Done.\n");
+	} 
+        return 1;
+}
--- crash/extensions/dminfo.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/dminfo.c	2007-11-09 16:57:28.000000000 -0500
@@ -0,0 +1,1534 @@
+/* dminfo.c - crash extension module for device-mapper analysis
+ *
+ * Copyright (C) 2005 NEC Corporation
+ * Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "defs.h"		/* From the crash source top-level directory */
+
+int _init(void);
+int _fini(void);
+
+/*
+ * Indices of size-offset array (Used by GET_xxx macros)
+ *
+ * DM_<struct name>_<member name>
+ */
+enum {
+	DM_hash_cell_name_list = 0,
+	DM_hash_cell_name,
+	DM_hash_cell_md,
+
+	DM_mapped_device_disk,
+	DM_mapped_device_map,
+
+	DM_gendisk_major,
+	DM_gendisk_first_minor,
+	DM_gendisk_disk_name,
+
+	DM_dm_table_num_targets,
+	DM_dm_table_targets,
+	DM_dm_table_devices,
+
+	DM_dm_target_type,
+	DM_dm_target_begin,
+	DM_dm_target_len,
+	DM_dm_target_private,
+
+	DM_dm_dev_count,
+	DM_dm_dev_bdev,
+	DM_dm_dev_name,
+
+	DM_dm_io_md,
+	DM_dm_io_bio,
+
+	DM_target_type_name,
+
+	DM_target_io_io,
+
+	DM_block_device_bd_disk,
+
+	DM_bio_bi_private,
+
+	DM_bio_list_head,
+
+	DM_linear_c_dev,
+	DM_linear_c_start,
+
+	DM_multipath_hw_handler,
+	DM_multipath_nr_priority_groups,
+	DM_multipath_priority_groups,
+	DM_multipath_nr_valid_paths,
+	DM_multipath_current_pg,
+	DM_multipath_queue_if_no_path,
+	DM_multipath_queue_size,
+
+	DM_hw_handler_type,
+	DM_hw_handler_type_name,
+
+	DM_priority_group_ps,
+	DM_priority_group_pg_num,
+	DM_priority_group_bypassed,
+	DM_priority_group_nr_pgpaths,
+	DM_priority_group_pgpaths,
+
+	DM_path_selector_type,
+	DM_path_selector_type_name,
+
+	DM_pgpath_fail_count,
+	DM_pgpath_path,
+
+	DM_path_dev,
+	DM_path_is_active,
+
+	DM_mirror_set_rh,
+	DM_mirror_set_reads,
+	DM_mirror_set_writes,
+	DM_mirror_set_in_sync,
+	DM_mirror_set_nr_mirrors,
+	DM_mirror_set_mirror,
+
+	DM_region_hash_log,
+	DM_region_hash_quiesced_regions,
+	DM_region_hash_recovered_regions,
+
+	DM_dirty_log_type,
+	DM_dirty_log_type_name,
+
+	DM_mirror_error_count,
+	DM_mirror_dev,
+	DM_mirror_offset,
+
+	DM_crypt_config_dev,
+	DM_crypt_config_iv_mode,
+	DM_crypt_config_tfm,
+	DM_crypt_config_key_size,
+	DM_crypt_config_key,
+
+	DM_crypto_tfm_crt_u,
+	DM_crypto_tfm___crt_alg,
+
+	DM_crypto_alg_cra_name,
+
+	DM_cipher_tfm_cit_mode,
+
+	DM_stripe_c_stripes,
+	DM_stripe_c_chunk_mask,
+	DM_stripe_c_stripe,
+
+	DM_stripe_dev,
+
+	DM_dm_snapshot_origin,
+	DM_dm_snapshot_cow,
+	DM_dm_snapshot_chunk_size,
+	DM_dm_snapshot_valid,
+	DM_dm_snapshot_type,
+
+	NR_DMINFO_MEMBER_TABLE_ENTRY
+};
+
+/* Size-offset array for structure's member */
+static struct dminfo_member_entry {
+	unsigned long offset;
+	unsigned long size;
+} mbr_ary[NR_DMINFO_MEMBER_TABLE_ENTRY];
+
+/*
+ * Macros to retrieve the data of a given structure's member
+ *
+ * Macros other than MSG assume 'struct s' is at 'addr'
+ */
+#define MSG(msg, s, m) msg ": " s "." m
+
+/* Initialize the size-offset array */
+#define INIT_MBR_TABLE(s, m) \
+	do { \
+		if (!mbr_ary[DM_##s##_##m].size) { \
+			mbr_ary[DM_##s##_##m].offset = MEMBER_OFFSET("struct " #s, #m); \
+			mbr_ary[DM_##s##_##m].size   = MEMBER_SIZE("struct " #s, #m); \
+		} \
+	} while (0)
+
+/*
+ * Store the data of member m in ret.
+ * Initialize the size-offset array for the member m if needed.
+ */
+#define GET_VALUE(addr, s, m, ret) \
+	do { \
+		INIT_MBR_TABLE(s, m); \
+		if (sizeof(ret) < mbr_ary[DM_##s##_##m].size) \
+			fprintf(fp, "%s\n", \
+				MSG("ERROR: GET_VALUE size_check", #s, #m)); \
+		readmem(addr + mbr_ary[DM_##s##_##m].offset, KVADDR, &ret, \
+			mbr_ary[DM_##s##_##m].size, MSG("GET_VALUE", #s, #m), \
+			FAULT_ON_ERROR);\
+	} while (0)
+
+/*
+ * Store the address of member m in ret.
+ * Initialize the size-offset array for the member m if needed.
+ */
+#define GET_ADDR(addr, s, m, ret) \
+	do { \
+		INIT_MBR_TABLE(s, m); \
+		ret = addr + mbr_ary[DM_##s##_##m].offset; \
+	} while (0)
+
+/*
+ * Store the string data of member m in ret.
+ * Initialize the size-offset array for the member m if needed.
+ */
+#define GET_STR(addr, s, m, ret, len) \
+	do { \
+		INIT_MBR_TABLE(s, m); \
+		if (!read_string(addr + mbr_ary[DM_##s##_##m].offset, ret, len - 1)) \
+			fprintf(fp, "%s\n", MSG("ERROR: GET_STR", #s, #m)); \
+	} while (0)
+
+/*
+ * Store the string data pointed by member m in ret.
+ * Initialize the size-offset array for the member m if needed.
+ */
+#define GET_PTR_STR(addr, s, m, ret, len) \
+	do { \
+		unsigned long tmp; \
+		INIT_MBR_TABLE(s, m); \
+		readmem(addr + mbr_ary[DM_##s##_##m].offset, KVADDR, &tmp, \
+			mbr_ary[DM_##s##_##m].size, MSG("GET_PTR_STR", #s, #m),\
+			FAULT_ON_ERROR);\
+		if (!read_string(tmp, ret, len - 1)) \
+			fprintf(fp, "%s\n", MSG("ERROR: GET_PTR_STR", #s, #m));\
+	} while (0)
+
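+/*
+ * Illustrative sketch (not compiled in, helper name is hypothetical):
+ * given the kernel address of a struct gendisk, GET_VALUE reads an
+ * integer member and GET_STR copies an embedded character array; the
+ * size-offset cache is filled lazily by INIT_MBR_TABLE on first use.
+ */
+#if 0
+static void
+example_show_gendisk(unsigned long gd)
+{
+	int major;
+	char name[BUFSIZE];
+
+	GET_VALUE(gd, gendisk, major, major);
+	GET_STR(gd, gendisk, disk_name, name, BUFSIZE);
+	fprintf(fp, "  %s major:%d\n", name, major);
+}
+#endif
+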
+/*
+ * Utility function/macro to walk the list
+ */
+static unsigned long
+get_next_from_list_head(unsigned long addr)
+{
+	unsigned long ret;
+
+	readmem(addr + OFFSET(list_head_next), KVADDR, &ret, sizeof(void *),
+		MSG("get_next_from_list_head", "list_head", "next"),
+		FAULT_ON_ERROR);
+
+	return ret;
+}
+
+#define list_for_each(next, head, last) \
+	for (next = get_next_from_list_head(head), last = 0UL; \
+		next && next != head && next != last; \
+		last = next, next = get_next_from_list_head(next))
+
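+/*
+ * Illustrative sketch (not compiled in, helper name is hypothetical):
+ * walking a kernel list with the macro above.  "head" is the kernel
+ * virtual address of the list_head anchor; "next" visits each element's
+ * embedded list_head in turn.
+ */
+#if 0
+static void
+example_walk_list(unsigned long head)
+{
+	unsigned long next, last;
+
+	list_for_each(next, head, last)
+		fprintf(fp, "  node at %lx\n", next);
+}
+#endif
+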
+/*
+ * device-mapper target analyzer
+ *
+ * device-mapper has various target drivers: linear, mirror, multipath, etc.
+ * Information specific to a target is stored in its own way.
+ * A target-specific analyzer is provided for each target driver for this reason.
+ */
+static struct dminfo_target_analyzer {
+	struct dminfo_target_analyzer *next;
+	char *target_name;
+	int (*ready) (void);	/* returns true if analyzer is available */
+	void (*show_table) (unsigned long);  /* display table info */
+	void (*show_status) (unsigned long); /* display status info */
+	void (*show_queue) (unsigned long);  /* display queued I/O info */
+} analyzers_head;
+
+static void
+dminfo_register_target_analyzer(struct dminfo_target_analyzer *ta)
+{
+	ta->next = analyzers_head.next;
+	analyzers_head.next = ta;
+}
+
+static struct
+dminfo_target_analyzer *find_target_analyzer(char *target_type)
+{
+	struct dminfo_target_analyzer *ta;
+
+	for (ta = analyzers_head.next; ta; ta = ta->next)
+		if (!strcmp(ta->target_name, target_type))
+			return ta;
+
+	return NULL;
+}
+
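+/*
+ * Each analyzer below is hooked onto the list above with
+ * dminfo_register_target_analyzer(), e.g.
+ * dminfo_register_target_analyzer(&zero_analyzer), presumably at module
+ * initialization time.
+ */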
+/*
+ * zero target
+ */
+static int
+zero_ready(void)
+{
+	return 1;
+}
+
+static void
+zero_show_table(unsigned long target)
+{
+	unsigned long long start, len;
+
+	/* Get target information */
+	GET_VALUE(target, dm_target, begin, start);
+	GET_VALUE(target, dm_target, len, len);
+
+	fprintf(fp, "  begin:%llu len:%llu", start, len);
+}
+
+static void
+zero_show_status(unsigned long target)
+{
+	/* zero target has no status */
+	fprintf(fp, "  No status info");
+}
+
+static void
+zero_show_queue(unsigned long target)
+{
+	/* zero target has no queue */
+	fprintf(fp, "  No queue info");
+}
+
+static struct dminfo_target_analyzer zero_analyzer = {
+	.target_name      = "zero",
+	.ready            = zero_ready,
+	.show_table       = zero_show_table,
+	.show_status      = zero_show_status,
+	.show_queue       = zero_show_queue
+};
+
+/*
+ * error target
+ */
+static int
+error_ready(void)
+{
+	return 1;
+}
+
+static void
+error_show_table(unsigned long target)
+{
+	unsigned long long start, len;
+
+	/* Get target information */
+	GET_VALUE(target, dm_target, begin, start);
+	GET_VALUE(target, dm_target, len, len);
+
+	fprintf(fp, "  begin:%llu len:%llu", start, len);
+}
+
+static void
+error_show_status(unsigned long target)
+{
+	/* error target has no status */
+	fprintf(fp, "  No status info");
+}
+
+static void
+error_show_queue(unsigned long target)
+{
+	/* error target has no queue */
+	fprintf(fp, "  No queue info");
+}
+
+static struct dminfo_target_analyzer error_analyzer = {
+	.target_name      = "error",
+	.ready            = error_ready,
+	.show_table       = error_show_table,
+	.show_status      = error_show_status,
+	.show_queue       = error_show_queue
+};
+
+/*
+ * linear target
+ */
+static int
+linear_ready(void)
+{
+	static int debuginfo = 0;
+
+	if (debuginfo)
+		return 1;
+
+	if (STRUCT_EXISTS("struct linear_c")) {
+		debuginfo = 1;
+		return 1;
+	} else
+		fprintf(fp, "No such struct info: linear_c");
+
+	return 0;
+}
+
+static void
+linear_show_table(unsigned long target)
+{
+	unsigned long lc, dm_dev;
+	unsigned long long start, len, offset;
+	char devt[BUFSIZE];
+
+	/* Get target information */
+	GET_VALUE(target, dm_target, begin, start);
+	GET_VALUE(target, dm_target, len, len);
+	GET_VALUE(target, dm_target, private, lc);
+	GET_VALUE(lc, linear_c, dev, dm_dev);
+	GET_STR(dm_dev, dm_dev, name, devt, BUFSIZE);
+	GET_VALUE(lc, linear_c, start, offset);
+
+	fprintf(fp, "  begin:%llu len:%llu dev:%s offset:%llu",
+		start, len, devt, offset);
+}
+
+static void
+linear_show_status(unsigned long target)
+{
+	/* linear target has no status */
+	fprintf(fp, "  No status info");
+}
+
+static void
+linear_show_queue(unsigned long target)
+{
+	/* linear target has no I/O queue */
+	fprintf(fp, "  No queue info");
+}
+
+static struct dminfo_target_analyzer linear_analyzer = {
+	.target_name      = "linear",
+	.ready            = linear_ready,
+	.show_table       = linear_show_table,
+	.show_status      = linear_show_status,
+	.show_queue       = linear_show_queue
+};
+
+/*
+ * mirror target
+ */
+static int
+mirror_ready(void)
+{
+	static int debuginfo = 0;
+
+	if (debuginfo)
+		return 1;
+
+	if (STRUCT_EXISTS("struct mirror_set")) {
+		debuginfo = 1;
+		return 1;
+	} else
+		fprintf(fp, "No such struct info: mirror_set");
+
+	return 0;
+}
+
+static void
+mirror_show_table(unsigned long target)
+{
+	unsigned int i, nr_mir;
+	unsigned long ms, rh, log, log_type, mir_size, mir_head, mir, dm_dev;
+	unsigned long long offset;
+	char buf[BUFSIZE];
+
+	/* Get the address of struct mirror_set */
+	GET_VALUE(target, dm_target, private, ms);
+
+	/* Get the log-type name of the mirror_set */
+	GET_ADDR(ms, mirror_set, rh, rh);
+	GET_VALUE(rh, region_hash, log, log);
+	GET_VALUE(log, dirty_log, type, log_type);
+	GET_PTR_STR(log_type, dirty_log_type, name, buf, BUFSIZE);
+	fprintf(fp, "  log:%s", buf);
+
+	/*
+	 * Display information for each mirror disk.
+	 *
+	 * mir_head = mirror_set.mirror.
+	 * This is the head of struct mirror array.
+	 */
+	fprintf(fp, " dev:");
+	mir_size = STRUCT_SIZE("struct mirror");
+	GET_ADDR(ms, mirror_set, mirror, mir_head);
+	GET_VALUE(ms, mirror_set, nr_mirrors, nr_mir);
+	for (i = 0; i < nr_mir; i++) {
+		mir = mir_head + mir_size * i; /* Get next mirror */
+
+		/* Get the devt of the mirror disk */
+		GET_VALUE(mir, mirror, dev, dm_dev);
+		GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE);
+
+		/* Get the offset of the mirror disk */
+		GET_VALUE(mir, mirror, offset, offset);
+
+		fprintf(fp, "%s(%llu)%s", buf, offset,
+			i == nr_mir - 1 ? "" : ",");
+	}
+	if (i != nr_mir)
+		fprintf(fp, " ERROR: dev are less than nr_mir:%d", nr_mir);
+}
+
+static void
+mirror_show_status(unsigned long target)
+{
+	unsigned int i, nr_mir, synced, nr_error;
+	unsigned long ms, mir_size, mir_head, mir, dm_dev;
+	char buf[BUFSIZE];
+
+	/* Get the address of struct mirror_set */
+	GET_VALUE(target, dm_target, private, ms);
+
+	/* Get the status info of the mirror_set */
+	GET_VALUE(ms, mirror_set, in_sync, synced);
+	fprintf(fp, "  in_sync:%d", synced);
+
+	/*
+	 * Display information for each mirror disk.
+	 *
+	 * mir_head = mirror_set.mirror.
+	 * This is the head of struct mirror array.
+	 */
+	fprintf(fp, " dev:");
+	mir_size = STRUCT_SIZE("struct mirror");
+	GET_ADDR(ms, mirror_set, mirror, mir_head);
+	GET_VALUE(ms, mirror_set, nr_mirrors, nr_mir);
+	for (i = 0; i < nr_mir; i++) {
+		mir = mir_head + mir_size * i; /* Get next mirror */
+
+		/* Get the devt of the mirror disk */
+		GET_VALUE(mir, mirror, dev, dm_dev);
+		GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE);
+
+		/* Get the error count of the mirror disk */
+		GET_VALUE(mir, mirror, error_count, nr_error);
+
+		fprintf(fp, "%s(%c,%d)%s", buf, nr_error ? 'D' : 'A', nr_error,
+			i == nr_mir - 1 ? "" : ",");
+	}
+	if (i != nr_mir)
+		fprintf(fp, " ERROR: dev are less than nr_mir:%d", nr_mir);
+}
+
+static void
+mirror_show_queue(unsigned long target)
+{
+	unsigned long ms, rlist, wlist, rhead, whead;
+	unsigned long rh, quis_head, rcov_head, quis_next, rcov_next;
+
+	/* Get the address of struct mirror_set */
+	GET_VALUE(target, dm_target, private, ms);
+
+	/* Get the address of queued I/O lists in struct mirror_set */
+	GET_ADDR(ms, mirror_set, reads, rlist);
+	GET_ADDR(ms, mirror_set, writes, wlist);
+
+	/* Get the head of queued I/O lists */
+	GET_VALUE(rlist, bio_list, head, rhead);
+	GET_VALUE(wlist, bio_list, head, whead);
+	fprintf(fp, "  %s", rhead ? "reads" : "(reads)");
+	fprintf(fp, " %s", whead ? "writes" : "(writes)");
+
+	/* Get the address of the struct region_hash */
+	GET_ADDR(ms, mirror_set, rh, rh);
+
+	/* Get the address of recover region lists in struct region_hash */
+	GET_ADDR(rh, region_hash, quiesced_regions, quis_head);
+	GET_ADDR(rh, region_hash, recovered_regions, rcov_head);
+
+	/* Get the head of recover region lists */
+	quis_next = get_next_from_list_head(quis_head);
+	rcov_next = get_next_from_list_head(rcov_head);
+
+	fprintf(fp, " %s", quis_next != quis_head ? "quiesced" : "(quiesced)");
+	fprintf(fp, " %s", rcov_next != rcov_head ? "recovered" : "(recovered)");
+}
+
+static struct dminfo_target_analyzer mirror_analyzer = {
+	.target_name      = "mirror",
+	.ready            = mirror_ready,
+	.show_table       = mirror_show_table,
+	.show_status      = mirror_show_status,
+	.show_queue       = mirror_show_queue
+};
+
+/*
+ * multipath target
+ */
+static int
+multipath_ready(void)
+{
+	static int debuginfo = 0;
+
+	if (debuginfo)
+		return 1;
+
+	if (STRUCT_EXISTS("struct multipath")) {
+		debuginfo = 1;
+		return 1;
+	} else
+		fprintf(fp, "No such struct info: multipath");
+
+	return 0;
+}
+
+static void
+multipath_show_table(unsigned long target)
+{
+	int i, j;
+	unsigned int queue_if_no_path, nr_pgs, pg_id, nr_paths;
+	unsigned long mp, hwh, hwh_type, ps, ps_type, path, dm_dev;
+	unsigned long pg_head, pg_next, pg_last;
+	unsigned long path_head, path_next, path_last;
+	char name[BUFSIZE];
+
+	/* Get the address of struct multipath */
+	GET_VALUE(target, dm_target, private, mp);
+
+	/* Get features information */
+	GET_VALUE(mp, multipath, queue_if_no_path, queue_if_no_path);
+
+	/* Get the hardware-handler information */
+	GET_ADDR(mp, multipath, hw_handler, hwh);
+	GET_VALUE(hwh, hw_handler, type, hwh_type);
+	if (hwh_type)
+		GET_PTR_STR(hwh_type, hw_handler_type, name, name, BUFSIZE);
+	else
+		strcpy(name, "none");
+
+	/* Get the number of priority groups */
+	GET_VALUE(mp, multipath, nr_priority_groups, nr_pgs);
+
+	fprintf(fp, "  queue_if_no_path:%d hwh:%s nr_pgs:%d\n",
+		queue_if_no_path, name, nr_pgs);
+
+	/* Display information for each priority group */
+	fprintf(fp, "    %-2s  %-13s  %-8s  %s",
+		"PG", "PATH_SELECTOR", "NR_PATHS", "PATHS");
+	GET_ADDR(mp, multipath, priority_groups, pg_head);
+	i = 0;
+	list_for_each (pg_next, pg_head, pg_last) {
+		/* pg_next == struct priority_group */
+
+		/* Get the index of the priority group */
+		GET_VALUE(pg_next, priority_group, pg_num, pg_id);
+
+		/* Get the name of path selector */
+		GET_ADDR(pg_next, priority_group, ps, ps);
+		GET_VALUE(ps, path_selector, type, ps_type);
+		GET_PTR_STR(ps_type, path_selector_type, name, name, BUFSIZE);
+
+		/* Get the number of paths in the priority group */
+		GET_VALUE(pg_next, priority_group, nr_pgpaths, nr_paths);
+
+		fprintf(fp, "\n    %-2d  %-13s  %-8d ", pg_id, name, nr_paths);
+
+		/* Display information for each path */
+		GET_ADDR(pg_next, priority_group, pgpaths, path_head);
+		j = 0;
+		list_for_each (path_next, path_head, path_last) {
+			/* path_next == struct pgpath */
+
+			/* Get the devt of the pgpath */
+			GET_ADDR(path_next, pgpath, path, path);
+			GET_VALUE(path, path, dev, dm_dev);
+			GET_STR(dm_dev, dm_dev, name, name, BUFSIZE);
+
+			fprintf(fp, " %s", name);
+			j++;
+		}
+		if (j != nr_paths)
+			fprintf(fp, " ERROR: paths are less than nr_paths:%d",
+				nr_paths);
+		i++;
+	}
+	if (i != nr_pgs)
+		fprintf(fp, " ERROR: pgs are less than nr_pgs:%d", nr_pgs);
+}
+
+static void
+multipath_show_status(unsigned long target)
+{
+	int i, j;
+	unsigned int queue_if_no_path, nr_pgs, pg_id, nr_paths;
+	unsigned int bypassed_pg, path_active, nr_fails;
+	unsigned long mp, hwh, hwh_type, cur_pg, path, dm_dev;
+	unsigned long pg_head, pg_next, pg_last;
+	unsigned long path_head, path_next, path_last;
+	char buf[BUFSIZE], path_status;
+
+	/* Get the address of struct multipath */
+	GET_VALUE(target, dm_target, private, mp);
+
+	/* Get features information */
+	GET_VALUE(mp, multipath, queue_if_no_path, queue_if_no_path);
+
+	/* Get the hardware-handler information */
+	GET_ADDR(mp, multipath, hw_handler, hwh);
+	GET_VALUE(hwh, hw_handler, type, hwh_type);
+	if (hwh_type)
+		GET_PTR_STR(hwh_type, hw_handler_type, name, buf, BUFSIZE);
+	else
+		strcpy(buf, "none");
+
+	/* Get the number of priority groups */
+	GET_VALUE(mp, multipath, nr_priority_groups, nr_pgs);
+
+	fprintf(fp, "  queue_if_no_path:%d hwh:%s nr_pgs:%d\n",
+		queue_if_no_path, buf, nr_pgs);
+
+	/* Display information for each priority group */
+	fprintf(fp, "    %-2s  %-9s  %-8s  %s",
+		"PG", "PG_STATUS", "NR_PATHS", "PATHS");
+	GET_ADDR(mp, multipath, priority_groups, pg_head);
+	i = 0;
+	list_for_each (pg_next, pg_head, pg_last) {
+		/* pg_next == struct priority_group */
+
+		/* Get the index of the priority group */
+		GET_VALUE(pg_next, priority_group, pg_num, pg_id);
+
+		/* Get the status of the priority group */
+		GET_VALUE(pg_next, priority_group, bypassed, bypassed_pg);
+		if (bypassed_pg)
+			strcpy(buf, "disabled");
+		else {
+			GET_VALUE(mp, multipath, current_pg, cur_pg);
+			if (pg_next == cur_pg)
+				strcpy(buf, "active");
+			else
+				strcpy(buf, "enabled");
+		}
+
+		/* Get the number of paths in the priority group */
+		GET_VALUE(pg_next, priority_group, nr_pgpaths, nr_paths);
+
+		fprintf(fp, "\n    %-2d  %-9s  %-8d ", pg_id, buf, nr_paths);
+
+		/* Display information for each path */
+		GET_ADDR(pg_next, priority_group, pgpaths, path_head);
+		j = 0;
+		list_for_each (path_next, path_head, path_last) {
+			/* path_next == struct pgpath */
+
+			/* Get the devt of the pgpath */
+			GET_ADDR(path_next, pgpath, path, path);
+			GET_VALUE(path, path, dev, dm_dev);
+			GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE);
+
+			/* Get the status of the path */
+			GET_VALUE(path, path, is_active, path_active);
+			GET_VALUE(path_next, pgpath, fail_count, nr_fails);
+			path_status = path_active ? 'A' : 'F';
+
+			fprintf(fp, " %s(%c,%u)", buf, path_status, nr_fails);
+			j++;
+		}
+		if (j != nr_paths)
+			fprintf(fp, " ERROR: paths are less than nr_paths:%d",
+				nr_paths);
+		i++;
+	}
+	if (i != nr_pgs)
+		fprintf(fp, " ERROR: pgs are less than nr_pgs:%d", nr_pgs);
+}
+
+static void
+multipath_show_queue(unsigned long target)
+{
+	unsigned int queue_size;
+	unsigned long mp;
+
+	/* Get the address of struct multipath */
+	GET_VALUE(target, dm_target, private, mp);
+
+	/* Get the size of queued I/Os in this 'target' */
+	GET_VALUE(mp, multipath, queue_size, queue_size);
+
+	fprintf(fp, "  queue_size:%d", queue_size);
+}
+
+static struct dminfo_target_analyzer multipath_analyzer = {
+	.target_name      = "multipath",
+	.ready            = multipath_ready,
+	.show_table       = multipath_show_table,
+	.show_status      = multipath_show_status,
+	.show_queue       = multipath_show_queue
+};
+
+/*
+ * crypt target
+ */
+static int
+crypt_ready(void)
+{
+	static int debuginfo = 0;
+
+	if (debuginfo)
+		return 1;
+
+	if (STRUCT_EXISTS("struct crypt_config")) {
+		debuginfo = 1;
+		return 1;
+	} else
+		fprintf(fp, "No such struct info: crypt_config");
+
+	return 0;
+}
+
+#define DMINFO_CRYPTO_TFM_MODE_ECB 0x00000001
+#define DMINFO_CRYPTO_TFM_MODE_CBC 0x00000002
+
+static void
+crypt_show_table(unsigned long target)
+{
+	int i, cit_mode, key_size;
+	unsigned long cc, tfm, crt_alg, cipher, iv_mode, dm_dev;
+	char buf[BUFSIZE], *chainmode;
+
+	/* Get the address of struct crypt_config */
+	GET_VALUE(target, dm_target, private, cc);
+
+	/* Get the cipher name of the crypt_tfm */
+	GET_VALUE(cc, crypt_config, tfm, tfm);
+	GET_VALUE(tfm, crypto_tfm, __crt_alg, crt_alg);
+	GET_STR(crt_alg, crypto_alg, cra_name, buf, BUFSIZE);
+	fprintf(fp, "  type:%s", buf);
+
+	/* Get the cit_mode of the crypt_tfm */
+	GET_ADDR(tfm, crypto_tfm, crt_u, cipher);
+	GET_VALUE(cipher, cipher_tfm, cit_mode, cit_mode);
+
+	if (MEMBER_EXISTS("struct crypt_config", "iv_mode")) {
+		if (cit_mode == DMINFO_CRYPTO_TFM_MODE_CBC)
+			chainmode = "cbc";
+		else if (cit_mode == DMINFO_CRYPTO_TFM_MODE_ECB) 
+			chainmode = "ecb";
+		else
+			chainmode = "unknown";
+
+		/* Get the iv_mode of the crypt_config */
+		GET_VALUE(cc, crypt_config, iv_mode, iv_mode);
+		if (iv_mode) {
+			GET_PTR_STR(cc, crypt_config, iv_mode, buf, BUFSIZE);
+			fprintf(fp, "-%s-%s", chainmode, buf);
+		} else
+			fprintf(fp, "-%s", chainmode);
+
+	} else {
+		/* Compatibility mode for old dm-crypt cipher strings */
+		if (cit_mode == DMINFO_CRYPTO_TFM_MODE_CBC)
+			chainmode = "plain";
+		else if (cit_mode == DMINFO_CRYPTO_TFM_MODE_ECB) 
+			chainmode = "ecb";
+		else
+			chainmode = "unknown";
+
+		fprintf(fp, "-%s", chainmode);
+	}
+
+	/* Get the devt of the crypt_config */
+	GET_VALUE(cc, crypt_config, dev, dm_dev);
+	GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE);
+	fprintf(fp, " dev:%s", buf);
+
+	/*
+	 * Get the key of the crypt_config.
+	 */
+	GET_VALUE(cc, crypt_config, key_size, key_size);
+	GET_STR(cc, crypt_config, key, buf, MIN(key_size + 1, BUFSIZE));
+	fprintf(fp, " key:");
+	for (i = 0; i < key_size; i++)
+		fprintf(fp, "%02x", (unsigned char)buf[i]);
+}
+
+static void
+crypt_show_status(unsigned long target)
+{
+	/* crypt target has no status */
+	fprintf(fp, "  No status info");
+}
+
+static void
+crypt_show_queue(unsigned long target)
+{
+	/* crypt target has no queue */
+	fprintf(fp, "  No queue info");
+}
+
+static struct dminfo_target_analyzer crypt_analyzer = {
+	.target_name      = "crypt",
+	.ready            = crypt_ready,
+	.show_table       = crypt_show_table,
+	.show_status      = crypt_show_status,
+	.show_queue       = crypt_show_queue
+};
+
+/*
+ * stripe target
+ */
+static int
+stripe_ready(void)
+{
+	static int debuginfo = 0;
+
+	if (debuginfo)
+		return 1;
+
+	if (STRUCT_EXISTS("struct stripe_c")) {
+		debuginfo = 1;
+		return 1;
+	} else
+		fprintf(fp, "No such struct info: stripe_c");
+
+	return 0;
+}
+
+static void
+stripe_show_table(unsigned long target)
+{
+	unsigned int i, n_stripe;
+	unsigned long sc, stripe_size, s, head, dm_dev;
+	unsigned long long mask;
+	char buf[BUFSIZE];
+
+	/* Get the address of struct stripe_c */
+	GET_VALUE(target, dm_target, private, sc);
+
+	/* Get the chunk_size of the stripe_c */
+	GET_VALUE(sc, stripe_c, chunk_mask, mask);
+	fprintf(fp, "  chunk_size:%llu", mask + 1);
+
+	/*
+	 * Display the information of each stripe disk.
+	 *
+	 * head = stripe_c.stripe.
+	 * This is the head of struct stripe array.
+	 */
+	stripe_size = STRUCT_SIZE("struct stripe");
+	GET_ADDR(sc, stripe_c, stripe, head);
+	GET_VALUE(sc, stripe_c, stripes, n_stripe);
+	fprintf(fp, " dev:");
+	for (i = 0; i < n_stripe; i++) {
+		s = head + stripe_size * i; /* Get next stripe */
+
+		/* Get the devt of the stripe disk */
+		GET_VALUE(s, stripe, dev, dm_dev);
+		GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE);
+
+		fprintf(fp, "%s%s", buf, i == n_stripe - 1 ? "" : ",");
+	}
+	if (i != n_stripe)
+		fprintf(fp, " ERROR: dev are less than n_stripe:%d", n_stripe);
+}
+
+static void
+stripe_show_status(unsigned long target)
+{
+	/* stripe target has no status */
+	fprintf(fp, "  No status info");
+}
+
+static void
+stripe_show_queue(unsigned long target)
+{
+	/* stripe target has no queue */
+	fprintf(fp, "  No queue info");
+}
+
+static struct dminfo_target_analyzer stripe_analyzer = {
+	.target_name      = "striped",
+	.ready            = stripe_ready,
+	.show_table       = stripe_show_table,
+	.show_status      = stripe_show_status,
+	.show_queue       = stripe_show_queue
+};
+
+/*
+ * snapshot target
+ */
+static int
+snapshot_ready(void)
+{
+	static int debuginfo = 0;
+
+	if (debuginfo)
+		return 1;
+
+	if (STRUCT_EXISTS("struct dm_snapshot")) {
+		debuginfo = 1;
+		return 1;
+	} else
+		fprintf(fp, "No such struct info: dm_snapshot");
+
+	return 0;
+}
+
+static void
+snapshot_show_table(unsigned long target)
+{
+	unsigned long snap, orig_dev, cow_dev;
+	unsigned long long chunk_size;
+	char orig_name[BUFSIZE], cow_name[BUFSIZE], type;
+
+	/* Get the address of struct dm_snapshot */
+	GET_VALUE(target, dm_target, private, snap);
+
+	/* Get snapshot parameters of the dm_snapshot */
+	GET_VALUE(snap, dm_snapshot, origin, orig_dev);
+	GET_STR(orig_dev, dm_dev, name, orig_name, BUFSIZE);
+	GET_VALUE(snap, dm_snapshot, cow, cow_dev);
+	GET_STR(cow_dev, dm_dev, name, cow_name, BUFSIZE);
+	GET_VALUE(snap, dm_snapshot, type, type);
+	GET_VALUE(snap, dm_snapshot, chunk_size, chunk_size);
+
+	fprintf(fp, "  orig:%s cow:%s type:%c chunk_size:%llu",
+		orig_name, cow_name, type, chunk_size);
+}
+
+static void
+snapshot_show_status(unsigned long target)
+{
+	int valid;
+	unsigned long snap;
+
+	/* Get the address of struct dm_snapshot */
+	GET_VALUE(target, dm_target, private, snap);
+
+	/* Get snapshot parameters of the dm_snapshot */
+	GET_VALUE(snap, dm_snapshot, valid, valid);
+
+	fprintf(fp, "  valid:%d", valid);
+}
+
+static void
+snapshot_show_queue(unsigned long target)
+{
+	fprintf(fp, "  No queue info");
+}
+
+static struct dminfo_target_analyzer snapshot_analyzer = {
+	.target_name      = "snapshot",
+	.ready            = snapshot_ready,
+	.show_table       = snapshot_show_table,
+	.show_status      = snapshot_show_status,
+	.show_queue       = snapshot_show_queue
+};
+
+/*
+ * snapshot-origin target
+ */
+static int
+origin_ready(void)
+{
+	return 1;
+}
+
+static void
+origin_show_table(unsigned long target)
+{
+	unsigned long dm_dev;
+	char buf[BUFSIZE];
+
+	/* Get the name of the struct dm_dev */
+	GET_VALUE(target, dm_target, private, dm_dev);
+	GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE);
+
+	fprintf(fp, "  orig_dev:%s", buf);
+}
+
+static void
+origin_show_status(unsigned long target)
+{
+	/* snapshot-origin target has no status */
+	fprintf(fp, "  No status info");
+}
+
+static void
+origin_show_queue(unsigned long target)
+{
+	/* snapshot-origin target has no queue */
+	fprintf(fp, "  No queue info");
+}
+
+static struct dminfo_target_analyzer snapshot_origin_analyzer = {
+	.target_name      = "snapshot-origin",
+	.ready            = origin_ready,
+	.show_table       = origin_show_table,
+	.show_status      = origin_show_status,
+	.show_queue       = origin_show_queue
+};
+
+/*
+ * Core part of dminfo
+ */
+#define DMINFO_LIST   0
+#define DMINFO_DEPS   1
+#define DMINFO_TABLE  2
+#define DMINFO_STATUS 3
+#define DMINFO_QUEUE  4
+
+static int
+dm_core_ready(void)
+{
+	static int debuginfo = 0;
+
+	if (debuginfo)
+		return 1;
+
+	if (STRUCT_EXISTS("struct hash_cell")) {
+		debuginfo = 1;
+		return 1;
+	} else
+		fprintf(fp, "No such struct info: hash_cell\n");
+
+	return 0;
+}
+
+/* Display dependency information of the 'table' */
+static void
+dminfo_show_deps(unsigned long table)
+{
+	int major, minor, count;
+	unsigned long head, next, last, dev, bdev;
+	char buf[BUFSIZE];
+
+	/* head = dm_table.devices */
+	GET_ADDR(table, dm_table, devices, head);
+
+	fprintf(fp, "  %-3s  %-3s  %-16s  %-5s  %s\n",
+		"MAJ", "MIN", "GENDISK", "COUNT", "DEVNAME");
+
+	list_for_each (next, head, last) {
+		/* Get dependency information. (next == struct *dm_dev) */
+		GET_VALUE(next, dm_dev, count, count);
+		GET_VALUE(next, dm_dev, bdev, bdev);
+		GET_VALUE(bdev, block_device, bd_disk, dev);
+		GET_VALUE(dev, gendisk, major, major);
+		GET_VALUE(dev, gendisk, first_minor, minor);
+		GET_STR(dev, gendisk, disk_name, buf, BUFSIZE);
+
+		fprintf(fp, "  %-3d  %-3d  %-16lx  %-5d  %s\n",
+			major, minor, dev, count, buf);
+	}
+}
+
+/*
+ * Display target specific information in the 'table', if the target
+ * analyzer is registered and available.
+ */
+static void
+dminfo_show_details(unsigned long table, unsigned int num_targets, int info_type)
+{
+	unsigned int i;
+	unsigned long head, target_size, target, target_type;
+	struct dminfo_target_analyzer *ta;
+	char buf[BUFSIZE];
+
+	/*
+	 * head = dm_table.targets.
+	 * This is the head of struct dm_target array.
+	 */
+	GET_VALUE(table, dm_table, targets, head);
+	target_size = STRUCT_SIZE("struct dm_target");
+
+	fprintf(fp, "  %-16s  %-11s  %s\n",
+		"TARGET", "TARGET_TYPE", "PRIVATE_DATA");
+
+	for (i = 0; i < num_targets; i++, fprintf(fp, "\n")) {
+		target = head + target_size * i; /* Get next target */
+
+		/* Get target information */
+		GET_VALUE(target, dm_target, type, target_type);
+		GET_PTR_STR(target_type, target_type, name, buf, BUFSIZE);
+
+		fprintf(fp, "  %-16lx  %-11s", target, buf);
+
+		if (!(ta = find_target_analyzer(buf)) || !ta->ready
+			|| !ta->ready())
+			continue;
+
+		switch (info_type) {
+		case DMINFO_TABLE:
+			if (ta->show_table)
+				ta->show_table(target);
+			break;
+		case DMINFO_STATUS:
+			if (ta->show_status)
+				ta->show_status(target);
+			break;
+		case DMINFO_QUEUE:
+			if (ta->show_queue)
+				ta->show_queue(target);
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (i != num_targets)
+		fprintf(fp, " ERROR: targets are less than num_targets:%d",
+			num_targets);
+}
+
+/*
+ * Display lists (and detail information if specified) of existing
+ * dm devices.
+ */
+static void
+dminfo_show_list(int additional_info)
+{
+	int i, major, minor, array_len;
+	unsigned int num_targets;
+	unsigned long _name_buckets, head, next, last, md, dev, table;
+	char buf[BUFSIZE];
+
+	_name_buckets = symbol_value("_name_buckets");
+	array_len = get_array_length("_name_buckets", NULL, 0);
+
+	if (additional_info == DMINFO_LIST)
+		fprintf(fp, "%-3s  %-3s  %-16s  %-16s  %-7s  %s\n",
+			"MAJ", "MIN", "MAP_DEV", "DM_TABLE",
+			"TARGETS", "MAPNAME");
+
+	for (i = 0; i < array_len; i++) {
+		/* head = _name_buckets[i] */
+		head = _name_buckets + (i * SIZE(list_head));
+
+		list_for_each (next, head, last) { /* next == hash_cell */
+			/* Get device and table information */
+			GET_PTR_STR(next, hash_cell, name, buf, BUFSIZE);
+			GET_VALUE(next, hash_cell, md, md);
+			GET_VALUE(md, mapped_device, disk, dev);
+			GET_VALUE(dev, gendisk, major, major);
+			GET_VALUE(dev, gendisk, first_minor, minor);
+			GET_VALUE(md, mapped_device, map, table);
+			GET_VALUE(table, dm_table, num_targets, num_targets);
+
+			if (additional_info != DMINFO_LIST)
+				fprintf(fp, "%-3s  %-3s  %-16s  %-16s  %-7s  %s\n",
+					"MAJ", "MIN", "MAP_DEV", "DM_TABLE",
+					"TARGETS", "MAPNAME");
+
+			fprintf(fp, "%-3d  %-3d  %-16lx  %-16lx  %-7d  %s\n",
+				major, minor, md, table, num_targets, buf);
+
+			switch(additional_info) {
+			case DMINFO_DEPS:
+				dminfo_show_deps(table);
+				break;
+			case DMINFO_TABLE:
+			case DMINFO_STATUS:
+			case DMINFO_QUEUE:
+				dminfo_show_details(table, num_targets,
+					additional_info);
+				break;
+			default:
+				break;
+			}
+
+			if (additional_info != DMINFO_LIST)
+				fprintf(fp, "\n");
+		}
+	}
+}
+
+/*
+ * Display the original bio information for the 'bio'.
+ * If the 'bio' is for a dm device, the original bio information is pointed
+ * to by bio.bi_private as a struct target_io.
+ */
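+/*
+ * For reference, the pointer chain walked below is (assuming the bio really
+ * is a dm clone bio):
+ *   bio.bi_private -> target_io.io -> dm_io.{bio, md} -> mapped_device.disk
+ */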
+static void
+dminfo_show_bio(unsigned long bio)
+{
+	int major, minor;
+	unsigned long target_io, dm_io, dm_bio, md, dev;
+	char buf[BUFSIZE];
+
+	/* Get original bio and device information */
+	GET_VALUE(bio, bio, bi_private, target_io);
+	GET_VALUE(target_io, target_io, io, dm_io);
+	GET_VALUE(dm_io, dm_io, bio, dm_bio);
+	GET_VALUE(dm_io, dm_io, md, md);
+	GET_VALUE(md, mapped_device, disk, dev);
+	GET_VALUE(dev, gendisk, major, major);
+	GET_VALUE(dev, gendisk, first_minor, minor);
+	GET_STR(dev, gendisk, disk_name, buf, BUFSIZE);
+
+	fprintf(fp, "%-16s  %-3s  %-3s  %-16s  %s\n",
+		"DM_BIO_ADDRESS", "MAJ", "MIN", "MAP_DEV", "DEVNAME");
+	fprintf(fp, "%-16lx  %-3d  %-3d  %-16lx  %s\n",
+		dm_bio, major, minor, md, buf);
+}
+
+static void
+cmd_dminfo(void)
+{
+	int c, additional_info = DMINFO_LIST;
+	unsigned long bio;
+
+	if (!dm_core_ready())
+		return;
+
+	/* Parse command line option */
+	while ((c = getopt(argcnt, args, "b:dlqst")) != EOF) {
+		switch(c)
+		{
+		case 'b':
+			bio = stol(optarg, FAULT_ON_ERROR, NULL);
+			dminfo_show_bio(bio);
+			return;
+		case 'd':
+			additional_info = DMINFO_DEPS;
+			break;
+		case 'l':
+			additional_info = DMINFO_LIST;
+			break;
+		case 'q':
+			additional_info = DMINFO_QUEUE;
+			break;
+		case 's':
+			additional_info = DMINFO_STATUS;
+			break;
+		case 't':
+			additional_info = DMINFO_TABLE;
+			break;
+		default:
+			argerrs++;
+			break;
+		}
+	}
+
+	if (argerrs)
+		cmd_usage(pc->curcmd, SYNOPSIS);
+
+	dminfo_show_list(additional_info);
+}
+
+/*
+ * dminfo help
+ */
+static char *help_dminfo[] = {
+	"dminfo",				/* command name */
+	"device mapper (dm) information",	/* short description */
+	"[-b bio | -d | -l | -q | -s | -t]",	/* argument synopsis */
+	"  This command displays information about device-mapper mapped",
+	"  devices (dm devices).",
+	"  If no argument is entered, it displays a list of the existing dm",
+	"  devices.  This is the same as the -l option.",
+	"",
+	"    -b bio  displays the information of the dm device to which the",
+	"            bio was submitted.  If the bio isn't for a dm device,",
+	"            the result will be an error.",
+	"        -d  displays dependency information for existing dm devices.",
+	"        -l  displays lists of existing dm devices.",
+	"        -q  displays queued I/O information for each target of",
+	"            existing dm devices.",
+	"        -s  displays status information for each target of existing",
+	"            dm devices.",
+	"        -t  displays table information for each target of existing",
+	"            dm devices.",
+	"",
+	"EXAMPLE",
+	"  Display lists of dm devices.  \"MAP_DEV\" is the address of the",
+	"  struct mapped_device.  \"DM_TABLE\" is the address of the struct",
+	"  dm_table.  \"TARGETS\" is the number of targets which are in",
+	"  the struct dm_table.",
+	"",
+	"    %s> dminfo",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  8    c4866c80          c4866280          1        vg0-snap0",
+	"    253  6    f6a04a80          f6a04580          1        vg0-lv0-real",
+	"    253  0    c4840380          c4841880          1        mp0",
+	"    253  5    f7c50c80          c488e480          1        via_cbeheddbdd",
+	"    253  7    c4866a80          c4866380          1        vg0-snap0-cow",
+	"    253  4    d441e280          c919ed80          1        dummy1",
+	"    253  3    f5dc4280          cba81d80          1        dummy0",
+	"    253  2    f7c53180          c4866180          1        vg0-lv0",
+	"    253  1    f746d280          f746cd80          1        mp0p1",
+	"",
+	"  Display the information of the dm device to which the bio was submitted.",
+	"  The bio (ceacee80) is a clone of the bio (ceacee00) which was",
+	"  submitted to the dm-3 (dummy0).  And the bio (ceacee00) is a clone",
+	"  of the bio (ceaced80) which was submitted to the dm-4 (dummy1), too.",
+	"  The bio (ceaced80) is the original bio.",
+	"",
+	"    %s> dminfo -b ceacee80",
+	"    DM_BIO_ADDRESS    MAJ  MIN  MAP_DEV           DEVNAME",
+	"    ceacee00          253  3    f5dc4280          dm-3",
+	"    crash> dminfo -b ceacee00",
+	"    DM_BIO_ADDRESS    MAJ  MIN  MAP_DEV           DEVNAME",
+	"    ceaced80          253  4    d441e280          dm-4",
+	"    crash> dminfo -b ceaced80",
+	"    dminfo: invalid kernel virtual address: 64  type: \"GET_VALUE: dm_io.bio\"",
+	"",
+	"  Display dependency information for each target.",
+	"  The vg0-snap0 depends on the dm-6 (vg0-lv0-real) and the dm-7",
+	"  (vg0-snap0-cow).",
+	"",
+	"    %s> dminfo -d",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  8    c4866c80          c4866280          1        vg0-snap0",
+	"      MAJ  MIN  GENDISK           COUNT  DEVNAME",
+	"      253  7    c4866980          1      dm-7",
+	"      253  6    f6a04280          1      dm-6",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  6    f6a04a80          f6a04580          1        vg0-lv0-real",
+	"      MAJ  MIN  GENDISK           COUNT  DEVNAME",
+	"      8    0    f7f24c80          1      sda",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  7    c4866a80          c4866380          1        vg0-snap0-cow",
+	"      MAJ  MIN  GENDISK           COUNT  DEVNAME",
+	"      8    0    f7f24c80          1      sda",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  2    f7c53180          c4866180          1        vg0-lv0",
+	"      MAJ  MIN  GENDISK           COUNT  DEVNAME",
+	"      253  6    f6a04280          1      dm-6",
+	"",
+	"  Display queued I/O information for each target.",
+	"  The information is displayed under the \"PRIVATE_DATA\" column.",
+	"",
+	"    %s> dminfo -q",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  5    f7c50c80          c488e480          1        via_cbeheddbdd",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f8961080          mirror       (reads) (writes) (quiesced) (recovered)",
+	"",
+	"      --------------------------------------------------------------",
+	"       \"reads/writes\" are members of the struct mirror_set, and",
+	"       \"quiesced/recovered\" are members of the struct region_hash.",
+	"       If the list is empty, the member is bracketed by \"()\".",
+	"      --------------------------------------------------------------",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  0    c4840380          c4841880          1        mp0",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f8802080          multipath    queue_size:0",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  1    f746d280          f746cd80          1        mp0p1",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f8821080          linear       No queue info",
+	"",
+	"  Display status information for each target.",
+	"  The information is displayed under the \"PRIVATE_DATA\" column.",
+	"",
+	"    %s> dminfo -s",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  0    c4840380          c4841880          1        mp0",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f8802080          multipath    queue_if_no_path:0 hwh:none nr_pgs:1",
+	"        PG  PG_STATUS  NR_PATHS  PATHS",
+	"        1   active     2         8:16(A,0) 8:32(A,0)",
+	"",
+	"      --------------------------------------------------------------",
+	"       Format of \"PATHS\": <major>:<minor>(<status>,<fail_count>)",
+	"         Status: A:active, F:faulty",
+	"         Fail_count: the value of the struct pgpath.fail_count",
+	"      --------------------------------------------------------------",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  5    f7c50c80          c488e480          1        via_cbeheddbdd",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f8961080          mirror       in_sync:1 dev:8:16(A,0),8:32(A,0)",
+	"",
+	"      --------------------------------------------------------------",
+	"       Format of \"dev\": <major>:<minor>(<status>,<error_count>)",
+	"         Status: A:active, D:degraded",
+	"         Error_count: the value of the struct mirror.error_count",
+	"      --------------------------------------------------------------",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  1    f746d280          f746cd80          1        mp0p1",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f8821080          linear       No status info",
+	"",
+	"  Display table information for each target.",
+	"  The information is displayed under the \"PRIVATE_DATA\" column.",
+	"",
+	"    %s> dminfo -t",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  8    c4866c80          c4866280          1        vg0-snap0",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f89b4080          snapshot     orig:253:6 cow:253:7 type:P chunk_size:16",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  6    f6a04a80          f6a04580          1        vg0-lv0-real",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f890f080          linear       begin:0 len:204800 dev:8:5 offset:384",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  0    c4840380          c4841880          1        mp0",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f8802080          multipath    queue_if_no_path:0 hwh:none nr_pgs:1",
+	"        PG  PATH_SELECTOR  NR_PATHS  PATHS",
+	"        1   round-robin    2         8:16 8:32",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  5    f7c50c80          c488e480          1        via_cbeheddbdd",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f8961080          mirror       log:core dev:8:16(0),8:32(0)",
+	"",
+	"      --------------------------------------------------------------",
+	"       Format of \"dev\": <major>:<minor>(<offset>)",
+	"         Offset: the value of the struct mirror.offset",
+	"      --------------------------------------------------------------",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  7    c4866a80          c4866380          1        vg0-snap0-cow",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f899d080          linear       begin:0 len:8192 dev:8:5 offset:205184",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  2    f7c53180          c4866180          1        vg0-lv0",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f8bbc080          snapshot-origin  orig_dev:253:6",
+	"",
+	"    MAJ  MIN  MAP_DEV           DM_TABLE          TARGETS  MAPNAME",
+	"    253  1    f746d280          f746cd80          1        mp0p1",
+	"      TARGET            TARGET_TYPE  PRIVATE_DATA",
+	"      f8821080          linear       begin:0 len:2040192 dev:253:0 offset:63",
+	NULL
+};
+
+/*
+ * Registering command extension
+ */
+
+static struct command_table_entry command_table[] = {
+	{"dminfo", cmd_dminfo, help_dminfo, 0},
+	{NULL, NULL, NULL, 0},
+};
+
+int _init(void)
+{
+	register_extension(command_table);
+
+	dminfo_register_target_analyzer(&zero_analyzer);
+	dminfo_register_target_analyzer(&error_analyzer);
+	dminfo_register_target_analyzer(&linear_analyzer);
+	dminfo_register_target_analyzer(&mirror_analyzer);
+	dminfo_register_target_analyzer(&multipath_analyzer);
+	dminfo_register_target_analyzer(&crypt_analyzer);
+	dminfo_register_target_analyzer(&stripe_analyzer);
+	dminfo_register_target_analyzer(&snapshot_analyzer);
+	dminfo_register_target_analyzer(&snapshot_origin_analyzer);
+
+	return 0;
+}
+
+int _fini(void)
+{
+	return 0;
+}
--- crash/extensions/echo.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/echo.c	2007-11-09 16:57:28.000000000 -0500
@@ -0,0 +1,114 @@
+/* echo.c - simple example of a crash extension
+ *
+ * Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
+ * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "defs.h"    /* From the crash source top-level directory */
+
+int _init(void);
+int _fini(void);
+
+void cmd_echo(void);     /* Declare the commands and their help data. */
+char *help_echo[];
+
+static struct command_table_entry command_table[] = {
+	{ "echo", cmd_echo, help_echo, 0 },    /* One or more commands, */
+	{ NULL }                               /* terminated by NULL, */
+};
+
+
+int 
+_init(void) /* Register the command set. */
+{ 
+        register_extension(command_table);
+	return 1;
+}
+ 
+/* 
+ *  The _fini() function is called if the shared object is unloaded. 
+ *  If desired, perform any cleanups here. 
+ */
+int 
+_fini(void) 
+{ 
+	return 1;
+}
+
+
+/* 
+ *  Arguments are passed to the command functions in the global args[argcnt]
+ *  array.  See getopt(3) for info on dash arguments.  Check out defs.h and
+ *  other crash commands for usage of the myriad of utility routines available
+ *  to help accomplish your task.
+ */
+void
+cmd_echo(void)
+{
+        int c;
+
+        while ((c = getopt(argcnt, args, "")) != EOF) {
+                switch(c)
+                {
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+
+        while (args[optind]) 
+                fprintf(fp, "%s ", args[optind++]);
+
+        fprintf(fp, "\n");
+}
+
+/* 
+ *  The optional help data is simply an array of strings in a defined format.
+ *  For example, the "help echo" command will use the help_echo[] string
+ *  array below to create a help page that looks like this:
+ * 
+ *    NAME
+ *      echo - echoes back its arguments
+ *
+ *    SYNOPSIS
+ *      echo arg ...
+ *
+ *    DESCRIPTION
+ *      This command simply echoes back its arguments.
+ *
+ *    EXAMPLE
+ *      Echo back all command arguments:
+ *
+ *        crash> echo hello, world
+ *        hello, world
+ *
+ */
+ 
+char *help_echo[] = {
+        "echo",                        /* command name */
+        "echoes back its arguments",   /* short description */
+        "arg ...",                     /* argument synopsis, or " " if none */
+ 
+        "  This command simply echoes back its arguments.",
+        "\nEXAMPLE",
+        "  Echo back all command arguments:\n",
+        "    crash> echo hello, world",
+        "    hello, world",
+        NULL
+};
+
+
--- crash/extensions/sial.mk.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/sial.mk	2007-10-30 11:13:30.000000000 -0400
@@ -0,0 +1,17 @@
+#
+ifeq ($(TARGET), PPC64)
+        TARGET_FLAGS = -D$(TARGET) -m64
+else
+        TARGET_FLAGS = -D$(TARGET)
+endif
+
+all: sial.so
+
+lib-sial: 
+	cd libsial && make
+        
+sial.so: ../defs.h sial.c lib-sial
+	gcc -g -I.. -Ilibsial -I../gdb-6.1/bfd -I../gdb-6.1/include -I../gdb-6.1/gdb -I../gdb-6.1/gdb/config -nostartfiles -shared -rdynamic -o sial.so sial.c -fPIC $(TARGET_FLAGS) -Llibsial -lsial 
+
+clean:
+	cd libsial && make clean
--- crash/extensions/libsial/sial_define.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_define.c	2007-10-30 11:13:53.000000000 -0400
@@ -0,0 +1,519 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <string.h>
+#include <stdio.h>
+#include "sial.h"
+/*
+	This set of functions handles #define for simple constants or macros.
+	We read from the current parser input stream until end of line.
+
+	The big thing is that we need to do some parsing to get the define names
+	and parameters. Also, at the time of the macro instantiation, we need to parse
+	the parameters again. That leads to a more complex package...
+*/
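+/*
+	Illustration (hypothetical input, not part of the parser itself):
+	a line like
+
+		#define SHIFT(x, n)  ((x) << (n))
+
+	is recorded by sial_newmac() with np=2, p[]={"x","n"} and buf holding
+	the body text.  A later use such as SHIFT(flags, 3) makes
+	sial_pushmac() re-parse the two actual parameters, push them as
+	temporary substitution macros, and replay the stored body on the
+	input stream.
+*/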
+
+#define MAXP 20
+typedef struct mac_s {
+
+	char *name;		/* this macro name */
+	int np;			/* number of parameters */
+	int issub;		/* subs have to be treated differently */
+	char **p;		/* parameter names */
+	char *buf;		/* text for the macro itself */
+	struct mac_s *next;	/* next on the list */
+	srcpos_t pos;
+
+} mac_t;
+
+typedef struct {
+	mac_t *m;
+	char **subs;
+} smac_t;
+
+static mac_t* macs=0;
+
+/* we have to add a space at the end of the value.
+   Again, this is to prevent an EOF on the parsing stream */
+def_t*
+sial_add_def(def_t*d, char*name, char*val)
+{
+def_t*nd=sial_alloc(sizeof(def_t));
+char *buf=sial_alloc(strlen(val)+2);
+
+	strcpy(buf, val);
+	strcat(buf, " ");
+	sial_free(val);
+	nd->name=name;
+	nd->val=buf;
+	nd->next=d;
+	return nd;
+}
+
+/* search for a macro in the current list */
+mac_t * 
+sial_getmac(char *name, int takeof)
+{
+mac_t *m;
+mac_t *prev=0;
+mac_t *sial_getcurmac(void);
+
+	if(takeof || !(m=sial_getcurmac())) m=macs;
+
+	for(; m; m=m->next) {
+
+		if( !strcmp(m->name, name) ) {
+
+			if(takeof) {
+
+				if(!prev) macs=m->next;
+				else prev->next=m->next;
+
+			}
+			return m;
+		}
+		prev=m;
+	}
+	return 0;
+}
+
+node_t*
+sial_macexists(node_t*var)
+{
+char *name=NODE_NAME(var);
+int val;
+
+	if(sial_getmac(name, 0)) val=1;
+	else val=0;
+	return sial_makenum(B_UL, val);
+}
+static void
+sial_freemac(mac_t*m)
+{
+int i;
+
+	for(i=0;i<m->np;i++) sial_free(m->p[i]);
+	if(m->np) sial_free(m->p);
+	sial_free(m);
+}
+
+/*
+	These are called at 2 different points.
+	One call at the very beginning. One call for each file.
+*/
+void* sial_curmac(void) { return macs; }
+
+void
+sial_flushmacs(void *vtag)
+{
+mac_t *m, *next;
+mac_t *tag=(mac_t *)vtag;
+
+	for(m=macs; m!=tag; m=next) {
+
+		next=m->next;
+		sial_freemac(m);
+	}
+	macs=m;
+}
+
+/* this function is called to register a new macro.
+   The text associated w/ the macro is still on the parser stream,
+   until EOL.
+*/
+void
+sial_newmac(char *mname, char *buf, int np, char **p, int silent)
+{
+char *p2;
+mac_t *m;
+
+	{
+		char *p=buf+strlen(buf)-1;
+
+		/* eliminate trailing blanks */
+		while(*p && (*p==' ' || *p=='\t')) p--;
+		*(p+1)='\0';
+
+		/* eliminate leading blanks */
+		p=buf;
+		while(*p && (*p==' ' || *p=='\t')) p++;
+
+		/* copy and append a space. This is to prevent unloading of the
+	   	macro before the sial_chkmacvar() call has been performed */
+		p2=sial_alloc(strlen(p)+2);
+		strcpy(p2, p);
+		sial_free(buf);
+		p2[strlen(p2)+1]='\0';
+		p2[strlen(p2)]=' ';
+		buf=p2;
+	}
+
+	if((m=sial_getmac(mname, 1)) && strcmp(m->buf, buf)) {
+
+		/* when processing the compile options, be silent. */
+		if(!silent) {
+
+			sial_warning("Macro redefinition '%s' with different value_t\n"
+				"value_t=[%s]\n"
+				"Previous value_t at %s:%d=[%s]\n"
+				, mname, buf, m->pos.file, m->pos.line, m->buf);
+		}
+
+	}
+	m=(mac_t*)sial_alloc(sizeof(mac_t));
+	m->name=sial_strdup(mname);
+	m->np=np;
+	m->p=p;
+	m->buf=buf;
+	m->next=macs;
+	m->issub=0;
+	sial_setpos(&m->pos);
+	macs=m;
+}
+
+/* this function is called by the enum declaration function and
+   when an enum type is extracted from the image to push a set
+   of defines onto the stack that correspond to each identifier
+   in the enum.
+*/
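+/*
+	For instance (hypothetical enum), extracting
+		enum retcode { OK, FAILED=5 };
+	results in the equivalent of "#define OK 0" and "#define FAILED 5"
+	being pushed as parameterless macros.
+*/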
+void
+sial_pushenums(enum_t *et)
+{
+	while(et) {
+
+		char *buf=sial_alloc(40);
+
+		sprintf(buf, "%d", et->value);
+		sial_newmac(et->name, buf, 0, 0, 0);
+		et=et->next;
+	}
+}
+
+static void
+sial_skipcomment(void)
+{
+int c;
+
+	while((c=sial_input())) {
+
+		if(c=='*') {
+
+			int c2;
+
+			if((c2=sial_input())=='/') return;
+			sial_unput(c2);
+		}
+	}
+}
+
+static void
+sial_skipstr(void)
+{
+int c;
+
+	while((c=sial_input())) {
+
+		if(c=='\\') sial_input();
+		else if(c=='"') return;
+	}
+}
+
+
+/* skip over strings and comments to a specific character */
+static void
+sial_skipto(int x)
+{
+int c;
+
+	while((c=sial_input())) {
+
+		if(c==x) return;
+
+		switch(c) {
+
+			case '\\':
+			sial_input();
+			break;
+
+			case '"':
+			sial_skipstr();
+			break;
+
+			case '/': {
+
+				int c2;
+
+				if((c2=sial_input())=='*') {
+
+					sial_skipcomment();
+
+				} else sial_unput(c2);
+			}
+			break;
+
+			case '(':
+
+				sial_skipto(')');
+			break;
+
+			case ')':
+			sial_error("Missing parameters to macro");
+			break;
+		}
+
+	}
+
+	sial_error("Expected '%c'", x);
+}
+
+
+/*
+   This function gets called when the buffer for a macro has been fully
+   parsed. We need to take the associated parameter substitution macros
+   off of the stack and deallocate the associated data.
+*/
+static void
+sial_popmac(void *vsm)
+{
+smac_t *sm=(smac_t *)vsm;
+int i;
+
+	for(i=0;i<sm->m->np;i++) {
+
+		mac_t *m=sial_getmac(sm->m->p[i], 1);
+
+		if(!m) sial_error("Oops macro pop!");
+		sial_free(m->buf);
+		sial_free(m->name);
+		sial_free(m);
+	}
+	sial_free(sm->subs);
+	sial_free(sm);
+}
+
+/* 
+
+  We need to get the actual parameters from the parser stream.
+  These can be simple variables or complex multi-line expressions
+  with strings and comments embedded in them...
+
+*/
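+/*
+	For example (hypothetical call): FOO(a + b, f("x,y"), (c, d)) yields
+	three parameters here, because sial_skipto() does not split on commas
+	that sit inside strings, comments or nested parentheses.
+*/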
+static int 
+sial_pushmac(mac_t *m)
+{
+int i;
+char **subs=sial_alloc(sizeof(char*)*m->np);
+smac_t *sm;
+int siallex(void);
+
+	/* the next token should be a '(' */
+	if(siallex() != '(') {
+
+		sial_error("Expected '(' after '%s'", m->name);
+		
+	}
+
+	/* get the parameters */
+	for(i=0;i<m->np;i++) {
+
+		char *p=sial_cursorp();
+		int nc;
+
+		if(i<m->np-1) sial_skipto(',');
+		else sial_skipto(')');
+
+		nc=sial_cursorp()-p-1;
+		subs[i]=sial_alloc(nc+2);
+		strncpy(subs[i], p, nc);
+		subs[i][nc]=' ';
+		subs[i][nc+1]='\0';
+	}
+
+	/* take care of the macro() case, ex: IS_R10000(), i.e. no parameters */
+	if(!m->np) 
+		sial_skipto(')');
+
+	sm=sial_alloc(sizeof(smac_t));
+
+	sm->m=m;
+	sm->subs=subs;
+
+	/* we push the associated buffer on the stream */
+	sial_pushbuf(m->buf, 0, sial_popmac, sm, 0);
+
+	/* we push the subs onto the macro stack */
+	for(i=0;i<m->np;i++) {
+
+		mac_t *pm=sial_alloc(sizeof(mac_t));
+
+		pm->name=sial_alloc(strlen(m->p[i])+1);
+		strcpy(pm->name, m->p[i]);
+		pm->np=0;
+		pm->p=0;
+		pm->buf=subs[i];
+		pm->next=macs;
+		pm->issub=1;
+		macs=pm;
+	}
+	return 1;
+	
+}
+
+
+/*
+	This one is called from the lexer to check if a 'var' is to be substituted for
+	a macro
+*/
+int
+sial_chkmacvar(char *mname)
+{
+mac_t *m;
+
+	if((m=sial_getmac(mname, 0))) {
+
+
+		/* simple constant ? */
+		if(!m->p) {
+
+			sial_pushbuf(m->buf, 0, 0, 0, m->issub ? m->next : 0);
+
+		} else {
+			return sial_pushmac(m);
+		}
+		return 1;
+
+	}
+	return 0;
+
+}
+
+/*
+	Skip an unsupported preprocessor directive.
+*/
+void
+sial_skip_directive(void)
+{
+	sial_free(sial_getline());
+}
+
+void
+sial_undefine(void)
+{
+int c;
+int i=0;
+char mname[MAX_SYMNAMELEN+1];
+mac_t *m;
+
+	/* skip all white spaces */
+	while((c=sial_input()) == ' ' || c == '\t') if(c=='\n' || !c) {
+
+		sial_error("Macro name expected");
+	}
+
+	mname[i++]=c;
+
+	/* get the constant or macro name */
+	while((c=sial_input()) != ' ' && c != '\t') {
+
+		if(c=='\n' || !c) break;
+		if(i==MAX_SYMNAMELEN) break;
+		mname[i++]=c;
+	}
+	mname[i]='\0';
+	if((m=sial_getmac(mname, 1))) sial_freemac(m);
+        else sial_addneg(mname);
+}
+
+/*
+	This one is called from the lexer after #define has been detected.
+*/
+void
+sial_define(void)
+{
+int c;
+int i=0;
+char mname[MAX_SYMNAMELEN+1];
+
+	/* skip all white spaces */
+	while((c=sial_input()) == ' ' || c == '\t') if(c=='\n' || !c) goto serror;
+
+	mname[i++]=c;
+
+	/* get the constant or macro name */
+	while((c=sial_input()) != ' ' && c != '\t' && c != '(') {
+
+		if(c=='\n' || !c) break;
+
+		if(i==MAX_SYMNAMELEN) break;
+
+		mname[i++]=c;
+	}
+	mname[i]='\0';
+
+	/* does this macro have parameters? */
+	/* If so, '(' will be right after the name of the macro. No spaces. */
+	if(c=='(') {
+
+		int np, nc, done;
+		char **pnames;
+		char curname[MAX_SYMNAMELEN+1];
+
+		np=nc=done=0;
+		pnames=(char **)sial_alloc(sizeof(char*)*MAXP);
+		
+		while(!done) {
+
+			c=sial_input();
+
+			switch(c) {
+				case '\n': case 0:
+				goto serror;
+
+				/* continuation */
+				case '\\':
+				if(sial_input()!='\n') goto serror;
+				break;
+
+				case ',':
+				if(!nc) goto serror;
+last:
+				curname[nc]='\0';
+				pnames[np]=sial_alloc(strlen(curname)+1);
+				strcpy(pnames[np], curname);
+				nc=0;
+				np++;
+				break;
+
+				case ')':
+				done=1;
+				if(nc) goto last;
+				break;
+
+				case ' ':
+				case '\t':
+				break;
+
+				default:
+				curname[nc++]=c;
+				break;
+			}
+		}
+		sial_newmac(mname, sial_getline(), np, pnames, 0);
+		return;
+
+	} else if(c == '\n') {
+
+		/* if nothing specified then set to "1" */
+		sial_newmac(mname, sial_strdup("1"), 0, 0, 0);
+
+	} else {
+
+		sial_newmac(mname, sial_getline(), 0, 0, 0);
+	}
+		
+	return;
+
+serror:
+
+	sial_error("Syntax error on macro definition");
+}
--- crash/extensions/libsial/sialpp.l.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sialpp.l	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,85 @@
+%{
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#define YY_NO_UNPUT
+%}
+
+%{
+#include <string.h>
+
+#ifdef linux
+#define YY_INPUT(buf,result,max_size) \
+{ \
+	int c = sial_input(); \
+	result = (c == EOF) ? YY_NULL : (buf[0] = c, 1); \
+}
+#endif
+
+#include	"sial.h"
+#include	"sialpp.tab.h"
+#if linux
+#define yylval sialpplval
+#endif
+
+#define retok(t) return(t)
+int nomacs=0;
+extern int sial_chkmacvar(char *);
+extern node_t *sial_newchar(void);
+%}
+
+ABC		[a-zA-Z_]
+ABCN		[a-zA-Z0-9_]
+N		[0-9]
+X		[0-9a-fA-F]
+
+%%
+
+[ \t\n]+	{ ; }
+
+"defined"	{ retok(P_DEFINED); }
+"&&"		{ retok(P_BAND); }
+"||"		{ retok(P_BOR); }
+"<"		{ retok(P_LT); }
+"<="		{ retok(P_LE); }
+"=="		{ retok(P_EQ); }
+">="		{ retok(P_GE); }
+">"		{ retok(P_GT); }
+"!="		{ retok(P_NE); }
+"|"		{ retok(P_OR); }
+"!"		{ retok(P_NOT); }
+"^"		{ retok(P_XOR); }
+">>"		{ retok(P_SHR); }
+"<<"		{ retok(P_SHL); }
+"+"		{ retok(P_ADD); }
+"-"		{ retok(P_SUB); }
+"/"		{ retok(P_DIV); }
+"%"		{ retok(P_MOD); }
+"*"		{ retok(P_MUL); }
+
+(("0x"+){X}+[lL]*|{N}+[lL]*)	{ yylval.n = sial_newnum(yytext); retok(P_NUMBER); }
+
+{ABC}{ABCN}*	{ 
+				if(strlen(yytext) > MAX_SYMNAMELEN) {
+
+					sial_error("Symbol name too long");
+				}
+				if(nomacs || !sial_chkmacvar(yytext)) {
+
+					yylval.n = sial_newvnode(yytext); 
+					retok(P_VAR);
+				}
+		}
+
+\'.\'		{ yylval.n = sial_makenum(B_SC, yytext[1]); retok(P_NUMBER); }
+\'\\.\'		{ yylval.n = sial_makenum(B_SC, sial_getseq(yytext[2])); retok(P_NUMBER); }
+
+
+.		{ retok(yylval.i = yytext[0]); }
+
+%%
+#undef input
+#undef unput
+#define input()       sial_input()
+#define unput(c)      sial_unput(c)
--- crash/extensions/libsial/sial_node.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_node.c	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+/*
+	These functions are used to allocate a new node.
+	It's a layer between the type-specific functions and the parser.
+*/
+#include "sial.h"
+#include <setjmp.h>
+
+/*
+	Allocate a new node structure
+*/
+node_t*
+sial_newnode()
+{
+node_t*n;
+
+	n = (node_t*) sial_calloc(sizeof(node_t));
+	TAG(n);
+	return n;
+}
+
+void
+sial_free_siblings(node_t*ni)
+{
+	while(ni) {
+
+		node_t*next=ni->next;
+
+		NODE_FREE(ni);
+
+		ni=next;
+	}
+}
+
+/*
+	This function is called during compile time
+	to evaluate constant expressions, like sizeof(),
+	array sizes and enum constants.
+*/
+value_t *
+sial_exenode(node_t*n)
+{
+value_t *v;
+int *exval;
+jmp_buf exitjmp;
+void *sa;
+srcpos_t p;
+
+	sial_curpos(&n->pos, &p);
+	sa=sial_setexcept();
+
+	if(!setjmp(exitjmp)) {
+
+		sial_pushjmp(J_EXIT, &exitjmp, &exval);
+		v=NODE_EXE(n);
+		sial_rmexcept(sa);
+		sial_popjmp(J_EXIT);
+
+	} else {
+
+		sial_rmexcept(sa);
+		return 0;
+
+	}
+	sial_curpos(&p, 0);
+	return v;
+}
--- crash/extensions/libsial/README.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/README	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,1024 @@
+
+               SIAL : Simple Image Access Language
+               ===================================
+
+    Sial is a C interpreter that permits easy access to the symbol
+and type information stored in an executable image like a coredump or a live
+memory interface (e.g. /dev/kmem, /dev/mem). It supports a FULL C
+syntax and the same variable and function scopes and types. Symbols and
+type information in the image become standard variables and types in
+the sial script's context.
+
+    This README focuses on the differences between sial and a C compiler
+so, for C syntax information, please refer to a C reference manual. I
+also explain the mechanisms of the API that allow sial to be inserted
+into any debugging tool that deals with objects and can furnish symbol
+and type information to sial through the API. The more specific lcrash
+sial implementation is described and a howto on creating commands is
+also given here.
+
+Preprocessor commands
+---------------------
+
+	All preprocessor commands I know of are supported:  #define,
+	#undef, #ifdef, #if, #ifndef, #else, #elif, #endif and #include.
+
+	These are ignored:  #ident and #pragma.
+
+	Sial has a builtin secondary parser for preprocessor expression
+	evaluation.
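+
+	For example, a macro file might use these directives like this
+	(LINUX_2_6 is just a hypothetical define here):
+
+        #define PAGESZ          4096
+        #define ALIGN(x)        (((x) + PAGESZ - 1) & ~(PAGESZ - 1))
+
+        #ifdef LINUX_2_6
+        #define LIST_OFFSET     8
+        #else
+        #define LIST_OFFSET     4
+        #endif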
+
+Symbols
+-------
+
+	The symbols from the system image and their associated values
+	are available from within sial. Since, most of the time, no
+	type information is associated with the symbols, a reference to
+	a symbol returns the actual address of the symbol within the
+	system image. So you might say that sial adds a level of
+	reference for symbol information. Let's say there is an (int)
+	symbol called "nproc" that contains the number of processes
+	currently running on the system. To get the value of nproc from
+	sial one would have to write something like this:
+
+        void
+        showprocs()
+        {
+        int i;
+        int np;
+
+                np=*(int*)nproc;
+
+                for(i=0;i<np;i++) {
+
+                        do something...
+                }
+        }
+
+	Here you see that we treat nproc as a pointer to void and
+	cast it to an int*.  More precisely, since no type information
+	is available, each symbol should be treated as a void* pointer
+	and cast to the proper type when referenced.
+
+	A second example is the case of a pointer. Let's say that
+	symbol procs is a pointer to a 'struct proc' type. Then from
+	sial it should be treated as a 'struct proc **' not a 'struct
+	proc *'.
+
+        void
+        showprocs()
+        {
+        struct proc* p;
+
+                for(p=*(struct proc**)procs; p; p=p->p_next)
+
+                        do something...
+                }
+        }
+
+Variable Initialization
+-----------------------
+
+	Variable assignment at the time of declaration is supported.
+	Also, the function __init() will be executed, if it is defined,
+	right after the macro file has been compiled.
+
+	Using an uninitialized variable will generate a run time error.
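+
+	A minimal sketch of both mechanisms:
+
+        static int pagesz=4096;
+        static int npages;
+
+        __init()
+        {
+                npages=1048576/pagesz;
+        }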
+
+Variable types
+--------------
+
+	All types made available from the API can be used. 
+	These are the types already defined in the executable image.
+
+	Floating point types are not supported at this time. I have no
+	plans to support them.
+
+	Declaration of arrays is not supported. To access an array from
+	the image, use a pointer.
+
+	Union and structure type declarations are supported and can be
+	used to create additional types that become available within
+	the same macro file.
+
+	Typedefs are supported.
+
+	Function pointers are not supported (use 'string' type instead,
+	see "Operators" below)
+
+	Sial defines a 'string' type to handle ANSI C strings within
+	the interpreter. This string type also supports some of the
+	operators (+, ==, =, !=, etc.; see below).
+
+Variable Scope
+--------------
+
+	All symbols available in the system image become global
+	variables in the sial context.
+
+	Variables declared within sial can be given one of 3 different
+	scopes like in normal C.
+
+	GLOBAL:  A variable that is declared outside a
+	function and is not marked as static. This variable is
+	available to all macros that have been loaded into the
+	interpreter.
+
+	Ex:  file1:
+
+	    int global; int func() { }
+
+        file2:
+
+            func2()
+            {
+			int i;
+
+				i=global;
+			}
+
+	NB: since sial currently validates variable existence only at
+	run time there is no need to declare a 'global' as an 'extern'
+	variable. At run time, if none of the currently loaded macros
+	define a global variable called 'global' then 'i=global' will
+	fail with an 'unknown variable' error.
+
+	FILE:  A variable that is declared outside any
+	function and is tagged with the static keyword. This
+	variable is available to all functions defined in the
+	same macro file.
+
+        Ex:
+
+        file1:
+
+		static int maxproc=100;
+		static int array;
+
+		__init() 
+		{
+		int i;
+		
+			for(i=0;i<10;i++) array[i]=-1;
+			
+		}
+
+		void func1()
+		{
+		int i;
+
+				for(i=0;i<maxproc;i++) ...
+		}
+		int getmaxproc() { return maxproc; }
+
+		__init() is used to initialize the static 'array'.
+		Array is a dynamic array. See below.
+		Both func1() and getmaxproc() use 'maxproc'.
+		getmaxproc() makes the local static variable 'maxproc'
+		available to other, external functions.
+
+	BLOCK: A variable that is declared at the start of a statement
+	block. This variable is available only within this statement
+	block.
+
+        Ex:
+		func()
+		{
+		int i;
+
+				if(some condition) {
+
+						int i;
+
+				}
+		}
+
+    FUNCTION PARAMETER:
+
+        Same scope as BLOCK, for this function.
+
+Storage classes
+---------------
+
+	In the context of sial the register or volatile storage classes
+	do not make any sense and are ignored by the interpreter.
+
+	Like in true C, a variable declared within a function will be
+	automatic (stack) by default. The 'static' keyword can be used
+	to make a variable persistent. Outside of any function, all
+	variables are considered persistent and the 'static' keyword
+	can be used to change the variable scope from 'global' to
+	'file' (see above).
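+
+	A minimal sketch of a persistent counter (illustrative only):
+
+        void count_calls()
+        {
+        static int ncalls=0;
+
+                ncalls=ncalls+1;
+                printf("count_calls() ran %d times\n", ncalls);
+        }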
+
+
+Operators
+---------
+
+        All standard C operators are supported.
+        
+        Sial supports the following operators for the 'string' type:
+
+        +, ==, !=, <, >, <=, >=.
+
+        examples:
+
+        s = "hello" + "World";
+
+        if("hello" == "world" ) { ... }
+
+        The 'in' operator is supported on dynamic arrays.
+
+        if(i in array) { ... }
+        str=i in array ? "yes" : "no";
+
+	Function callbacks
+	------------------
+
+	Function calls through function pointers are not possible
+	currently. Instead, use a 'string' type to achieve the same
+	result. When sial is about to perform a call, it will look at
+	the type of the variable used to name the function. If the type
+	is 'string' it will use the value that string and call that
+	function instead.
+
+        func0(int i, int j)
+        {
+                printf("i=%d j=%d\n", i,j);
+        }
+        func1(string func)
+        {
+                func(1,2);
+        }
+        main()
+        {
+                func1("func0");
+        }
+
+	In the above example, func1() ends up calling func0(), not
+	'func'.  This can be used as a callback mechanism, especially
+	useful for creating generic functions that walk a linked list
+	of objects and call a variable function for each object, like
+	a function that walks tasks or procs.
+
+	The sizeof() operator is evaluated at execution time.  So you
+	can supply a type, a variable, or any valid expression and the
+	appropriate size will be returned.  The expression *will* be
+	executed, so be careful.
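+
+	For example (a minimal sketch; 'struct proc' is assumed to be
+	a type known to the image being analyzed):
+
+        func()
+        {
+        int i, n;
+
+                i=0;
+                n=sizeof(struct proc);  /* size of an image type */
+                n=sizeof(i++);          /* expression runs: i is now 1 */
+        }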
+
+Statements
+----------
+
+        All C statements except 'goto' are supported.
+
+        The 'for(i in array)' is supported on dynamic arrays.
+
+Dynamic arrays
+--------------
+
+	When indexing through a non-pointer variable you end up
+	creating a dynamic array.
+
+    Example:
+
+        int func()
+        {
+        char *cp, c;
+        int array, i;
+
+                cp=(char *)symbol;
+                c=cp[10];
+
+                array[0]="one string";
+                array[12]="second string";
+
+                for(i in array) { 
+
+                        printf("array[%d]=%s\n", i, array[i]);
+                }
+                
+        }
+
+	In the 'c=cp[10]' statement, sial goes to the system image to
+	get one 'char' at the address symbol+10.
+
+	In the second case, 'array' is not a pointer, it's an 'int'. So
+	sial treats all indexing through it as dynamic.
+
+	Additionally, sial supports multiple levels of dynamic indexing,
+	which makes it possible to create arbitrary trees of indexed values:
+
+        int func()
+        {
+        int array, i, j;
+
+                array[10]="array10";
+                array[10][3]="array10,3";
+                array[20]="array20";
+                array[20][99]="array20,99";
+
+                for(i in array) {
+
+                        printf("array[%d]=%s\n", i, array[i]);
+
+                        for(j in array[i]) {
+
+                              printf("array[%d][%d]=%s\n", i, j, array[i][j]);
+
+                        }
+
+                }
+        }
+
+	I think it is a good thing to have, since system image access
+	and analysis require frequent list searches. So, for example,
+	with dynamic arrays, one can walk the proc list taking note of
+	each proc*, then walk a user thread list taking note of each
+	thread* and gathering some metrics for each of these threads.
+	In order to get to these metrics at some later point in the
+	execution, something like this could be used:
+
+        func()
+        {
+        proc *p;
+
+                for(p in procs) {
+
+                        thread *t;
+
+                        for(t in procs[p]) {
+
+                                int rss, size;
+
+                                /* we use index 0 for rss and 1 for size */
+                                printf("proc %p, thread %p, rss:size = %d:%d\n"
+                                      , p, t, procs[p][t][0], procs[p][t][1]);
+                        }
+                }
+        }
+
+	Arrays are always passed by reference. On creation the
+	reference count is set to one. So the array will exist
+	until the variable it's assigned to dies.
+
+	Arrays can also be created by DSOs. See the DSO section below
+	for more information and examples of this.
+	
+
+Sial API
+--------
+
+	Sial can be integrated into any tool that needs to access
+	symbol and type information from some object. Currently it is
+	integrated in lcrash and icrash (tools that access Linux and
+	Irix kernel images respectively), but it should be possible to
+	use it, for example, in dbx or gdb. The API gives a simple
+	interface through which the host application sends symbol and
+	type (including member) information and gives access to the
+	image itself so that sial can read random blocks of data from
+	the image.
+
+    >> sial_builtin(bt *bt)
+
+	Installs a set of builtin functions. See below
+	(the builtin API).
+
+
+    >> sial_chkfname(char *fname, int silent);
+
+	Will check for the existence of a function in sial.
+	Typically used to check extra entry points before the
+	application registers a new command (see sial_setcallback).
+
+    >> sial_open():
+
+	The first function that should be called is sial_open().
+	sial_open() will return a value of 1 if everything is ok or 0
+	in case of some problem. This call initializes internal data
+	for the sial package.
+
+    >> sial_setapi(apiops* ops, int nbytes):
+
+	This function will set up the callbacks that sial will use
+	to get information from the application.
+
+	See 'callback interface' below.
+
+    >> sial_load(char *name);
+
+	To have sial load and compile a macro or a set of macros,
+	use sial_load().  Parameter name gives the name of the
+	file to compile. If name points to a directory instead,
+	then all the files in this directory will be loaded.  So
+	an application would call sial_load() when it first
+	starts up, specifying some well known files or
+	directories to load. For example $HOME/.xxx and
+	/etc/xxx would be loaded, ~/.xxx containing user
+	defined macros, and /etc/xxx containing system macros.
+
+    >> sial_unload(char *funcname)
+
+	To unload a macro file use this function.
+	"funcname" is the name of any global function in the
+	file you want to unload.
+
+    >> void sial_setcallback(void (*scb)(char *));
+
+	To be called prior to any load calls.
+	After each load, sial will call this function
+	back with the name of each function compiled.
+	Typically, the application will then perform checks
+	and potentially install a new command for this
+	function.
+
+	ex:
+	void
+	reg_callback(char *name)
+	{
+	char fname[MAX_SYMNAMELEN+sizeof("_usage")+1];
+	_command_t cmds[2];
+
+        	snprintf(fname, sizeof(fname), "%s_help", name);
+        	if(!sial_chkfname(fname, 0)) return;
+        	snprintf(fname, sizeof(fname), "%s_usage", name);
+        	if(!sial_chkfname(fname, 0)) return;
+
+        	cmds[0].cmd=strdup(name);
+        	cmds[0].real_cmd=0;
+        	cmds[0].cmdfunc=run_callback;
+        	cmds[0].cmdparse=parse_callback;
+        	cmds[0].cmdusage=usage_callback;
+        	cmds[0].cmdhelp=help_callback;
+        	cmds[1].cmd=0;
+        	unregister_cmd(cmds[0].cmd);
+        	(void)register_cmds(cmds);
+	}
+
+    >> sial_setipath(char *path)
+
+	When sial processes a #include directive it will use
+	the specified path as a search path.
+	The usual PATH format is supported ex:
+	"/etc/include:/usr/include".
+
+    >> sial_setmpath(char *path)
+
+	When sial_load() is called with a relative path name or
+	just the name of a file, it will use a search PATH to
+	locate it. The path parameter to sial_setmpath() sets
+	this path. The usual PATH format is supported ex:
+	"/etc/xxx:/usr/lib/xxx".
+
+    >> sial_setofile(FILE *ofile)
+
+       	All output of sial commands will be sent to file ofile.
+
+    >> sial_cmd(char *cmd, char **argv, int nargs)
+
+	This is the way to execute a sial command that has been
+	loaded.  'cmd' is the name of the function to call.  'argv'
+	holds the arguments to this function.  'nargs' is the
+	number of arguments in array 'argv'.
+
+	Sial_cmd() will process argv and make the corresponding
+	values available to the function by creating global
+	variables that the function can test and use.
+
+    >> sial_showallhelp()
+
+	This command will send out a complete list of the commands
+	along with the usage and help for each one of them. This
+	function should be called when the user requests
+	something like 'help all'.
+
+    >> sial_showhelp(char *func)
+
+	This will display the help information for a particular
+	function loaded in sial.
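+
+	Putting these entry points together, a host application's
+	startup might look like the following minimal sketch. Error
+	handling is omitted; the header name, the 'my_ops' table, the
+	'my_newfunc_cb' callback and the value passed as the second
+	sial_setapi() argument are illustrative assumptions, not part
+	of the documented API.
+
+	#include <stdio.h>
+	#include "sial_api.h"
+
+	extern apiops my_ops;               /* callback table, see next section */
+	extern void my_newfunc_cb(char *);  /* registers a command per function */
+
+	int
+	start_sial(void)
+	{
+		if(!sial_open()) return 0;        /* initialize sial internals */
+		sial_setapi(&my_ops, 8);          /* install the image callbacks */
+		sial_setofile(stdout);            /* where command output goes */
+		sial_setcallback(my_newfunc_cb);  /* must be set before any load */
+		sial_setmpath("/etc/xxx:/usr/lib/xxx"); /* macro search path */
+		sial_setipath("/usr/include");    /* #include search path */
+		sial_load("/etc/xxx");            /* load the system macros */
+		return 1;
+	}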
+
+The callback interface
+----------------------
+
+	Every time sial needs some piece of information, it will call
+	the application back for it. The sial_setapi() function is used
+	to install this callback interface into sial. Here is the list
+	of callback functions:
+
+        typedef unsigned long long ull;
+
+	sial_setapi() passes this structure to sial:
+
+	typedef struct {
+
+        	int (*getmem)(ull, void *, int);
+        	int (*putmem)(ull, void *, int);
+        	int (*member)(char *, ull, type * , member *);
+        	int (*getctype)(int ctype, char * , type*);
+        	char* (*getrtype)(ull, type *);
+        	int (*alignment)(ull);
+        	int (*getval)(char *, ull *);
+        	enum_t* (*getenum)(char *name);
+        	def_t*  (*getdefs)();
+        	uint8_t (*get_uint8)(void*);
+        	uint16_t (*get_uint16)(void*);
+        	uint32_t (*get_uint32)(void*);
+        	uint64_t (*get_uint64)(void*);
+	} apiops;
+
+
+    The apiops struct defines the following members, which are function pointers:
+
+    -getmem(ull addr, void *buffer, int nbytes)
+
+	Read nbytes from image at virtual address addr (32 or
+	64 bit) to buffer.
+
+    -putmem(ull addr, void *buffer, int nbytes)
+
+	Write nbytes from buffer to image at virtual address
+	addr (32 or 64 bit).
+
+    -member(char *name, ull pidx, type *tm, member *m);
+
+	Get information on a structure member called name.
+	Pidx is a unique type index for the parent structure.
+	The getctype() function should fill in this index in
+	its type*. The dwarf model uses unique indexes (die
+	offsets) that can be used here.  'tm' will hold
+	information on the type of the member.  'm' will hold
+	information on the member specific stuff (bit sizes,
+	bit offset etc.).
+
+	Use the sial_member_...() functions to setup m.
+	Use the sial_type_...() functions to setup t.
+
+    -getctype(int ctype, char *name, type *tout)
+
+	Get type information for a complex type.  Ctype
+	specifies whether name is a struct/union or an enum
+	type.  tout contains the returned type information.
+
+    -getrtype(ull idx, type *t)
+
+	Gets the type string linked to a typedef. For example,
+	calling it with the index of type 'ull' would return
+	"unsigned long long". This enables sial to drill down a
+	typedef (a typedef can be built from a typedef
+	itself) in order to perform proper type validation for
+	assignments, function parameters and return values.
+
+    -getval(char *sname, ull *value)
+
+	Returns the value of symbol "sname" from the image. The
+	value is returned in 'value'.  On any image this is the
+	address of the symbol within the image itself, not the
+	value of the symbol itself. See the explanation of this
+	above.
+
+    -getenum(char *name);
+
+	Return a list of enum values.
+	Sial will make these available as symbols for the duration
+	of the compile.
+
+    -getdefs()
+
+	Return a list of #defines to be active throughout the
+	sial session.
+
+    -get_uint8/16/32/64()
+
+	Return converted unsigned integers. The parameters are pointers
+	to unsigned int values in the dump representation. The return
+	values are the corresponding unsigned int values in the
+	representation of the host architecture where sial is running.
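+
+	As an illustration, the two memory callbacks might be wired to
+	the host's own dump accessors roughly like this (a minimal
+	sketch: read_image()/write_image() are hypothetical stand-ins
+	for whatever the application really uses, and returning
+	non-zero on success is an assumption):
+
+	/* hypothetical host primitives for raw dump access */
+	extern int read_image(unsigned long long addr, void *buf, int len);
+	extern int write_image(unsigned long long addr, void *buf, int len);
+
+	static int my_getmem(ull addr, void *buffer, int nbytes)
+	{
+		/* copy nbytes from virtual address addr into buffer */
+		return read_image(addr, buffer, nbytes);
+	}
+
+	static int my_putmem(ull addr, void *buffer, int nbytes)
+	{
+		/* copy nbytes from buffer back to the image */
+		return write_image(addr, buffer, nbytes);
+	}
+
+	These two functions would then go into the getmem and putmem
+	slots of the apiops structure passed to sial_setapi().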
+
+The builtin API
+---------------
+
+	Sometimes it is necessary to create a C function that will
+	handle some piece of the work that a macro cannot do. Sial's
+	builtin functions are implemented this way. Generic functions
+	like 'printf' or 'getstr' can get some parameter input from the
+	macros and do something (printf), or they get some information,
+	map it to a sial value and return it to a macro (getstr).
+
+
+	Sial can load new functions from DSOs. If the extension of
+	a file name is ".so" then sial opens it and gets a list
+	of function specs from it. Unloading that file will
+	uninstall these functions.
+
+	The API between the DSO and sial is quite simple at this time.
+	It has not been exercised as much as it would need to be, so it
+	might get more flexible, and thus complex, in the future.
+
+	Here are two examples of simple extensions.
+
+	The first is an equivalent of the "hello world" C program,
+	but this one gets 2 parameters, one int and one string, and
+	returns an int.
+
+	#include "sial_api.h"
+
+	value *
+	helloworld(value *vi, value *vs)
+	{
+	int i=sial_getval(vi);
+	char *s=(char*)sial_getval(vs);
+
+		sial_msg("Hello to the world![%d] s=[%s]\n", i, s);
+		return sial_makebtype(1);
+	}
+
+	BT_SPEC_TABLE = {
+		{ "int hello(int i, string s)",	helloworld},
+		{ 0, 0}
+	};
+
+	static char *buf;
+
+	BT_INIDSO_FUNC()
+	{
+		sial_msg("Hello world being initialized\n");
+		buf=sial_alloc(1000);
+		return 1;
+	}
+
+	BT_ENDDSO_FUNC()
+	{
+		sial_msg("Hello world being shutdown\n");
+		sial_free(buf);
+	}
+
+	The BT_SPEC_TABLE is scanned. It's a simple table
+	with 2 entries per function and terminated with
+	a NULL prototype.
+
+	The DSO initializer function is called.
+	If it returns 0 then installation is terminated.
+	If it returns 1 we proceed forward.
+
+	The prototype is compiled and a syntax error
+	will send the error message to the application
+	output file (stdout usually).
+
+	When the prototype has compiled with no errors
+	the function is installed and ready to be used from
+	sial macros.
+
+	Type checking is performed by sial at
+	execution time on both the function parameters
+	and the function return value.
+
+	DSO's can also receive, create and manipulate dynamic arrays.
+	Here is an example of this:
+
+	#include "sial_api.h"
+
+	#ifdef ARRAY_STATIC
+	static value *v;
+	#endif
+
+	value *
+	mkarray(value* vi)
+	{
+	int i=sial_getval(vi);
+	#ifndef ARRAY_STATIC
+	value *v=sial_makebtype(0);
+	#endif
+
+		sial_msg("Received value [%d]\n", i);
+		/* build an array indexed w/ int w/ 2 string values */
+		sial_addvalarray(v, sial_makebtype(0)
+			, sial_makestr("Value of index 0"));
+		sial_addvalarray(v, sial_makebtype(2)
+			, sial_makestr("Value of index 2"));
+	#ifdef ARRAY_STATIC
+		/*
+		For a static array, take an extra reference;
+		the array will then persist until you free it.
+		*/
+		sial_refarray(v, 1);
+	#endif
+		return v;
+	}
+
+	value *
+	showstrarray(value* va)
+	{
+	value *v1=sial_strindex(va, "foo");
+	value *v2=sial_strindex(va, "goo");
+
+		printf("array[1]=%d\n", sial_getval(v1));
+		printf("array[2]=%d\n", sial_getval(v2));
+		sial_addvalarray(va, sial_makestr("gaa"), sial_makebtype(3));
+		sial_addvalarray(va, sial_makestr("doo"), sial_makebtype(4));
+		sial_freeval(v1);
+		sial_freeval(v2);
+		return sial_makebtype(0);
+	}
+
+	value *
+	showintarray(value* va)
+	{
+	value *v1=sial_intindex(va, 1);
+	value *v2=sial_intindex(va, 2);
+
+		printf("array[1]=%d\n", sial_getval(v1));
+		printf("array[2]=%d\n", sial_getval(v2));
+		sial_freeval(v1);
+		sial_freeval(v2);
+		return sial_makebtype(0);
+	}
+
+	BT_SPEC_TABLE = {
+		{ "int mkarray(int i)",	mkarray},
+		{ "void showintarray(int i)",showintarray},
+		{ "void showstrarray(int i)",showstrarray},
+		{ 0, 0}
+	};
+
+	static char *buf;
+
+	BT_INIDSO_FUNC()
+	{
+		sial_msg("mkarray initialized\n");
+	#ifdef ARRAY_STATIC
+		/* we will need a static value to attach the
+		   array to */
+		v=sial_makebtype(0);
+	#endif
+		return 1;
+	}
+
+	BT_ENDDSO_FUNC()
+	{
+		sial_msg("mkarray being shutdown\n");
+	#ifdef ARRAY_STATIC
+		sial_freeval(v);
+		/* freeing the value decrements the reference
+		   count by one. So, if none of the calling
+		   macros copied the value to a static
+		   sial variable, it will free the array */
+	#endif
+	}
+
+Macro Construction
+------------------
+
+	When sial has been integrated into an application and a basic
+	set of builtin commands has been created, it is time to start
+	creating the macros themselves. Some basic rules and conventions
+	apply to macro construction that make the coding and
+	documenting steps of macro definition easy.
+
+	I will use the function foo as an example. Function foo is
+	defined in file /usr/tmp/sial/foo. Function foo is a user
+	callable function, meaning that it can be executed by the
+	sial_cmd() function. The command input section of the
+	application can thus call sial_cmd("foo", char *argv, int
+	nargs) to execute the foo macro.
+
+	------------ file foo -------------
+
+	foo_opt(){ return "ab:c"; }
+
+	foo_usage(){ return "[-a] [-b barg] [-c] addr [addr [addr...]]"; }
+
+	foo_help(){ return "This is an example function"; }
+
+	static int
+	doproc(proc_t *p)
+	{
+			printf("p=0x%p\n", p);
+	}
+
+	int foo()
+	{
+	int all, i;
+	string barg;
+
+			if(exists("aflag")) all=1;
+			else all=0;
+
+			if(exists("bflag")) bval=barg;
+
+			for(i in argv) {
+
+					proc_t *p;
+
+					p=(proc_t*)atoi(argv[i], 16);
+
+					doproc(p);
+
+			}
+	}
+
+	------------ end of file foo --------------
+
+	The application calls sial_load() to load foo. Sial calls
+	back the application with the names of all functions declared
+	in that file. The application can then register commands for
+	the user to type according to this list of functions,
+	in this case 'foo'.
+
+	The application then uses sial_cmd() to run a specific
+	command 'foo'.
+
+	Before executing the command, sial checks if a foo_opt()
+	function exists and, if so, calls it. This function returns the
+	proper getopt() argument specification string. If this function
+	does not exist then all arguments are passed down to the foo()
+	function directly.
+
+	If the arguments supplied by the user do not follow the proper
+	syntax then the function foo_usage() will be called, if it
+	exists. If the foo_usage() function does not exist, a generic
+	error message is generated by sial.
+
+	If the command 'help foo' is issued, the application should
+	call sial_exefunc("foo_help", 0), which will return a VALUE_S
+	for the help for foo, i.e. whatever foo_help() returns.
+
+	Each option, its associated value and additional arguments are
+	made available to the foo function by creating the following
+	global variables before the actual call.
+
+	Each option, if specified, will trigger the existence of a flag
+	variable. In the foo() case, this means that the variables
+	aflag, bflag and cflag can possibly exist. The function
+	exists("variable name") can then be used to test for this
+	option's presence.
+
+	If an option has an associated value (getopt's ':' is specified
+	in the string foo_opt() returns) this value is made available
+	as a string type variable called Xarg, where X is the option
+	letter. In the case of foo(), the string variable 'barg' would
+	exist if the -b option was supplied by the user.
+
+	The rest of the arguments supplied by the user are made
+	available in an array of 'string' called argv. argv[0] is
+	set to the name of the function, 'foo', and argc is a global
+	that defines how many argv entries there are.
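+
+	For example, if the user runs:
+
+		>> foo -a -b 32 0xa80000004ab14578
+
+	then, before foo() is called, sial creates the flag variables
+	'aflag' and 'bflag', sets the string 'barg' to "32", sets
+	argv[0] to "foo" and argv[1] to "0xa80000004ab14578", and sets
+	argc to 2.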
+
+Builtin functions
+=================
+
+	Here is a description of the current set of builtin functions.
+
+	unsigned long long 
+	atoi(string value [, int base])
+	
+		Convert a string value to a long long. Base is the base
+		that should be used to process the string, e.g. 8, 10 or
+		16. If not specified, then the standard numeric formats
+		will be scanned for, e.g.:
+
+			0x[0-9a-fA-F]+ : hexadecimal
+			0[0-7]+        : octal
+			[1-9]+[0-9]*   : decimal
+		
+		This function is used when converting command line
+		arguments to pointers.
+		
+		Example:
+		
+		void
+		mycommand()
+		{
+		int i;
+		
+			for(i=1;i<argc;i++) {
+			
+				struct proc *p;
+				
+				p=(struct proc*)atoi(argv[i],16);
+				
+				...
+			}
+		}
+		
+		User types this at the lcrash prompt:
+		
+		>> mycommand 0xa80000004ab14578
+		
+	int exists(string name)
+
+		Checks for the existence of a variable. Returns 1 if
+		the variable does exist and 0 otherwise. This function
+		is mostly used to test if some options were specified
+		when the macro was executed from the command line.
+
+		It can also be used to test for image variables.
+
+		example:
+
+		void
+		mycommand()
+		{
+			if(exists("aflag")) {
+
+				// user has specified -a option
+			}
+		}
+
+	void exit()
+	
+		Terminate macro execution now.
+
+	int getchar()
+
+		Get a single character from tty.
+
+	string gets()
+
+		Get a line of input from tty.
+	
+	string getstr(void *)
+	
+		Gets a null terminated string from the image at the
+		address specified. Sial will read a series of 16 byte
+		values from the image until it finds the \0 character.
+		Up to 4000 bytes will be read this way.
+		
+	string getnstr(void *, int n)
+	
+		Gets n characters from the image at the specified
+		address and returns the corresponding string.
+
+	string itoa(unsigned long long)
+	
+		Convert an unsigned long long to a decimal string.
+
+	void printf(char *fmt, ...);
+
+		Send a formatted message to the screen or output file.
+		For proper alignment of output on 32 and 64 bit systems
+		one can use the %> sequence along with the %p format.
+
+		On a 32 bit system %p will print an 8 character hexadecimal
+		value and on a 64 bit system it will print a 16 character
+		value. So, to get proper alignment on both types of systems
+		use the %> format which will print nothing on a 64 bit system
+		but will print 8 times the following character on a 32 bit
+		system.
+
+		example:
+
+		struct proc *p;
+
+		  printf("Proc	%>	uid 	pid\n");
+		  printf("0x%p		%8d	%8d\n"
+			, p, p->p_uid, p->p_pid);
+
+	int sial_depend(string file)
+
+		Loads a macro or directory of macros called
+		'file'. Contrary to sial_load() it will not
+		give any error messages. Returns 1 on success,
+		0 otherwise.
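+
+		Example (a minimal sketch; the file name is made up):
+
+		__init()
+		{
+			if(!sial_depend("lib/common.sial")) {
+				printf("warning: could not load lib/common.sial\n");
+			}
+		}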
+
+	int sial_load(string file)
+	
+		Loads and compiles a sial macro file.
+		Returns 1 if successful or 0 otherwise.
+		
+	void sial_unload(string file)
+	
+		Unloads a sial macro file.
+		
+	string sprintf(string format, ...)
+	
+		Creates a string from the result of a sprintf.
+		Example:
+		
+		void
+		mycommand()
+		{
+		
+		string msg;
+		
+			msg=sprintf("i=%d\n", i);
+		}
+
+		The result will be truncated to maxbytes if it would be
+		longer.
+		
+	int strlen(string s)
+	
+		Return the length of string s.
+		
+	string substr(string s, int start, int len)
+	
+		Creates a string from the substring starting at character
+		'start' of 's' for 'len' characters.
+		example:
+		
+			s=substr("this is the original", 6, 2);
+			
+			So 's' will become "is".
+
+
+	--------------------------------------------------------
+
+Questions/Comments
+Luc Chouinard, lucchouina@yahoo.com
--- crash/extensions/libsial/sial_op.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_op.c	2007-11-09 15:46:25.000000000 -0500
@@ -0,0 +1,904 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include "sial.h"
+#include "sial.tab.h"
+#include <stdarg.h>
+#include <string.h>
+
+#define MAXPARMS 10
+
+typedef struct {
+
+	int op;			/* operator */
+	int np;			/* number of operands */
+	node_t*parms[MAXPARMS];	/* operands */
+
+	srcpos_t pos;
+
+} oper;
+
+#define P1 (o->parms[0])
+#define P2 (o->parms[1])
+#define P3 (o->parms[2])
+#define P4 (o->parms[3])
+
+#define V1 (v1?v1:(v1=NODE_EXE(P1)))
+#define V2 (v2?v2:(v2=NODE_EXE(P2)))
+#define V3 (v3?v3:(v3=NODE_EXE(P3)))
+#define V4 (v4?v4:(v4=NODE_EXE(P4)))
+
+#define L1 (unival(V1))
+#define L2 (unival(V2))
+#define L3 (unival(V3))
+#define L4 (unival(V4))
+
+#define S1 ((V1)->v.data)
+#define S2 ((V2)->v.data)
+#define S3 ((V3)->v.data)
+#define S4 ((V4)->v.data)
+
+void sial_do_deref(int n, value_t *v, value_t *ref);
+ul
+sial_bool(value_t *v)
+{
+	switch(v->type.type) {
+
+	case V_BASE:
+		switch(v->type.size) {
+			case 1: return !(!(v->v.uc));
+			case 2: return !(!(v->v.us));
+			case 4: return !(!(v->v.ul));
+			case 8: return !(!(v->v.ull));
+			default: sial_error("Oops sial_bool()[%d]", v->type.size); break;
+		}
+	case V_STRING : 	return !(!(*((char*)(v->v.data))));
+	case V_REF:		return sial_defbsize()==8?(!(!(v->v.ull))):(!(!(v->v.ul)));
+	default :
+
+		sial_error("Invalid operand for boolean expression");
+		return 0;
+	}
+}
+
+static int cops[]={BAND,BOR,NOT,LT,LE,EQ,GE,GT,NE,CEXPR};
+#define NCOPS (sizeof(cops)/sizeof(cops[0]))
+
+static int
+is_cond(int op)
+{
+int i;
+
+	for(i=0;i<NCOPS;i++) {
+
+		if(cops[i]==op) return 1;
+
+	}
+	return 0;
+}
+
+struct {
+	int cur, equiv;
+} cpls [] ={
+	{ ADDME, ADD },
+	{ SUBME, SUB },
+	{ DIVME, DIV },
+	{ MULME, MUL },
+	{ SHLME, SHL },
+	{ SHRME, SHR },
+	{ XORME, XOR },
+	{ ANDME, AND },
+	{ ORME,  OR  },
+	{ MODME, MOD },
+};
+#define NOPS (sizeof(cpls)/sizeof(cpls[0]))
+/* get the equivalent operation ME type operators */
+static int getop(int op)
+{
+int i;
+	for(i=0;i<NOPS;i++) {
+
+		if(cpls[i].cur==op) return cpls[i].equiv;
+
+	}
+	return op;
+}
+
+static void
+sial_transfer(value_t *v1, value_t *v2, ull rl)
+{
+	sial_dupval(v1, v2);
+	switch(TYPE_SIZE(&v1->type)) {
+
+		case 1: v1->v.uc=rl; break;
+		case 2: v1->v.us=rl; break;
+		case 4: v1->v.ul=rl; break;
+		case 8: v1->v.ull=rl; break;
+
+	}
+	/* the result of an assignment cannot be a lvalue_t */
+	v1->set=0;
+}
+
+#define anyop(t) (V1->type.type==t || (o->np>1 && V2->type.type==t))
+
+typedef struct {
+	node_t*index;
+	node_t*var;
+	srcpos_t pos;
+} index_t ;
+
+static value_t *
+sial_exeindex(index_t  *i)
+{
+value_t *var;
+value_t *vi=NODE_EXE(i->index);
+value_t *v;
+srcpos_t p;
+
+	sial_curpos(&i->pos, &p);
+
+	/* we need to make believe it's been initialized */
+	sial_setini(i->var);
+	var=NODE_EXE(i->var);
+
+	/* check the type of the variable */
+	/* if it's a pointer then index through the image */
+	if(var->type.type==V_REF) {
+
+		int size;
+		int n=sial_getval(vi);
+		value_t *ref;
+
+		/* if this is an array and we're not at the rightmost index */
+		if(var->type.idxlst && var->type.idxlst[1]) {
+
+			int i, size=var->type.size;
+
+			v=sial_cloneval(var);
+
+			v->type.idxlst[0]=0;
+			for(i=1; var->type.idxlst[i]; i++) {
+
+				size *= var->type.idxlst[i];
+				v->type.idxlst[i]=var->type.idxlst[i+1];
+			}
+
+			if(sial_defbsize()==4) {
+
+				v->v.ul+=size*n;
+				v->mem=v->v.ul;
+
+			} else {
+
+				v->v.ull+=size*n;
+				v->mem=v->v.ull;
+			}
+			
+
+		} else {
+
+			v=sial_newval();
+			ref=sial_cloneval(var);
+
+			if(var->type.ref==1) size=var->type.size;
+			else size=sial_defbsize();
+
+			if(sial_defbsize()==4) {
+
+				ref->v.ul+=size*n;
+				ref->mem=ref->v.ul;
+
+			} else {
+
+				ref->v.ull+=size*n;
+				ref->mem=ref->v.ull;
+			}
+			sial_do_deref(1, v, ref);
+			sial_freeval(ref);
+		}
+
+	} else {
+
+		v=sial_newval();
+
+		/* use dynamic indexing aka awk indexing */
+		sial_valindex(var, vi, v);
+	}
+
+	/* discard expression results */
+	sial_freeval(var);
+	sial_freeval(vi);
+	sial_curpos(&p, 0);
+
+	return v;
+}
+
+void
+sial_freeindex(index_t  *i)
+{
+	NODE_FREE(i->index);
+	NODE_FREE(i->var);
+	sial_free(i);
+}
+
+node_t*
+sial_newindex(node_t*var, node_t*idx)
+{
+index_t  *i=sial_alloc(sizeof(index_t ));
+node_t*n=sial_newnode();
+
+	i->index=idx;
+	i->var=var;
+	n->exe=(xfct_t)sial_exeindex;
+	n->free=(ffct_t)sial_freeindex;
+	n->data=i;
+	sial_setpos(&i->pos);
+	return n;
+}
+
+typedef struct {
+	node_t*fname;
+	node_t*parms;
+	srcpos_t pos;
+	void *file;
+} call;
+
+static value_t *
+sial_execall(call *c)
+{
+value_t *rv;
+srcpos_t p;
+
+	sial_curpos(&c->pos, &p);
+	rv=sial_docall(c->fname, c->parms, c->file);
+	sial_curpos(&p, 0);
+	return rv;
+}
+
+void
+sial_freecall(call *c)
+{
+	NODE_FREE(c->fname);
+	sial_free_siblings(c->parms);
+	sial_free(c);
+}
+
+node_t*
+sial_newcall(node_t* fname, node_t* parms)
+{
+node_t*n=sial_newnode();
+call *c=sial_alloc(sizeof(call));
+
+	c->fname=fname;
+	c->file=sial_getcurfile();
+	c->parms=parms;
+	n->exe=(xfct_t)sial_execall;
+	n->free=(ffct_t)sial_freecall;
+	n->data=c;
+	sial_setpos(&c->pos);
+	return n;
+}
+
+typedef struct {
+	node_t*expr;
+	srcpos_t pos;
+} adrof;
+
+static value_t *
+sial_exeadrof(adrof *a)
+{
+value_t *rv, *v=NODE_EXE(a->expr);
+
+#if 0
+	/* we can only do this op on something that came from system image
+	   Must not allow creation of references to local variable */
+	if(!v->mem) {
+
+		sial_freeval(v);
+		sial_rerror(&a->pos, "Invalid operand to '&' operator");
+
+	}
+#endif
+	/* create the reference */
+	rv=sial_newval();
+	sial_duptype(&rv->type, &v->type);
+	sial_pushref(&rv->type, 1);
+
+	/* remember position in image */
+	if(sial_defbsize()==8) rv->v.ull=v->mem;
+	else rv->v.ul=v->mem;
+	rv->mem=0;
+
+	sial_freeval(v);
+
+	return rv;
+}
+
+void
+sial_freeadrof(adrof *a)
+{
+	NODE_FREE(a->expr);
+	sial_free(a);
+}
+
+node_t*
+sial_newadrof(node_t* expr)
+{
+node_t*n=sial_newnode();
+adrof *a=sial_alloc(sizeof(adrof));
+
+	a->expr=expr;
+	n->exe=(xfct_t)sial_exeadrof;
+	n->free=(ffct_t)sial_freeadrof;
+	n->data=a;
+	sial_setpos(&a->pos);
+	return n;
+}
+
+static int
+sial_reftobase(value_t *v)
+{
+int idx= v->type.idx;
+
+	if(v->type.type==V_REF) {
+
+		if(sial_defbsize()==4) 
+			v->type.idx=B_UL;
+		else 
+			v->type.idx=B_ULL;
+	}
+	return idx;
+}
+
+static value_t*
+sial_docomp(int op, value_t *v1, value_t *v2)
+{
+
+	/* if one parameter is string then both must be */
+	if(v1->type.type == V_STRING || v2->type.type == V_STRING) {
+
+		if(v1->type.type != V_STRING || v2->type.type != V_STRING) {
+
+			sial_error("Invalid condition arguments");
+		}
+		else {
+
+			switch(op) {
+
+				case EQ: {	/* expr == expr */
+
+					return sial_makebtype(!strcmp(v1->v.data, v2->v.data));
+
+				}
+				case GT: case GE: {	/* expr > expr */
+
+					return sial_makebtype(strcmp(v1->v.data, v2->v.data) > 0);
+
+				}
+				case LE: case LT: {	/* expr <= expr */
+
+					return sial_makebtype(strcmp(v1->v.data, v2->v.data) < 0);
+
+				}
+				case NE: {	/* expr != expr */
+
+					return sial_makebtype(strcmp(v1->v.data, v2->v.data));
+
+				}
+				default: {
+
+					sial_error("Oops conditional unknown 1");
+
+				}
+			}
+		}
+
+	}
+	else {
+
+		int idx1, idx2;
+		value_t *v=sial_newval();
+
+		/* make sure pointers are forced to proper basetype
+		   before calling sial_baseop()*/
+		idx1=sial_reftobase(v1);
+		idx2=sial_reftobase(v2);
+		
+
+		switch(op) {
+
+			case EQ:
+			case GT:
+			case GE: 
+			case LE:
+			case LT:
+			case NE:
+				sial_baseop(op, v1, v2, v);
+			break;
+			default: {
+
+				sial_error("Oops conditional unknown 2");
+
+			}
+		}
+		v1->type.idx=idx1;
+		v2->type.idx=idx2;
+		return v;
+	}
+	return 0;
+}
+
+static value_t *
+sial_exeop(oper *o)
+{
+value_t *v=0, *v1=0, *v2=0, *v3=0, *v4=0;
+int top;
+srcpos_t p;
+
+	sial_curpos(&o->pos, &p);
+
+	/* if ME (op on myself) operator, translate to normal operator
+	   we will re-assign onto self when done */
+
+	top=getop(o->op);
+
+	if(top == ASSIGN) {
+
+		goto doop;
+
+	} else if(top == IN) {
+
+		/* the val in array[] test is valid for anything but struct/union */
+		v=sial_makebtype((ull)sial_lookuparray(P1,P2));
+
+	}
+	else if(is_cond(top)) {
+
+		/* the operands are either BASE (integer) or REF (pointer) */
+		/* all conditional operators accept a mixture of pointers and integer */
+		/* set the return as a basetype even if bool */
+
+		switch(top) {
+
+			case CEXPR: {	/* conditional expression expr ? : stmt : stmt */
+
+				if(sial_bool(V1)) {
+
+					v=sial_cloneval(V2);
+
+				} else {
+
+					v=sial_cloneval(V3);
+
+				}
+
+			}
+			break;
+			case BOR: {	/* a || b */
+
+				v=sial_makebtype((ull)(sial_bool(V1) || sial_bool(V2)));
+
+			}
+			break;
+			case BAND: {	/* a && b */
+
+				v=sial_makebtype((ull)(sial_bool(V1) && sial_bool(V2)));
+
+			}
+			break;
+			case NOT: {	/* ! expr */
+
+				v=sial_makebtype((ull)(! sial_bool(V1)));
+
+			}
+			break;
+			default: {
+
+				v=sial_docomp(top, V1, V2);
+
+			}
+		}
+
+	} else if(anyop(V_STRING)) {
+
+		if(top == ADD) 
+		{
+		char *buf;
+
+			if(V1->type.type != V_STRING || V2->type.type != V_STRING) {
+
+				sial_rerror(&P1->pos, "String concatenation needs two strings!");
+
+			}
+			buf=sial_alloc(strlen(S1)+strlen(S2)+1);
+			strcpy(buf, S1);
+			strcat(buf, S2);
+			v=sial_makestr(buf);
+			sial_free(buf);
+		}
+		else {
+
+			sial_rerror(&P1->pos, "Invalid string operator");
+
+		}
+	}
+	/* arithmetic operator */
+	else if(anyop(V_REF)) { 
+
+		int size;
+		value_t *vt;
+
+		/* make sure we have the base type second */
+		if(V1->type.type != V_REF) { vt=V1; v1=V2; v2=vt; }
+
+
+		if(V1->type.type == V_BASE) {
+inval:
+			sial_error("Invalid operand on pointer operation");
+		}
+
+		/* get the size of what we reference */
+		size=V1->type.size;
+	
+		switch(top) {
+			case ADD: {	/* expr + expr */
+				/* adding two pointers ? */
+				if(V2->type.type == V_REF) goto inval;
+
+				V1;
+				sial_transfer(v=sial_newval(), v1,
+					      unival(v1) + L2 * size);
+			}
+			break;
+			case SUB: {	/* expr - expr */
+				/* different results if mixed types.
+				   if both are pointers then result is a V_BASE */
+				if(V2->type.type == V_REF)
+					v=sial_makebtype(L1 - L2);
+
+				else {
+					V1;
+					sial_transfer(v=sial_newval(), v1,
+						      unival(v1) - L2 * size);
+				}
+			}
+			break;
+			case PREDECR: { /* pre is easy */
+				V1;
+				sial_transfer(v=sial_newval(), v1,
+					      unival(v1) - size);
+				sial_setval(v1, v);
+			}
+			break;
+			case PREINCR: {
+				V1;
+				sial_transfer(v=sial_newval(), v1,
+					      unival(v1) + size);
+				sial_setval(v1, v);
+			}
+			break;
+			case POSTINCR: {
+				V1;
+				sial_transfer(v=sial_newval(), v1,
+					      unival(v1) + size);
+				sial_setval(v1, v);
+				sial_transfer(v, v1, unival(v1));
+			}
+			break;
+			case POSTDECR: {
+				V1;
+				sial_transfer(v=sial_newval(), v1,
+					      unival(v1) - size);
+				sial_setval(v1, v);
+				sial_transfer(v, v1, unival(v1));
+			}
+			break;
+			default:
+				sial_error("Invalid operation on pointer [%d]",top);
+		}
+	}
+	else {
+
+		/* both operands are V_BASE */
+		switch(top) {
+
+			/* for mod and div, we check for divide by zero */
+			case MOD: case DIV:
+				if(!L2) {
+					sial_rerror(&P1->pos, "Mod by zero");
+				}
+			case ADD: case SUB: case MUL: case XOR: 
+			case OR: case AND: case SHL: case SHR:
+			{
+				sial_baseop(top, V1, V2, v=sial_newval());
+			}
+			break;
+			case UMINUS: {
+
+				value_t *v0=sial_newval();
+				sial_defbtype(v0, (ull)0);
+				/* keep original type of v1 */
+				v=sial_newval();
+				sial_duptype(&v0->type, &V1->type);
+				sial_duptype(&v->type, &V1->type);
+				sial_baseop(SUB, v0, V1, v);
+				sial_freeval(v0);
+				/* must make result signed */
+				sial_mkvsigned(v);
+			}
+			break;
+			case FLIP: {
+
+				value_t *v0=sial_newval();
+				sial_defbtype(v0, (ull)0xffffffffffffffffll);
+				/* keep original type of v1 */
+				sial_duptype(&v0->type, &V1->type);
+				sial_baseop(XOR, v0, V1, v=sial_newval());
+				sial_freeval(v0);
+			}
+			break;
+			case PREDECR: { /* pre is easy */
+				V1;
+				sial_transfer(v=sial_newval(), v1,
+					      unival(v1) - 1);
+				sial_setval(v1, v);
+			}
+			break;
+			case PREINCR: {
+				V1;
+				sial_transfer(v=sial_newval(), v1,
+					      unival(v1) + 1);
+				sial_setval(v1, v);
+			}
+			break;
+			case POSTINCR: {
+				V1;
+				sial_transfer(v=sial_newval(), v1,
+					      unival(v1) + 1);
+				sial_setval(v1, v);
+				sial_transfer(v, v1, unival(v1));
+			}
+			break;
+			case POSTDECR: {
+				V1;
+				sial_transfer(v=sial_newval(), v1,
+					      unival(v1) - 1);
+				sial_setval(v1, v);
+				sial_transfer(v, v1, unival(v1));
+			}
+			break;
+			default: sial_rerror(&P1->pos, "Oops ops ! [%d]", top);
+		}
+	}
+doop:
+	/* need to assign the value_t back to P1 */
+	if(top != o->op || top==ASSIGN) {
+
+		/* in the case the Lvalue_t is a variable , bypass execution and set ini */
+		if(P1->exe == sial_exevar) {
+
+			char *name=NODE_NAME(P1);
+			var_t*va=sial_getvarbyname(name, 0, 0);
+			value_t *vp;
+
+			sial_free(name);
+
+			if(top != o->op) vp=v;
+			else vp=V2;
+
+			sial_chkandconvert(va->v, vp);
+
+			sial_freeval(v);
+			v=sial_cloneval(va->v);
+			va->ini=1;
+
+		} else {
+
+			if(!(V1->set)) {
+
+				sial_rerror(&P1->pos, "Not Lvalue_t on assignment");
+
+			}
+			else {
+
+				/* if it's a Me-op then v is already set */
+				V1;
+				if(top != o->op) {
+					sial_setval(v1, v);
+				} else {
+					sial_setval(v1, V2);
+					v=sial_cloneval(V2);
+				}
+
+			}
+		}
+		/* the result of an assignment is not an Lvalue_t */
+		v->set=0;
+	}
+	sial_freeval(v1);
+	sial_freeval(v2);
+	sial_freeval(v3);
+	sial_freeval(v4);
+	sial_setpos(&p);
+	return v;
+}
+
+void
+sial_freeop(oper *o)
+{
+int i;
+
+	for(i=0;i<o->np;i++) NODE_FREE(o->parms[i]);
+	sial_free(o);
+}
+
+node_t*
+sial_newop(int op, int nargs, ...)
+{
+va_list ap;
+node_t*n=sial_newnode();
+oper *o=sial_alloc(sizeof(oper));
+int i;
+
+	o->op=op;
+	o->np=nargs;
+
+	sial_setpos(&o->pos);
+
+	va_start(ap, nargs);
+
+	for(i=0 ; i<MAXPARMS; i++) {
+
+		if(!(o->parms[i]=va_arg(ap, node_t*))) break;;
+	}
+
+	n->exe=(xfct_t)sial_exeop;
+	n->free=(ffct_t)sial_freeop;
+	n->data=o;
+	
+	va_end(ap);
+	return n;
+}
+
+/* mult is a special case since the parser always returns a PTR token
+   for the '*' sign. The PTR token value_t is the number of '*' found.
+*/
+node_t*
+sial_newmult(node_t*n1, node_t*n2, int n)
+{
+	if(n>1) {
+
+		sial_error("Syntax error");
+	}
+	return sial_newop(MUL, 2, n1, n2);
+}
+/*
+	This function is called when we want to set a value_t in live memory
+	using a pointer to it.
+*/
+static void
+sial_setderef(value_t *v1, value_t *v2)
+{
+	void *sial_adrval(value_t *);
+	sial_transval(v2->type.size, v1->type.size, v2, sial_issigned(v2->type.typattr));
+	API_PUTMEM(v1->mem, sial_adrval(v2), v2->type.size);
+}
+
+/*
+	Do a de-referencing from a pointer (ref) and put the result in v.
+*/
+typedef struct {
+	int lev;
+	node_t*n;
+} ptrto;
+
+void
+sial_do_deref(int n, value_t *v, value_t *ref)
+{
+ull madr, new_madr;
+
+	if(n > ref->type.ref) {
+
+		sial_error("Too many levels of dereference");
+
+	}else {
+	
+
+		if(sial_defbsize()==4) madr=(ull)ref->v.ul;
+		else madr=ref->v.ull;
+
+		/* copy the target type to the returned value_t's type_t*/
+		sial_duptype(&v->type, &ref->type);
+
+		/* do a number of dereferences according to the PTR value_t */
+		while(n--) {
+
+			sial_popref(&v->type, 1);
+
+			if(!v->type.ref) {
+
+				/* make sure the pointer is pointing into the vmcore */
+				if(is_ctype(v->type.type)) {
+
+					v->v.data=sial_alloc(v->type.size);
+					sial_getmem(madr, v->v.data, v->type.size);
+
+				} else {
+
+					/* get the data from the system image */
+					switch(TYPE_SIZE(&v->type)) {
+
+						case 1: sial_getmem(madr, &v->v.uc, 1); 
+							break;
+						case 2: sial_getmem(madr, &v->v.us, 2); 
+							break;
+						case 4: sial_getmem(madr, &v->v.ul, 4);
+							break;
+						case 8: sial_getmem(madr, &v->v.ull, 8); 
+							break;
+
+					}
+				}
+			}
+			else {
+		
+				/* get the pointer at this address */
+				if(sial_defbsize()==4) {
+
+					sial_getmem(madr, &v->v.ul, 4);
+					new_madr=v->v.ul;
+
+				} else {
+
+					sial_getmem(madr, &v->v.ull, 8);
+					new_madr=v->v.ull;
+				}
+			}
+
+			/* remember this address. For the '&' operator */
+			v->mem=madr;
+			madr=new_madr;
+		}
+	}
+
+	/* we can always assign to a reference */
+	v->set=1;
+	v->setval=v;
+	v->setfct=sial_setderef;
+}
+
+static value_t *
+sial_exepto(ptrto *pto)
+{
+value_t *v=sial_newval();
+int n=pto->lev;
+value_t *ref=NODE_EXE(pto->n);
+
+	sial_do_deref(n, v, ref);
+	sial_freeval(ref);
+	return v;
+}
+
+static void
+sial_freepto(ptrto *pto)
+{
+	NODE_FREE(pto->n);
+	sial_free(pto);
+}
+	
+
+/* same thing for the ptrto operator */
+node_t*
+sial_newptrto(int lev, node_t*n)
+{
+ptrto *pto=sial_alloc(sizeof(ptrto));
+node_t*nn=sial_newnode();
+
+	pto->lev=lev;
+	pto->n=n;
+	nn->exe=(xfct_t)sial_exepto;
+	nn->free=(ffct_t)sial_freepto;
+	nn->data=pto;
+	return nn;
+}
--- crash/extensions/libsial/sial_func.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_func.c	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,1191 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include <setjmp.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <dlfcn.h>
+#include <string.h>
+#include <sys/types.h>
+#include <time.h>
+#include <sys/stat.h>
+#include "sial.h"
+
+/*
+	The next few functions manage the files and associated functions.
+*/
+struct fdata;
+
+typedef struct fctype_t {
+	int idx;
+	struct fctype_t*next;
+
+} fctype_t;
+
+typedef struct func {
+
+	char *name;	 	/* name of the function */
+	var_t*varlist;		/* parameters information */
+	var_t*rvar;		/* return value_t information */
+	node_t*body;		/* execution node for body */
+	int local;		/* load i.e. static ? */
+	srcpos_t pos;		/* source position of function declaration */
+	struct fdata *file;	/* back pointer to corresponding file */
+	struct func *next;	/* linked list */
+
+} func;
+
+typedef struct fdata {
+
+	char *fname;		/* name of the file */
+	int  isdso;		/* is this from a loadable module ? 
+				   `globs' becomes the handle */
+	time_t time;		/* load time */
+	var_t*fsvs;		/* associated list of static variables */
+	var_t*fgvs;		/* associated list of global variables */
+	void *globs;		/* handle for these globals */
+	func *funcs;		/* chained list of functions */
+	fctype_t *ctypes;	/* ctypes declared by this function */
+	struct fdata *next; 	/* chained list of files */
+
+} fdata;
+
+static fdata *fall=0;
+void sialparse(void);
+static func * sial_getfbyname(char *name, fdata *thisfd);
+value_t * sial_execmcfunc(func *f, value_t **vp);
+
+ull
+sial_getval(value_t*v)
+{
+ull ret=0;
+
+	if(!v) return 0;
+
+	/* need to cast properly here */
+	if(v->type.type==V_BASE || v->type.type==V_REF) {
+
+		if(v->type.type==V_REF || !sial_issigned(v->type.typattr)) {
+
+			switch(TYPE_SIZE(&v->type)) {
+				case 1: ret= (ull) v->v.uc; break;
+				case 2: ret= (ull) v->v.us; break;
+				case 4: ret= (ull) v->v.ul; break;
+				case 8: ret= (ull) v->v.ull; break;
+				default: sial_error("Oops getval base");
+			}
+
+		} else {
+
+			switch(TYPE_SIZE(&v->type)) {
+				case 1: ret= (ull) v->v.sc; break;
+				case 2: ret= (ull) v->v.ss; break;
+				case 4: ret= (ull) v->v.sl; break;
+				case 8: ret= (ull) v->v.sll; break;
+				default: sial_error("Oops getval base");
+			}
+		}
+	}
+	/* in the case of a struct/union we pass a pointer to it */
+	else ret = (unsigned long)v->v.data;
+	return ret;
+}
+
+static int
+sial_dohelp(char *fname)
+{
+char buf[MAX_SYMNAMELEN+1];
+char *hstr;
+
+	sprintf(buf, "%s_help", fname);
+
+	if(sial_chkfname(buf, 0)) {
+
+		char buf2[MAX_SYMNAMELEN+1];
+		char *ustr;
+
+		sprintf(buf2, "%s_usage", fname);
+		ustr=(char*)(unsigned long)sial_exefunc(buf2, 0);
+		sial_msg("COMMAND: %s %s\n\n", fname , ustr?ustr:"");
+		hstr=(char*)(unsigned long)sial_exefunc(buf, 0);
+		sial_format(1, hstr);
+		sial_format(0, "\n");
+		sial_msg("\n");
+		return 1;
+	}
+	return 0;
+}
+
+void
+sial_showallhelp()
+{
+fdata *filep;
+
+	for(filep=fall; filep; filep=filep->next) {
+
+		func *funcp;
+
+		for(funcp=filep->funcs;funcp;funcp=funcp->next) {
+
+			(void)sial_dohelp(funcp->name);
+			
+		}
+	}
+}
+
+int
+sial_showhelp(char *fname)
+{
+	return sial_dohelp(fname);
+}
+
+void*
+sial_getcurfile() { return fall; }
+
+int
+sial_isnew(void *p)
+{
+fdata *fd=(fdata *)p;
+struct stat stats;
+
+	if(!stat(fd->fname, &stats)) {
+
+		if(stats.st_mtime > fd->time) {
+
+			return 1;
+		}
+	}
+	return 0;
+}
+
+void *
+sial_findfile(char *name, int unlink)
+{
+fdata *fd;
+fdata *last=0;
+
+	for(fd=fall; fd; last=fd, fd=fd->next) {
+
+		if(!strcmp(fd->fname, name)) {
+
+			/* remove from the list ?*/
+			if(unlink) {
+
+				if(!last) fall=fd->next;
+				else last->next=fd->next;
+			
+			}
+			return fd;
+		}
+
+	}
+	return 0;
+}
+
+void
+sial_freefunc(func *fn)
+{
+	sial_free(fn->name);
+	NODE_FREE(fn->body);
+	if(fn->varlist) sial_freesvs(fn->varlist);
+	sial_freevar(fn->rvar);
+	sial_free(fn);
+}
+
+static void
+sial_unloadso(fdata *fd)
+{
+typedef int (*fp_t)(void);
+fp_t fp;
+func *f;
+
+	if((fp=(fp_t)dlsym(fd->globs, BT_ENDDSO_SYM))) {
+
+		fp();
+	}
+	for(f=fd->funcs; f; ) {
+
+		func *n=f->next;
+		sial_rmbuiltin(f->varlist);
+		sial_freevar(f->varlist);
+		sial_free(f);
+		f=n;
+	}
+
+	dlclose(fd->globs);
+
+	if(fall==fd) fall=fd->next;
+	else {
+
+		fdata *last=fall;
+
+		while(last->next) {
+
+			if(last->next==fd) {
+
+				last->next=fd->next;
+				break;
+			}
+			last=last->next;
+		}
+	}
+
+	/* free the associated static and global variables */
+	if(fd->fsvs) sial_freesvs(fd->fsvs);
+	if(fd->fgvs) sial_freesvs(fd->fgvs);
+	sial_free(fd->fname);
+	sial_free(fd);
+}
+
+static void (*cb)(char *, int)=0;
+void sial_setcallback(void (*scb)(char *, int))
+{ cb=scb; }
+static void
+sial_docallback( fdata *fd, int load)
+{
+func *f;
+
+	if(!cb) return;
+
+	for(f=fd->funcs; f; f=f->next) {
+
+		cb(f->name, load);
+	}
+}
+
+void
+sial_freefile(fdata *fd)
+{
+	if(fd) {
+
+		func *fct, *nxt;
+		fctype_t *ct, *nct;
+
+		if(fd->isdso) {
+
+			sial_unloadso(fd);
+			return;
+		}
+
+		/* free the associated static and global variables */
+		if(fd->fsvs) sial_freesvs(fd->fsvs);
+		if(fd->fgvs) sial_freesvs(fd->fgvs);
+
+		/* free all function nodes */
+                // let debugger know ...
+                sial_docallback(fd, 0);
+		for(fct=fd->funcs; fct; fct=nxt) {
+
+			nxt=fct->next;
+			sial_freefunc(fct);
+		}
+
+		for(ct=fd->ctypes; ct; ct=nct) {
+
+			nct=ct->next;
+			sial_free(ct);
+		}
+		sial_free(fd->fname);
+		if(fd->globs) sial_rm_globals(fd->globs);
+		sial_free(fd);
+	}
+	else sial_warning("Oops freefile!");
+}
+
+int
+sial_deletefile(char *name)
+{
+fdata *fd=sial_findfile(name, 0);
+
+	if(fd) {
+
+		sial_freefile(fd);
+                (void)sial_findfile(name, 1);
+		return 1;
+
+	}
+	return 0;
+}
+
+static int parsing=0;
+static jmp_buf parjmp;
+
+void
+sial_parseback(void)
+{
+	if(parsing) {
+
+		parsing=0;
+		longjmp(parjmp, 1);
+	}
+}
+
+/* link in a new set of static file variables */
+int
+sial_file_decl(var_t*svs)
+{
+	sial_validate_vars(svs);
+
+	if(!fall->fsvs)
+		fall->fsvs=(void*)sial_newvlist();
+
+	if(!fall->fgvs)
+		fall->fgvs=(void*)sial_newvlist();
+
+	(void)sial_addnewsvs(fall->fgvs, fall->fsvs, svs);
+	
+	return 1;
+}
+
+typedef struct sigaction sact;
+static int sigs[]={SIGSEGV, SIGILL, SIGTRAP, SIGINT, SIGPIPE};
+#define S_NSIG (sizeof(sigs)/sizeof(sigs[0]))
+
+void
+sial_except_handler(int sig)
+{
+static int i=0;
+	if(sig != SIGPIPE && sig != SIGINT) sial_error("Exception caught!");
+	sial_dojmp(J_EXIT, &i);
+}
+
+void *
+sial_setexcept()
+{
+int i;
+sact *osa=sial_alloc(S_NSIG*sizeof(sact));
+#if linux
+sact na;
+
+	memset(&na, 0, sizeof(na));
+	na.sa_handler=sial_except_handler;
+	na.sa_flags=SA_NODEFER;
+
+#else
+sact na={ SA_NODEFER+SA_SIGINFO, sial_except_handler, 0, 0 };
+#endif
+	
+
+	for(i=0;i<S_NSIG;i++) {
+		if(sigaction(sigs[i], &na, &osa[i])) {
+			sial_msg("Oops! Couldn't set handlers!");
+		}
+	}
+	return osa;
+}
+
+void
+sial_rmexcept(void *sa)
+{
+sact *osa=(sact *)sa;
+int i;
+
+	for(i=0;i<S_NSIG;i++) {
+		sigaction(sigs[i], &osa[i], 0);
+	}
+	sial_free(osa);
+}
+
+/*
+	This function is used to compile the prototype
+	information being given for a builtin function.
+
+	We push a dummy fdata on the stack so that the declared
+	function ends up in the global list of that fdata.
+*/
+var_t*
+sial_parsexpr(char *expr)
+{
+fdata *fd=sial_calloc(sizeof(fdata));
+char *exp2=sial_alloc(strlen(expr)+2);
+var_t*ret=0;
+
+	strcpy(exp2, expr);
+	strcat(exp2, ";");
+
+	/* put it on the list */
+	fd->fname="__expr__";
+	fd->next=fall;
+	fall=fd;
+
+	sial_pushbuf(exp2, "stdin", 0, 0, 0);
+	parsing=1;
+	if(!setjmp(parjmp)) {
+
+		sial_rsteofoneol();
+		sial_settakeproto(1);
+		sialparse();
+		sial_settakeproto(0);
+
+		/* remove longjump for parsing */
+		parsing=0;
+
+		if(!fall->fgvs) {
+
+			sial_error("Invalid function declaration.");
+
+		} 
+
+		ret=fall->fgvs->next;
+
+	} else {
+
+		sial_popallin();
+		ret=0;
+
+	}
+	sial_free(exp2);
+	/* only free the top of the fgvs list to keep 'ret' */
+	if(fall->fgvs) sial_freevar(fall->fgvs);
+	if(fall->fsvs) sial_freesvs(fall->fsvs);
+	fall=fd->next;
+	sial_free(fd);
+	return ret;
+}
+
+/*
+	Load a dso file.
+	We are looking for the btinit() and btshutdown() functions.
+
+	btinit() should initialize the package and call sial_builtin()
+	to install the sial functions.
+
+	btshutdown(), if it exists, will be called when an unload of the 
+	file is requested. The dso should deallocate memory etc... at that
+	time.
+*/
+static int
+sial_loadso(char *fname, int silent)
+{
+void *h;
+
+	if((h=dlopen(fname, RTLD_LAZY))) {
+
+		typedef int (*fp_t)(void);
+		fp_t fp;
+
+		if((fp=(fp_t)dlsym(h, BT_INIDSO_SYM))) {
+
+			btspec_t *sp;
+
+			if(fp()) {
+
+				if((sp=(btspec_t *)dlsym(h, BT_SPEC_SYM))) {
+
+					int i;
+					fdata *fd=sial_calloc(sizeof(fdata));
+					func **ff=&fd->funcs;
+
+					fd->fname=fname;
+					fd->isdso=1;
+					fd->globs=h;
+
+					for(i=0;sp[i].proto;i++) {
+
+						var_t*v;
+
+						if((v=sial_builtin(sp[i].proto, sp[i].fp))) {
+
+							func *f=sial_alloc(sizeof(func));
+
+							f->varlist=v;
+							f->next=*ff;
+							*ff=f;
+						}
+					}
+					fd->next=fall;
+					fall=fd;
+					return 1;
+
+				} else if(!silent) {
+
+					sial_msg("Missing '%s' table in dso [%s]", BT_SPEC_SYM, fname);
+
+				}
+
+			} else if(!silent) {
+
+				sial_msg("Could not initialize dso [%s]", fname);
+
+			}
+
+		} else if(!silent) {
+
+			sial_msg("Missing '%s' function in dso [%s]", BT_INIDSO_SYM, fname);
+		}
+		dlclose(h);
+	}
+	else if(!silent) sial_msg(dlerror());
+	sial_free(fname);
+	return 0;
+}
+
+void
+sial_addfunc_ctype(int idx)
+{
+fctype_t *fct=sial_alloc(sizeof(fctype_t));
+
+	fct->idx=idx;
+	fct->next=fall->ctypes;
+	fall->ctypes=fct;
+}
+
+int 
+sial_newfile(char *name, int silent)
+{
+fdata *fd;
+fdata *oldf;
+char *fname=sial_strdup(name);
+void *mtag;
+
+	/* check if this is a dso type file */
+	if(!strcmp(fname+strlen(fname)-3, ".so")) {
+
+		if(sial_findfile(name,0)) {
+
+			if(!silent) 
+				sial_msg("Warning: dso must be unloaded before reload\n");
+			return 0;
+		}
+		return sial_loadso(fname, silent);
+		
+	}
+
+	fd=sial_calloc(sizeof(fdata));
+	oldf=sial_findfile(name,1);
+
+	/* push this file onto the parser stack */
+	if(!sial_pushfile(fname)) {
+
+		sial_free(fname);
+		if(!silent && errno != EISDIR) sial_msg("File %s : %s\n", name, strerror(errno));
+		return 0;
+	}
+
+	/* we also need to remove the globals for this file
+	   before starting the parsing */
+	if(oldf && oldf->globs) {
+
+		sial_rm_globals(oldf->globs);
+		oldf->globs=0;
+
+	}
+
+	needvar=instruct=0;
+
+	fd->fname=fname;
+
+	/* put it on the list */
+	fd->next=fall;
+	fall=fd;
+	
+	/* we tag the current ctype list so we know later what to clean up */
+	sial_tagst();
+
+	/* we also tag the macro stack so we can erase out defines and
+	   keep the compiler and api ones. */
+	mtag=sial_curmac();
+
+	parsing=1;
+	if(!setjmp(parjmp)) {
+
+		func *fct;
+		int ret=1;
+
+		/* parse it */
+		sial_rsteofoneol();
+
+		sialparse();
+
+		/* remove longjump for parsing */
+		parsing=0;
+
+		/* before adding the globals we need to push all the static 
+		   variables for this file since the initialization expressions
+		   might use them (e.g. sizeof('a static var')). Eh, as long as
+		   we keep the interpreter handling a superset of the 'standard' C
+		   I don't have a problem with it. Do you ? */
+
+		{
+			int lev;
+
+			lev=sial_addsvs(S_STAT, fd->fsvs);
+
+			/* ok to add the resulting globals now */
+			fall->globs=sial_add_globals(fall->fgvs);
+
+			sial_setsvlev(lev);
+		}
+
+		/* ok to free the old version */
+		if(oldf) sial_freefile(oldf);
+
+		sial_flushtdefs();
+		sial_flushmacs(mtag);
+
+		/* we proceed with the callback */
+		sial_docallback(fd, 1);
+
+		fd->time=time(0);
+
+		/* compilation was ok , check for a __init() function to execute */
+		if((fct=sial_getfbyname("__init", fd))) {
+
+			int *exval;
+			jmp_buf exitjmp;
+			sact *sa;
+
+			sa=sial_setexcept();
+
+			if(!setjmp(exitjmp)) {
+
+				sial_pushjmp(J_EXIT, &exitjmp, &exval);
+				sial_freeval(sial_execmcfunc(fct, 0));
+				sial_rmexcept(sa);
+				sial_popjmp(J_EXIT);
+
+			}
+			else {
+
+				sial_rmexcept(sa);
+				ret=0;
+			}
+
+		}
+		return ret;
+	}
+	else {
+
+		/* remove all streams from the stack */
+		sial_popallin();
+
+		/* error, free this partial one and reinstall old one */
+		if(oldf) {
+			/* we zap the top pointer (it's fd) */
+			oldf->next=fall->next;
+			fall=oldf;
+			oldf->globs=sial_add_globals(oldf->fgvs);
+		}
+		else {
+
+			fall=fall->next;
+		}
+
+		/* and free fd */
+		sial_freefile(fd);
+	}
+	sial_flushtdefs();
+	sial_flushmacs(mtag);
+	return 0;
+}
+
+/* scan the current list of functions for the one named name */
+static func *
+sial_getfbyname(char *name, fdata *thisfd)
+{
+fdata *fd;
+
+	/* check localy first */
+	if(thisfd) {
+
+		for(fd=fall; fd; fd=fd->next) {
+
+			func *f;
+
+			if(fd->isdso) continue;
+
+			/* skip non-local function */
+			if(thisfd != fd) continue;
+
+			for(f=fd->funcs; f; f=f->next) {
+
+				if(!strcmp(f->name, name)) return f;
+			}
+		}
+	}
+
+	/* check global function */
+	for(fd=fall; fd; fd=fd->next) {
+
+		func *f;
+
+		if(fd->isdso) continue;
+
+		for(f=fd->funcs; f; f=f->next) {
+
+			/* skip static functions not local */
+			if(f->local) continue;
+
+			if(!strcmp(f->name, name)) return f;
+		}
+	}
+	return 0;
+}
+
+/* external boolean to check if a function exists */
+int sial_funcexists(char *name)
+{
+	return !(!(sial_getfbyname(name, 0)));
+}
+
+/*
+	This combined set of functions enables the application to
+	get a list of currently defined commands that have a help.
+*/
+static fdata *nxtfdata=0;
+static func *nxtfunc;
+void
+sial_rstscan(void)
+{
+	nxtfdata=0;
+}
+char *
+sial_getnxtfct(void)
+{
+	if(!nxtfdata) {
+
+		if(!fall) return 0;
+		nxtfdata=fall;
+		nxtfunc=nxtfdata->funcs;;
+	}
+
+	while(nxtfdata) {
+
+		if(!nxtfdata->isdso) for(; nxtfunc; nxtfunc=nxtfunc->next) {
+
+			int l=strlen(nxtfunc->name);
+
+			if(l > 5) {
+
+				if(!strcmp(nxtfunc->name+l-5, "_help")) {
+
+					char buf[MAX_SYMNAMELEN+1];
+					func *ret;
+
+					strncpy(buf, nxtfunc->name, l-5);
+					buf[l-5]='\0';
+
+					/* make sure we do have the function */
+					if((ret=sial_getfbyname(buf, 0))) {
+
+						nxtfunc=nxtfunc->next;
+						return ret->name;
+					}
+				}
+			}
+		}
+		nxtfdata=nxtfdata->next;
+		if(nxtfdata) nxtfunc=nxtfdata->funcs;
+	}
+	sial_rstscan();
+	return 0;
+}
+
+/*
+	This is the entry point for the error handling 
+*/
+void
+sial_exevi(char *fname, int line)
+{
+char buf[200];
+char *ed=getenv("EDITOR");
+
+	if(!ed) ed="vi";
+	snprintf(buf, sizeof(buf), "%s +%d %s", ed, line, fname);
+	system(buf);
+	sial_load(fname);
+}
+
+/*
+	This function is called to start a vi session on a function
+	(file=0) or a file (file=1);
+*/
+void
+sial_vi(char *fname, int file)
+{
+int line, freeit=0;
+char *filename;
+
+	if(file) {
+
+		filename=sial_filempath(fname);
+
+		if(!filename) {
+
+			sial_msg("File not found : %s\n", fname);
+			return;
+
+		}
+
+		line=1;
+		freeit=1;
+
+
+	} else {
+
+		func *f=sial_getfbyname(fname, 0);
+
+		if(!f) {
+
+			sial_msg("Function not found : %s\n", fname);
+			return;
+
+		} else {
+
+			filename=f->pos.file;
+			line=f->pos.line;
+
+		}
+	}
+
+	sial_exevi(filename, line);
+
+	if(freeit) sial_free(filename);
+
+}
+
+char *
+sial_getfile(char *fname)
+{
+func *f;
+
+	if((f=sial_getfbyname(fname, 0))) return f->file->fname;
+	return 0;
+}
+
+static void
+sial_insertfunc(func *f)
+{
+	f->next=fall->funcs;
+	fall->funcs=f;
+}
+
+value_t *
+sial_execmcfunc(func *f, value_t **vp)
+{
+value_t *retval;
+jmp_buf env;
+var_t*parm=0;
+int i=0;
+char *ocurp, *curp;
+
+	/* set the current path */
+	{
+	char *p;
+
+		curp=sial_strdup(f->file->fname);
+		if((p=strrchr(curp, '/'))) *p='\0';
+		ocurp=sial_curp(curp);
+	}
+		
+
+	if(!(setjmp(env))) {
+
+		/* push a return level */
+		sial_pushjmp(J_RETURN, &env, &retval);
+
+		/* Now it's ok to add any static vars for this file */
+		sial_addsvs(S_FILE, f->file->fsvs);
+
+		/* we need to create brand new variables with 
+		   the name of the declared arguments */
+		if(f->varlist) {
+
+			for(i=0, parm=f->varlist->next; 
+			    vp && (parm != f->varlist) && vp[i];
+			    parm=parm->next, i++) {
+
+				var_t*var=sial_newvar(parm->name);
+
+				var->v=sial_cloneval(parm->v);
+				sial_chkandconvert(var->v, vp[i]);
+				sial_add_auto(var);
+				sial_freeval(vp[i]);
+		
+			}
+		}
+		if(vp && vp[i]) {
+
+			sial_warning("Too many parameters to function call");
+
+		} else if(parm != f->varlist) {
+
+			sial_warning("Not enough parameters for function call");
+		}
+
+		/* we execute the body of the function */
+		retval=NODE_EXE(f->body);
+
+		sial_freeval(retval);
+
+		retval=0;
+
+		sial_popjmp(J_RETURN);
+	}
+
+	/* make sure non-void functions do return something */
+	if(!retval) {
+
+		if(!sial_isvoid(f->rvar->v->type.typattr))
+
+			sial_rwarning(&f->pos, "Non void function should return a value.");
+
+	} else {
+
+		/* type checking here ... */
+	}
+
+	sial_curp(ocurp);
+	sial_free(curp);
+
+	return retval;
+}
+
+/* this is the externalized function that the API users call to execute 
+   a function */
+ull
+sial_exefunc(char *fname, value_t **vp)
+{
+func *f;
+ull ret;
+
+	if(!sial_chkfname(fname, 0))
+		sial_warning("Unknown function called: %s\n", fname);
+
+	/* builtin vs cmc ...*/
+	if((f=sial_getfbyname(fname, 0))) ret=sial_getval(sial_execmcfunc(f, vp));
+	else ret=sial_getval(sial_exebfunc(fname, vp));
+	/* sial_freeval(v); */
+	return ret;
+}
+
+value_t *
+sial_exefunc_common(char *fname, node_t*parms, fdata *fd)
+{
+int i;
+node_t*args;
+value_t *vp[BT_MAXARGS+1];
+func *f;
+
+	/* We must execute the arguments before pushing the S_FILE vars so that
+	   the local variables of the caller can still be accessed */
+	for(i=0,args=parms; args; args=args->next) {
+
+		if(i==BT_MAXARGS) {
+
+			sial_error("Max number of parameters exceeded [%d]", BT_MAXARGS);
+		}
+		vp[i++]=NODE_EXE(args);
+
+	}
+
+	/* null out the rest */
+	for(;i<=BT_MAXARGS;i++) vp[i]=0;
+
+	/* builtin vs cmc ...*/
+	if((f=sial_getfbyname(fname, fd))) return sial_execmcfunc(f, vp);
+	else return sial_exebfunc(fname, vp);
+}
+
+
+/* this function is called by the sial_exeop() through a CALL op. */
+value_t *
+sial_docall(node_t*name, node_t*parms, void *arg)
+{
+fdata *fd = arg;
+char *sname=sial_vartofunc(name);
+value_t *v=0;
+
+	if(sial_chkfname(sname, fd)) {
+
+		v=sial_exefunc_common(sname, parms, fd);
+
+	}
+	else sial_rerror(&name->pos, "Unknown function being called:[%s]", sname, fd);
+	/* sial_vartofunc() allocates the name */
+	/* we don't free this item if mem debug has been set */
+	if(!sial_ismemdebug()) sial_free(sname);
+	return v;
+
+}
+
+int 
+sial_newfunc(var_t*fvar, node_t* body)
+{
+var_t*v=fvar->next;
+
+	if(v == fvar) {
+
+		sial_freevar(v);
+		NODE_FREE(body);
+		sial_error("Syntax error in function declaration");
+
+	}else{
+
+		func *fn, *fi ;
+
+		sial_freevar(fvar);
+
+		/* we do the func insertion first so that if we have a problem
+		   we can jump out of the parser using the sial_parseback() function
+		   which will deallocate the stuff */
+
+		fn=sial_alloc(sizeof(func));
+		if(sial_isstatic(v->v->type.typattr)) fn->local=1;
+		fn->rvar=v;
+		fn->varlist=v->dv->fargs;
+
+		/* check for func(void) */
+		if(fn->varlist && fn->varlist->next != fn->varlist) {
+
+			var_t*v=fn->varlist->next;
+
+			if(v->v->type.type != V_REF && sial_isvoid(v->v->type.typattr)) {
+
+				/* cut the chain here */
+				if(v->next != fn->varlist) {
+
+					sial_error("function parameter cannot have 'void' type");
+				}
+				sial_freesvs(fn->varlist);
+				fn->varlist=0;
+			}
+		}
+
+		v->dv->fargs=0;
+		fn->name=sial_strdup(v->name);
+		fn->local=sial_isstatic(v->v->type.typattr)?1:0;
+		fn->body=body;
+		fn->file=fall;
+
+		/* the position of the function is the position of the var_t*/
+		memcpy(&fn->pos, &v->dv->pos, sizeof(srcpos_t));
+
+		/* emit a warning for variables in the main statement group that
+		   shadow one of the parameters */
+		if(fn->varlist) {
+
+			var_t*v;
+
+			for(v=fn->varlist->next; v!=fn->varlist; v=v->next) {
+
+				var_t*vs;
+
+				if((vs=sial_inlist(v->name, sial_getsgrp_avs(body))) ||
+				   (vs=sial_inlist(v->name, sial_getsgrp_svs(body)))) {
+
+					sial_rwarning(&vs->dv->pos, "variable '%s' shadows a function parameter"
+						, v->name);
+
+				}
+			}
+		}
+
+		if((fi=sial_getfbyname(fn->name, fall))) {
+
+			/* check for local conflicts */
+			if(fi->file == fn->file) {
+
+				sial_insertfunc(fn);
+				sial_rerror(&fn->pos, "Function '%s' redefinition, first defined in file '%s' line %d"
+					, fn->name, fi->pos.file, fi->pos.line);
+	
+			/* check for global conflicts */
+			} else if(!fn->local) {
+
+				sial_insertfunc(fn);
+				sial_rerror(&fn->pos, "Function '%s' already defined in file %s, line %d"
+					, fn->name, fi->pos.file, fi->pos.line);
+
+			} /* else... it's a static that shadows a global somewhere else. So it's ok */
+
+		}
+
+		/* Searching is all done, so insert it */
+		sial_insertfunc(fn);
+
+		/* check out the storage class. Only 'static' is supported */
+		if(!sial_isjuststatic(v->v->type.typattr)) {
+
+			sial_error("Only 'static' storage class is valid for a function");
+		}
+	}
+       	return 1;
+}
+
+/* check for the existence of a function in the list */
+int
+sial_chkfname(char *fname, void *vfd)
+{
+fdata *fd=(fdata *)vfd;
+
+	/* check script functions */
+	if(!sial_getfbyname(fname, fd)) {
+
+		/* check builtin list */
+		if(sial_chkbuiltin(fname)) return 1;
+		return 0;
+
+	}
+	return 1;
+}
+
+/*
+
+	This is the interface function with the command interpreter.
+	It needs to be able to execute a function given a name and
+	passing some random parameters to it.
+
+	A return of 0 means "no such function".
+*/
+int
+sial_runcmd(char *fname, var_t*args)
+{
+	if(sial_chkfname(fname, 0)) {
+
+		value_t *val;
+		int *exval;
+		jmp_buf exitjmp;
+		void *vp;
+		ull ret;
+		sact *sa;
+
+		/* make sure arguments are available in the global vars */
+		vp=sial_add_globals(args);
+
+		/* we set the exception handler too... */
+		sa=sial_setexcept();
+
+		if(!setjmp(exitjmp)) {
+
+			sial_pushjmp(J_EXIT, &exitjmp, &exval);
+
+			/* we need to create a var with that name */
+			val=sial_exefunc_common(fname, 0, 0);
+
+			sial_popjmp(J_EXIT);
+
+			if(val) {
+
+				ret=unival(val);
+				sial_freeval(val);
+			}
+			else ret=0;
+		}
+		else {
+
+			ret=*exval;
+		}
+
+		/* remove exception handlers and restore previous handlers */
+		sial_rmexcept(sa);
+
+		/* remove args from global vars */
+		sial_rm_globals(vp);
+		return ret;
+	}
+	return 0;
+}
+
--- crash/extensions/libsial/sial_alloc.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_alloc.c	2007-10-30 11:13:53.000000000 -0400
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#define MEMDEBUG 1
+/*
+*/
+#include "sial.h"
+#include <string.h>
+#include <ctype.h>
+#include <stdarg.h>
+#include <malloc.h>
+#include <limits.h>
+#include <sys/mman.h>
+#include <setjmp.h>
+
+#ifdef __GNUC__
+#       if __LP64__
+#               define NKPC	16
+#       else
+#               define NKPC	4
+#       endif
+#else
+// must be the SGI Mips compiler.
+#       if (_MIPS_SZLONG == 64)
+#               define NKPC	16
+#       else
+#               define NKPC	4
+#       endif
+#endif
+#define PAGESIZE (NKPC*1024)
+
+
+/*
+	Jump defines
+*/
+#define MAXJMPS (S_MAXDEEP*3)
+int njmps=0;
+
+typedef struct blklist {
+
+	struct blklist *next;	/* root based doubly chained */
+	struct blklist *prev;
+	int size;		/* size of the allocation */
+	int istmp;		/* was flagged as temp? */
+	int level;		/* corresponding level */
+	void *caller;		/* __return_address of caller */
+	void *freer;		/* __return_address of freer */
+
+} blist;
+
+#define SIZEBL   (((sizeof(blist)+8)/8)*8)
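+/* Every allocation is prefixed with a blist header rounded up to a multiple
+   of 8 bytes; callers get the address right after the header, so the header
+   is recovered with ((char*)p)-SIZEBL throughout this file. */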
+
+void pbl(void *p)
+{
+blist *bl=(blist*)(((char*)p)-SIZEBL);
+    sial_msg("struct blklist *%p {", bl);
+    sial_msg("      next=%p", bl->next);
+    sial_msg("      prev=%p", bl->prev);
+    sial_msg("      size=%d", bl->size);
+    sial_msg("      istmp=%d", bl->istmp);
+    sial_msg("      level=%d", bl->level);
+    sial_msg("      caller=%p", bl->caller);
+    sial_msg("      freer=%p", bl->freer);
+}
+
+static blist temp={ &temp, &temp, 0, 0, 0, 0, 0 };
+
+value_t*
+sial_findsym(value_t *vadr)
+{
+	char *addr=sial_getptr(vadr, char);
+	char *p = API_FINDSYM(addr);
+
+	if(p) {
+	    return sial_setstrval(sial_newval(), p);
+	} else {
+	    return sial_setstrval(sial_newval(),"");
+	}
+}
+
+value_t*
+sial_showaddr(value_t *vadr)
+{
+void *addr=sial_getptr(vadr, void);
+blist *bl;
+int n=0;
+
+	for(bl=temp.next; bl != &temp; bl=bl->next) {
+
+		if(bl->caller==addr) {
+
+			if(!(n%8)) sial_msg("\n");
+			sial_msg("0x%08x ", ((char *)bl) + SIZEBL);
+			n++;
+		}
+	}
+	return sial_makebtype(0);
+}
+
+static int memdebug=0;
+
+/* these two functions must *not* receive any values */
+value_t* sial_memdebugon() { memdebug=1; return sial_makebtype(0); }
+value_t* sial_memdebugoff() { memdebug=0; return sial_makebtype(0); }
+int sial_ismemdebug() { return memdebug; }
+
+value_t*
+sial_showtemp()
+{
+blist *bl;
+int i, totsiz, totbl;
+static int ncallers=0;
+static void *callers[1000];
+static int count[1000];
+static int sizes[1000];
+static int dir=0;
+
+	if(!dir) {
+
+		memset(callers, 0, sizeof(void*)*1000);
+		memset(count, 0, sizeof(int)*1000);
+		memset(sizes, 0, sizeof(int)*1000);
+		ncallers=0;
+	}
+
+	if(dir==1) dir=0;
+	else dir=1;
+
+	for(bl=temp.next; bl != &temp; bl=bl->next) {
+
+		int i;
+
+		for(i=0;i<ncallers;i++) 
+			if(callers[i]==bl->caller) { 
+				if(dir) { count[i]++; sizes[i]+=bl->size; }
+				else { count[i]--; sizes[i]-=bl->size; }
+				break; 
+			}
+
+		if(i==ncallers) {
+			callers[ncallers]=bl->caller;
+			count[ncallers]=1;
+			sizes[ncallers]=bl->size;
+			ncallers++;
+		}
+
+	}
+	totbl=totsiz=0;
+	for(i=0;i<ncallers;i++) {
+
+		int c=count[i]<0?-count[i]:count[i];
+		int s=sizes[i]<0?-sizes[i]:sizes[i];
+
+		sial_msg("0x%08x [%5d] [%8d]\n", callers[i], c, s);
+
+		totsiz+=s;
+		totbl+=c;
+	}
+	sial_msg("    --------------\nTotal of %d bytes in %d blocks.\n", totsiz, totbl);
+	return sial_newval();
+}
+
+void
+sial_caller(char *p, void *retaddr)
+{
+blist *bl=(blist*)(((char*)p)-SIZEBL);
+
+    bl->caller=retaddr;
+}
+
+#define PAGEMASK 0xfffffffffffff000ll
+#define MAGIC    0xdeadbabe
+void *
+sial_alloc(int size)
+{
+char *m;
+blist *bl;
+
+#ifdef MEMDEBUG
+unsigned long p, pp;
+int npages;
+#endif
+
+	size=size+SIZEBL;
+
+#if MEMDEBUG
+
+	if(memdebug) {
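+		/* Debug layout sketch: the block is placed at the end of a
+		   malloc'ed region so that it ends right before a page that is
+		   then read-protected; a MAGIC word is written just before the
+		   block so both overruns (fault on the protected page) and
+		   underruns (MAGIC check in sial_free_bl) can be caught. */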
+
+		npages=((size+PAGESIZE+4)/PAGESIZE)+2;
+		p=(unsigned long)malloc(npages*PAGESIZE);
+		p=(p+PAGESIZE)&PAGEMASK;
+		pp=p+((npages-2)*PAGESIZE);
+		p=pp-size;
+		p = p ^ (p & 0x0fll);
+		*((int*)(p-4))=MAGIC;
+		mprotect((void*)pp, PAGESIZE, PROT_READ);
+		m=(char*)p;
+
+	} else {
+
+		m=malloc(size);
+	}
+
+#else
+
+	m=malloc(size);
+	
+#endif
+
+
+	bl=(blist*)m;
+	bl->size=size;
+	bl->level=njmps;
+	bl->prev=bl->next=bl;
+	bl->istmp=0;
+	TAG(m+SIZEBL);
+	return m+SIZEBL;
+}
+
+void
+sial_maketemp(void *p)
+{
+blist *bl;
+
+	if(!p) return;
+
+	bl=(blist*)(((char*)p)-SIZEBL);
+	bl->prev=&temp;
+	bl->next=temp.next;
+	bl->istmp=1;
+	temp.next->prev=bl;
+	temp.next=bl;
+}
+
+void *
+sial_calloc(int size)
+{
+char *p=sial_alloc(size);
+
+	TAG(p);
+	memset(p, 0, size);
+	return p;
+}
+
+static void
+sial_free_bl(blist *bl, void *ra)
+{
+	bl->freer=ra;
+	bl->prev->next=bl->next;
+	bl->next->prev=bl->prev;
+
+#ifdef MEMDEBUG
+
+	if(memdebug) {
+
+		/* help out dbx/gdb when they're watching the allocated area
+	   	   by writing over it */
+		{
+		int i, ni=bl->size/sizeof(void*);
+		char *p=(char*)bl;
+		unsigned long up;
+
+			for(i=0;i<ni;i++) ((char **)p)[i]=ra;
+			up=(unsigned long)p;
+			if(*((int*)(up-4)) != MAGIC) sial_error("Oops sial_free");
+			up=up ^ (up & (PAGESIZE-1));
+			mprotect((void*)up, PAGESIZE, PROT_READ);
+		}
+
+	} else {
+
+		free(bl);
+	}
+
+#else
+	free(bl);
+#endif
+
+}
+
+void
+sial_free(void *p)
+{
+	if(!p) return;
+	sial_free_bl((blist*)(((char*)p)-SIZEBL), __return_address);
+}
+
+void
+sial_freetemp()
+{
+blist *bl=temp.next;
+
+	while(bl != &temp) {
+
+		blist *next=bl->next;
+		sial_free_bl(bl, __return_address);
+		bl=next;
+	}
+}
+
+int
+sial_istemp(void *p)
+{
+	return ((blist*)(((char*)p)-SIZEBL))->istmp;
+}
+
+char *
+sial_strdup(char *s)
+{
+char *ns=sial_alloc(strlen(s)+1);
+
+	strcpy(ns, s);
+	TAG(ns);
+	return ns;
+}
+
+void *
+sial_dupblock(void *p)
+{
+void *p2;
+int size=((blist*)(((char*)p)-SIZEBL))->size-SIZEBL;
+
+	if(!p) return 0;
+
+	p2=sial_alloc(size);
+	memcpy(p2, p, size);
+	return p2;
+}
+
+/* cheap realloc: we drop the original.
+   This function is only used once in configmon(1) code */
+void *
+sial_realloc(void *p, int size)
+{
+int cursize=((blist*)(((char*)p)-SIZEBL))->size-SIZEBL;
+void *p2;
+
+	p2=sial_calloc(size);
+	memcpy(p2, p, cursize<size?cursize:size);
+	sial_free(p);
+	return p2;
+}
+
+/*
+	Warp jumps clearing house.
+	This is intimately linked to the jumping stuff.
+	We push a new jump buffer for each discontinuity (break, continue or
+	return).
+*/
+struct {
+
+	int type;
+	int svlev;
+	value_t **val;
+	jmp_buf *env;
+
+} jmps[MAXJMPS];
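+/*
+	A sketch of the intended usage of this jump stack (see sial_runcmd()
+	in sial_func.c and the flow-control code in sial_stat.c):
+
+		jmp_buf env;
+
+		if(!setjmp(env)) {
+			sial_pushjmp(J_BREAK, &env, 0);
+			... body: a break calls sial_dojmp(J_BREAK, 0), which
+			    longjmp's back to the setjmp() above ...
+			sial_popjmp(J_BREAK);
+		}
+*/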
+
+/* this is used after a jump since sial_freetemp() should already have cleaned up 
+   anything in between the previous and new level */
+void
+sial_setlev(int level)
+{
+	njmps=level;
+}
+
+void
+sial_pushjmp(int type, void *venv, void *val)
+{
+jmp_buf *env=(jmp_buf *)venv;
+
+	if(njmps<MAXJMPS) {
+
+		jmps[njmps].type=type;
+		jmps[njmps].val=(value_t**)val;
+		jmps[njmps].env=env;
+		jmps[njmps++].svlev=sial_getsvlev();
+
+	} else {
+
+		sial_error("Jump Stack overflow");
+
+	}
+}
+
+/*
+	Switch context to a break, continue or return end point.
+	If we are popping a break we trash the continues.
+	If we are popping a return then we trash the current break and continue.
+*/
+void
+sial_dojmp(int type, void *val)
+{
+	if(njmps > 1) {
+
+		jmp_buf *env;
+
+		while(njmps && jmps[--njmps].type!=type);
+		if(jmps[njmps].val) *(jmps[njmps].val)=val;
+		env=jmps[njmps].env;
+
+		/* reset the variable level too... */
+		sial_setsvlev(jmps[njmps].svlev);
+
+		longjmp(*env, 1);
+		/* NOT REACHED */
+
+	} else sial_parseback(); /* we use the same code for initializing
+		static and automatic variables. In the case of static variables,
+		if the initializer expression throws an error then there's no J_EXIT
+		jump context and njmps is zero. It's treated as a parsing error */
+}
+
+void
+sial_popjmp(int type)
+{
+	if(!njmps) {
+
+		sial_error("Pop underflow!");
+	}
+	njmps--;
+	if(jmps[njmps].type != type) {
+
+		sial_error("Wrong pop! %d vs %d", jmps[njmps].type, type);
+	}
+	sial_setsvlev(jmps[njmps].svlev);
+}
+
--- crash/extensions/libsial/sial_print.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_print.c	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include "sial.h"
+/*
+	This set of functions is used to print value_t's.
+*/
+
+/* utility that returns a string of '*' for a reference */
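+/* e.g. sial_getref(2) returns "**" (assuming lev never exceeds the length of ptrs) */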
+static
+char *sial_getref(int lev)
+{
+static char *ptrs="*******";
+
+	return ptrs+strlen(ptrs)-lev;
+}
+
+static char *
+sial_getidx(type_t *t, char*buf, int len)
+{
+int pos=0;
+
+	buf[0]='\0';
+	if(t->idxlst) {
+
+		int i;
+
+		for(i=0; t->idxlst[i] && pos < len; i++) {
+
+			pos += snprintf(buf+pos, len-pos, "[%d]", t->idxlst[i]);
+		}
+	}
+	return buf;
+}
+
+#define INDENT		4	/* print indent at beginning of new line */
+#define SPACER		16	/* space for type string */
+#define NAMESPACE	16	/* space used for member/var names */
+#define NBUNDLE		4	/* when printing arrays print this much before \n */
+
+static void
+sial_indent(int level, int indent)
+{
+	if(!indent) return;
+	sial_msg("%*s", level*INDENT, "");
+}
+
+static void sial_ptype2(type_t*t, value_t*v, int level, int indent, char *name, int ref, int justv);
+
+/*
+	Print a struct/union type or value
+*/
+static void
+sial_print_ctype(type_t *t, value_t *v, int level, int indent, char *name, int ref, int justv)
+{
+stinfo_t *st=sial_getstbyindex(t->idx, t->type);
+stmember_t *m;
+char buf[100];
+
+	if(!st) sial_error("Oops sial_print_ctype!");
+
+	if(!st->all) {
+
+		sial_fillst(st);
+		if(!st->all) sial_error("Reference to an incomplete type");
+	}
+
+	sial_indent(level, indent);
+
+	if(!justv) {
+		snprintf(buf, sizeof(buf)-1, "%s %s", sial_ctypename(t->type), st->name?st->name:"");
+		sial_msg("%-*s ", SPACER, buf);
+
+		/* if this is a pointer, bail out */
+	}
+	if(ref) return;
+
+	if(v && !justv) sial_msg(" = ");
+
+	sial_msg("{\n");
+
+	for(m=st->stm; m; m=m->next) {
+
+		value_t *vm=0;
+
+		sial_indent(level+1, 1);
+		if(v) {
+			vm=sial_newval();
+			sial_duptype(&vm->type, &m->type);
+			sial_exememlocal(v, m, vm);
+			sial_ptype2(&vm->type, vm, level+1, 0, m->m.name, 0, 0);
+
+		} else sial_ptype2(&m->type, vm, level+1, 0, m->m.name, 0, 0);
+		sial_msg(";\n");
+		if(vm) sial_freeval(vm);
+	}
+
+	sial_indent(level, 1);
+	sial_msg("}");
+	if(name) sial_msg(" %s", name);
+	
+}
+
+static void
+sial_prbval(value_t *v)
+{
+	if(sial_issigned(v->type.typattr)) sial_msg("%8lld", sial_getval(v));
+	else sial_msg("%8llu", sial_getval(v));
+}
+
+static int 
+sial_prtstr(value_t *v, int justv)
+{
+value_t *vs;
+char *s, *p;
+
+	if(sial_defbsize()==8) v->v.ull=v->mem;
+	else v->v.ul=v->mem;
+	vs=sial_getstr(v);
+	s=sial_getptr(vs, char);
+	for(p=s; *p; p++) if(!isprint(*p)) return 0;
+	if(p==s) { sial_freeval(vs); return 0; }
+	if(!justv) sial_msg("= ");
+	sial_msg("\"%s\"", s);
+	sial_freeval(vs);
+	return 1;
+}
+
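+/*
+	Print an array recursively, one dimension per call: for a nested
+	dimension each element prints as "[i] = { ... }", while the last
+	dimension dereferences and prints the values themselves, bundled
+	NBUNDLE per line. (Illustration of intent, not verbatim output.)
+*/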
+static void
+sial_prtarray(type_t*t, ull mem, int level, int idx)
+{
+int i;
+int j, size=1;
+
+	for(j=idx+1; t->idxlst[j]; j++) size *= t->idxlst[j];
+	size *= t->type==V_REF ? sial_defbsize() : t->size;
+
+	/* start printing */
+	sial_msg("{");
+	sial_msg("\n");
+	sial_indent(level+1, 1);
+
+	for(i=0; i<t->idxlst[idx]; i++, mem += size) {
+
+		if(t->idxlst[idx+1]) {
+
+			sial_msg("[%d] = ", i);
+			sial_prtarray(t, mem, level+1, idx+1);
+
+		} else {
+
+			/* time to deref and print final type */
+			value_t *v=sial_newval(), *vr=sial_newval();
+			int *pi=t->idxlst;
+
+			t->idxlst=0;
+
+			sial_duptype(&vr->type, t);
+			sial_pushref(&vr->type, 1);
+			if(sial_defbsize()==8) vr->v.ull=mem;
+			else vr->v.ul=(ul)mem;
+			sial_do_deref(1, v, vr);
+			if(is_ctype(v->type.type) || !(i%NBUNDLE)) sial_msg("[%2d] ", i);
+			sial_ptype2(&v->type, v, level+1, 0, 0, 0, 1);
+			sial_msg(", ");
+			/* anything other than structs/unions is printed in bundles */
+			if(!is_ctype(v->type.type) && !((i+1)%NBUNDLE)) {
+
+				sial_msg("\n"); 
+				sial_indent(level+1, 1);
+			}
+			sial_freeval(v);
+			sial_freeval(vr);
+			t->idxlst=pi;
+		}
+	}
+	sial_msg("\n");
+	sial_indent(level, 1);
+	sial_msg("}");
+}
+
+/*
+	Print a type.
+	Typical output of the 'whatis' command.
+*/
+static
+void sial_ptype2(type_t*t, value_t*v, int level, int indent, char *name, int ref, int justv)
+{
+int type=t->type;
+
+	sial_indent(level, indent);
+	switch(type) {
+
+		case V_STRUCT: case V_UNION:
+
+			/* make sure we have all the member info */
+			sial_print_ctype(t, v, level, 0, name, ref, justv);
+		break;
+
+
+		case V_TYPEDEF:
+			/* no typedef should get here */
+			sial_warning("Typedef in print!");
+		break;
+
+		case V_ENUM:
+			/* no enum should get here */
+			sial_warning("ENUM in print!");
+		break;
+
+		case V_REF:
+		{
+		int refi=t->ref, ref=refi;
+
+			/* decrement ref if this was declared as an array */
+			if(t->idxlst) ref--;
+
+			/* print the referenced type */
+			sial_popref(t, t->ref);
+			sial_ptype2(t, 0, level, 0, 0, 1, justv);
+			sial_pushref(t, refi);
+
+			if(!justv) {
+
+				char buf[100], buf2[100];
+				int pos=0, len=sizeof(buf);
+
+				buf[0]='\0';
+				if(t->fct) buf[pos++]='(';
+				if(pos < len)
+					pos += snprintf(buf+pos, len-pos, "%s%s", sial_getref(ref), name?name:"");
+				if(pos < len)
+					pos += snprintf(buf+pos, len-pos, "%s", sial_getidx(t, buf2, sizeof(buf2)));
+				if(pos < len && t->fct)
+					pos += snprintf(buf+pos, len-pos, "%s", ")()");
+
+				sial_msg("%*s ", NAMESPACE, buf);
+			}
+
+			/* arrays are ref with boundaries... */
+			if(t->idxlst && v) {
+
+				if(t->idxlst[1] || t->rtype!=V_BASE || t->size!=1 || !sial_prtstr(v, justv)) 
+				{
+					if(!justv) sial_msg("= ");
+					sial_popref(t, 1);
+					sial_prtarray(t, v->mem, level, 0);
+					sial_pushref(t, 1);
+				}
+
+			} else if(v) {
+
+				if(!justv) sial_msg("= ");
+				if(!sial_getval(v)) sial_msg("(nil)");
+				else {
+					if(sial_defbsize()==8) sial_msg("0x%016llx", sial_getval(v));
+					else sial_msg("0x%08x", sial_getval(v));
+				}
+				if(t->ref==1 && t->rtype==V_BASE && t->size==1) {
+
+					(void)sial_prtstr(v, justv);
+				}
+			}
+		}
+		break;
+
+		case V_BASE:
+		{
+			if(sial_isenum(t->typattr)) {
+
+				stinfo_t *st=sial_getstbyindex(t->rtype, V_ENUM);
+				if(!justv) {
+					char buf[200];
+					snprintf(buf, sizeof(buf), "enum %s", st->name?st->name:"");
+					sial_msg("%-*s ", SPACER, buf);
+					sial_msg("%*s ", NAMESPACE, (name&&v)?name:"");
+				}
+				if(v) {
+
+					enum_t *e=st->enums;
+
+					sial_msg("= ");
+					sial_prbval(v);
+					while(e) {
+
+						if(e->value==sial_getval(v)) {
+							sial_msg(" [%s]", e->name);
+							break;
+						}
+						e=e->next;
+					}
+					if(!e) sial_msg(" [???]");
+
+				}else{
+
+					enum_t *e=st->enums;
+					int count=0;
+
+					sial_msg(" {");
+					while(e) {
+
+						if(!(count%4)) {
+							sial_msg("\n");
+							sial_indent(level+1, 1);
+						}
+						count ++;
+						sial_msg("%s = %d, ", e->name, e->value);
+						e=e->next;
+
+					}
+					sial_msg("\n");
+					sial_indent(level, 1);
+					sial_msg("%-*s ", SPACER, "}");
+					if(ref) return;
+					sial_msg("%*s ", NAMESPACE, name?name:"");
+				}
+
+			} else {
+
+				if(!justv) {
+					sial_msg("%-*s " , SPACER , sial_getbtypename(t->typattr));
+					if(ref) return;
+					sial_msg("%s%*s ", sial_getref(t->ref), NAMESPACE, name?name:"");
+				}
+				if(v) { 
+
+					if(!justv) sial_msg("= ");
+					sial_prbval(v);
+				}
+			}
+		}
+		break;
+		case V_STRING:
+			if(!justv) {
+				sial_msg("%-*s " , SPACER , "string");
+				sial_msg("%*s ", NAMESPACE, name?name:"");
+			}
+			if(v) {
+
+				if(!justv) sial_msg("= ");
+				sial_msg("\"%s\"", v->v.data);
+			}
+		break;
+	}
+	if(indent) sial_msg("\n");
+}
+
+static value_t*
+sial_ptype(value_t*v)
+{
+	sial_ptype2(&v->type, 0, 0, 1, 0, 0, 0);
+	sial_msg("\n");
+	return 0;
+}
+
+node_t*
+sial_newptype(var_t*v)
+{
+node_t*n=sial_newnode();
+
+	n->data=v->next->v;
+	v->next->v=0; /* save value against freeing */
+	sial_freevar(v->next);
+	sial_freevar(v);
+	n->exe=(xfct_t)sial_ptype;
+	n->free=(ffct_t)sial_freeval;
+	n->name=0;
+	sial_setpos(&n->pos);
+	return n;
+}
+
+static value_t *
+sial_pval(node_t*n)
+{
+value_t *v=NODE_EXE(n);
+char *name=NODE_NAME(n);
+
+	sial_ptype2(&v->type, v, 0, 1, name, 0, 0);
+	sial_free(name);
+	sial_freeval(v);
+	return 0;
+}
+
+node_t*
+sial_newpval(node_t*vn, int fmt)
+{
+node_t*n=sial_newnode();
+
+	n->data=vn;
+	n->exe=(xfct_t)sial_pval;
+	n->free=(ffct_t)sial_freenode;
+	n->name=0;
+	sial_setpos(&n->pos);
+	return n;
+}
--- crash/extensions/libsial/README.sial.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/README.sial	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,97 @@
+
+	This file (README.sial) gives some information specific to the crash(1) 
+	integration of sial.
+
+	Please refer to the README file for generic libsial information.
+
+	An example script can be found as ../scripts/sial.c
+
+	PATHS and ENVIRONMENT
+	=====================
+
+	The default locations to look for macros and include files are
+	/usr/share/sial/crash and ~/.sial
+
+	The default 'root' locations for #include are thus:
+
+	/usr/share/sial/crash/include and ~/.sial/include.
+
+	There are two environment variables that control these locations.
+
+	SIAL_IPATH : path to use for include files.
+	ex: setenv SIAL_IPATH /usr/include:$(ROOT)/usr/include:~/.lcrash/include
+
+	SIAL_MPATH : path to use for finding macro files.
+	ex: setenv SIAL_MPATH /usr/tmp/macs:~/.sial
+
+	#define's
+	=====================
+
+        The current architecture-independent #define's are:
+
+        Name                    Value/format
+        ====                    =====
+        linux                   1
+        __linux                 1
+        __linux__               1
+        unix                    1
+        __unix                  1
+        __unix__                1
+        LINUX_RELEASE           0x%08x
+        LINUX_2_2_16            (LINUX_RELEASE==0x020210)
+        LINUX_2_2_17            (LINUX_RELEASE==0x020211)
+        LINUX_2_4_0             (LINUX_RELEASE==0x020400)
+        LINUX_2_2_X             (((LINUX_RELEASE) & 0xffff00) == 0x020200)
+        LINUX_2_4_X             (((LINUX_RELEASE) & 0xffff00) == 0x020400)
+
+        For i386 images/cores only.
+
+        Name			Value
+        ====			====
+        i386                    1
+        __i386                  1
+        __i386__                1
+
+        For ia64 images/cores only.
+
+        Name			Value
+        ====			=====
+        ia64                    1
+        __ia64                  1
+        __ia64__                1
+        __LP64__                1
+        _LONGLONG               1
+        __LONG_MAX__            9223372036854775807L
+
+        If you feel there should be more standard defines provided
+        there, let me know.
+
+
+	Loading/Unloading
+	=====================
+
+	crash defines two new commands for loading and unloading sial
+	macros called "sload" and "sunload", respectively.
+
+	Using "sload" should be enough, since sial automatically "sunload"s
+	the previous copy after successfully compiling the new one.
+
+	DSOs must be unloaded before you can reload them.
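+
+	For example (the macro file name here is purely illustrative):
+
+		crash> sload ~/.sial/mycmd.c
+		crash> sunload ~/.sial/mycmd.c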
+
+	Editing
+	=====================
+	
+	To facilitate macro editing, crash makes an "edit" command available.
+	edit <funcname> will get you directly into the macro file that
+	defines function "funcname", at the line where "funcname" starts.
+
+	edit -f /somedir/somefile will start editing a new or existing file.
+
+	edit -l will position you on the "l"ast compile or runtime error.
+
+	Macro integration into the crash framework
+	=============================================
+
+	Refer to the README file on writing a user level command.
+	Also look at the 'sial.c' example in the scripts directory
+        
--- crash/extensions/libsial/sial_stat.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_stat.c	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,435 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include "sial.h"
+#include "sial.tab.h"
+#include <stdarg.h>
+#include <setjmp.h>
+
+#define MAXPARMS 10
+
+typedef struct stat {
+
+	int stype;
+	int np;
+	struct stat *next;
+	srcpos_t pos;
+	node_t*n;
+	node_t*parms[MAXPARMS];
+	var_t*svs;	/* if this is a statement block then these are the auto and static
+			   vars for it */
+	var_t*avs;
+
+} stat;
+
+#define SETVS	value_t *v1=0,*v2=0,*v3=0,*v4=0
+#define FV1	sial_freeval(v1),v1=0
+#define FV2	sial_freeval(v2),v2=0
+#define FV3	sial_freeval(v3),v3=0
+#define FV4	sial_freeval(v4),v4=0
+#define UNSETVS	FV1,FV2,FV3,FV4
+
+#define P1 (s->parms[0])
+#define P2 (s->parms[1])
+#define P3 (s->parms[2])
+#define P4 (s->parms[3])
+
+#define V1 (v1?v1:(v1=NODE_EXE(P1)))
+#define V2 (v2?v2:(v2=NODE_EXE(P2)))
+#define V3 (v3?v3:(v3=NODE_EXE(P3)))
+#define V4 (v4?v4:(v4=NODE_EXE(P4)))
+
+#define L1 (unival(V1))
+#define L2 (unival(V2))
+#define L3 (unival(V3))
+#define L4 (unival(V4))
+
+#define S1 (V1->v.data)
+#define S2 (V2->v.data)
+#define S3 (V3->v.data)
+#define S4 (V4->v.data)
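+/* A note on the macros above: Vn lazily evaluates parameter n of the
+   statement and caches the resulting value_t, FVn frees that cached value,
+   Ln reads it as an integer (via unival()) and Sn as a string pointer. */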
+
+/* this is used to execute statement lists, e.g. i=1,j=3; */
+static value_t*
+sial_exeplist(node_t*n)
+{
+value_t *val=0;
+
+	if(n) {
+
+		do {
+
+			if(val) sial_freeval(val), val=0;
+			val=NODE_EXE(n);
+			n=n->next;
+
+		} while(n);
+	}
+	return val;
+}
+
+static int
+sial_dofor(stat *s)
+{
+jmp_buf brkenv;
+jmp_buf cntenv;
+SETVS;
+
+	if(!setjmp(brkenv)) {
+
+		sial_pushjmp(J_BREAK, &brkenv, 0);
+
+		v1=sial_exeplist(P1);
+		FV1;
+
+		while(!P2 || sial_bool(V2)) {
+
+			FV2;
+
+			if(!setjmp(cntenv)) {
+
+				sial_pushjmp(J_CONTINUE, &cntenv, 0);
+				V4;
+				FV4;
+				sial_popjmp(J_CONTINUE);
+
+			}
+			
+			UNSETVS; /* make sure we re-execute everything each time */
+			v3=sial_exeplist(P3);
+			FV3;
+		}
+		sial_popjmp(J_BREAK);
+		
+	}
+	UNSETVS;
+	return 1;
+}
+
+static int
+sial_dowhile(stat *s)
+{
+jmp_buf brkenv;
+jmp_buf cntenv;
+SETVS;
+
+	if(!setjmp(brkenv)) {
+
+		sial_pushjmp(J_BREAK, &brkenv, 0);
+
+		while(sial_bool(V1)) {
+
+			FV1;
+
+			if(!setjmp(cntenv)) {
+
+				sial_pushjmp(J_CONTINUE, &cntenv, 0);
+				V2;
+				FV2;
+				sial_popjmp(J_CONTINUE);
+
+			}
+			
+			UNSETVS; /* make sure we re-execute everything each time */
+		}
+		FV1;
+		sial_popjmp(J_BREAK);
+		
+	}
+
+	return 1;
+}
+
+static int
+sial_dodo(stat *s)
+{
+jmp_buf brkenv;
+jmp_buf cntenv;
+SETVS;
+
+	if(!setjmp(brkenv)) {
+
+		sial_pushjmp(J_BREAK, &brkenv, 0);
+
+		do {
+
+			FV2;
+			if(!setjmp(cntenv)) {
+
+				sial_pushjmp(J_CONTINUE, &cntenv, 0);
+				V1;
+				FV1;
+				sial_popjmp(J_CONTINUE);
+
+			}
+			
+			UNSETVS; /* make sure we re-execute everything each time */
+
+		} while (sial_bool(V2));
+		FV2;
+
+		sial_popjmp(J_BREAK);
+
+	}
+
+	UNSETVS;
+	return 1;
+}
+
+static int
+sial_doif(stat *s)
+{
+SETVS;
+ul b;
+
+	b=sial_bool(V1);
+	FV1;
+
+	if(s->np==3) {
+
+		if (b) 
+			V2;
+		else 
+			V3;
+
+	} else {
+
+		if (b) 
+			V2;
+
+	}
+
+	UNSETVS;
+	return 1;
+}
+
+static int
+sial_doswitch(stat *s)
+{
+jmp_buf brkenv;
+ull cval;
+SETVS;
+
+	if(!setjmp(brkenv)) {
+
+		sial_pushjmp(J_BREAK, &brkenv, 0);
+		cval=unival(V1);
+		FV1;
+		sial_docase(cval, P2->data);
+		sial_popjmp(J_BREAK);
+
+	}
+
+	UNSETVS;
+	return 1;
+}
+
+static void
+sial_exein(stat *s)
+{
+jmp_buf cntenv;
+SETVS;
+
+	if(!setjmp(cntenv)) {
+
+		sial_pushjmp(J_CONTINUE, &cntenv, 0);
+		V3;
+		sial_popjmp(J_CONTINUE);
+
+	}
+	UNSETVS;
+}
+
+static int
+sial_doin(stat *s)
+{
+jmp_buf brkenv;
+	if(!setjmp(brkenv)) {
+
+		sial_pushjmp(J_BREAK, &brkenv, 0);
+		sial_walkarray(P1, P2, (void (*)(void *))sial_exein, s);
+		sial_popjmp(J_BREAK);
+	}
+	return 1;
+}
+
+/* this is where all of the flow control takes place */
+
+static value_t*
+sial_exestat(stat *s)
+{
+srcpos_t p;
+value_t *val=0;
+
+	do {
+
+		/* dump the val while looping */
+		if(val) sial_freeval(val);
+		val=0;
+
+		sial_curpos(&s->pos, &p);
+
+
+		switch(s->stype) {
+
+		case FOR : 	sial_dofor(s); break;
+		case WHILE: 	sial_dowhile(s); break;
+		case IN:	sial_doin(s); break;
+		case IF:	sial_doif(s); break;
+		case DO:	sial_dodo(s); break;
+		case SWITCH:	sial_doswitch(s); break;
+		case DOBLK:
+		{
+		int lev;
+
+			/* add any static variables to the current context */
+			lev=sial_addsvs(S_STAT, s->svs);
+			sial_addsvs(S_AUTO, sial_dupvlist(s->avs));
+
+			/* with the block statics inserted, execute the inside stmts */
+			if(s->next) val=sial_exestat(s->next);
+
+			/* remove any static variables to the current context */
+			if(s->svs) sial_setsvlev(lev);
+
+			sial_curpos(&p, 0);
+
+			return val;
+		}
+
+		case BREAK:	sial_dojmp(J_BREAK, 0); break;
+		case CONTINUE:	sial_dojmp(J_CONTINUE, 0); break;
+		case RETURN: {
+
+
+			if(s->parms[0]) {
+
+				val=(s->parms[0]->exe)(s->parms[0]->data);
+			}
+			else val=sial_newval();
+
+			sial_curpos(&p, 0);
+			sial_dojmp(J_RETURN, val);
+		}
+		break;
+		case PATTERN:
+
+			val=sial_exeplist(s->parms[0]);
+
+		}
+
+		sial_curpos(&p, 0);
+
+	} while((s=s->next));
+
+	/* we must return a value no matter what it is */
+	/* that's just the way it is... Some things will never change... */
+	if(!val) val=sial_newval();
+
+	return val;
+}
+
+void
+sial_freestat(stat *s)
+{
+int i;
+
+	if(s->next) sial_freenode(s->next->n);
+
+	for(i=0;i<s->np && s->parms[i];i++) {
+
+		NODE_FREE(s->parms[i]);
+
+	}
+	sial_free(s);
+}
+
+void
+sial_freestat_static(stat *s)
+{
+
+	if(s->next) sial_freenode(s->next->n);
+
+	/* free associated static var list */
+	sial_freesvs(s->svs);
+	sial_freesvs(s->avs);
+	sial_free(s);
+}
+
+var_t*sial_getsgrp_avs(node_t*n) { return ((stat *)n->data)->avs; }
+var_t*sial_getsgrp_svs(node_t*n) { return ((stat *)n->data)->svs; }
+
+/* add a set of static variables to a statement */
+node_t*
+sial_stat_decl(node_t*n, var_t*svs)
+{
+node_t*nn;
+stat *s;
+
+	sial_validate_vars(svs);
+
+	nn=sial_newnode();
+	s=sial_alloc(sizeof(stat));
+
+	/* add statics and autos to this statement */
+	s->svs=sial_newvlist();
+	s->avs=sial_newvlist();
+	sial_addnewsvs(s->avs, s->svs, svs);
+
+	if(n) s->next=(stat*)(n->data);
+	else s->next=0;
+	s->stype=DOBLK;
+	s->n=nn;
+	nn->exe=(xfct_t)sial_exestat;
+	nn->free=(ffct_t)sial_freestat_static;
+	nn->data=s;
+	sial_setpos(&s->pos);
+
+	return nn;
+}
+
+node_t*
+sial_newstat(int type, int nargs, ...)
+{
+va_list ap;
+node_t*n=sial_newnode();
+stat *s=sial_alloc(sizeof(stat));
+int i;
+
+	s->stype=type;
+
+	va_start(ap, nargs);
+
+	for(i=0;i<nargs && i<MAXPARMS; i++) {
+
+		s->parms[i]=va_arg(ap, node_t*);
+	}
+
+	s->np=i;
+	s->n=n;
+        s->next=0;
+	n->exe=(xfct_t)sial_exestat;
+	n->free=(ffct_t)sial_freestat;
+	n->data=s;
+	
+	sial_setpos(&s->pos);
+	
+	va_end(ap);
+	return n;
+}
+
+node_t*
+sial_addstat(node_t*list, node_t*s)
+{
+	if(!s && list) return list;
+	if(s && !list) return s;
+	else {
+		stat *sp=(stat*)(list->data);
+
+		while(sp->next) sp=sp->next;
+		sp->next=(stat*)(s->data);
+		return list;
+
+	}
+}
+
--- crash/extensions/libsial/sial_num.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_num.c	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <string.h>
+#include <stdio.h>
+#include "sial.h"
+
+typedef struct {
+	int type;
+	ull val;
+} num;
+
+/*
+        Numeric constants.
+*/
+
+static value_t*
+sial_exenum(num *n)
+{
+value_t *v=sial_newval();
+
+	v->type.type=V_BASE;
+	v->type.idx=n->type;
+	if(n->type==B_SLL) {
+
+ll:
+		v->v.sll=n->val;
+		v->type.size=8;
+
+	}else if(n->type==B_SC) {
+
+		v->v.sc=n->val;
+		v->type.size=1;
+
+	} else {
+
+		if(sial_defbsize()==4) {
+
+			v->v.sl=n->val;
+			v->type.size=4;
+
+		} else {
+
+			v->type.idx=B_SLL;
+			goto ll;
+		}
+	}
+	v->type.typattr=sial_idxtoattr(v->type.idx);
+	v->set=0;
+	return v;
+}
+
+void
+sial_freenumnode(num *n)
+{
+	sial_free(n);
+}
+
+node_t*
+sial_makenum(int type, ull val)
+{
+node_t*n=sial_newnode();
+num *nu=sial_alloc(sizeof(num));
+
+	TAG(nu);
+
+	nu->type=type;
+	nu->val=val;
+        n->exe=(xfct_t)sial_exenum;
+        n->free=(ffct_t)sial_freenumnode;
+        n->data=nu;
+
+	sial_setpos(&n->pos);
+	return n;
+}
+
+/*
+	Execution of the sizeof() operator.
+	This should be done at compile time, but I have not set up
+	a 'type only' execution path for the nodes.
+	Runtime is good enough to cover most cases.
+*/
+#define SN_TYPE 1
+#define SN_EXPR 2
+
+typedef struct {
+	int type;
+	void *p;
+	srcpos_t pos;
+} snode_t;
+
+static value_t *
+sial_exesnode(snode_t*sn)
+{
+srcpos_t pos;
+type_t*t;
+value_t *v=sial_newval();
+value_t *v2=0;
+int size;
+
+	sial_curpos(&sn->pos, &pos);
+	if(sn->type == SN_TYPE) {
+
+		t=(type_t*)(sn->p);
+
+	} else {
+
+		sial_setinsizeof(1);
+		v2=NODE_EXE((node_t*)(sn->p));
+		t=&v2->type;
+		sial_setinsizeof(0);
+	}
+
+	switch(t->type) {
+
+		case V_REF:
+
+			if(t->idxlst) {
+
+				int i; 
+				for(size=t->size,i=0;t->idxlst[i];i++) size *= t->idxlst[i];
+
+			} else size=sial_defbsize();
+
+		break;
+		case V_STRUCT: case V_UNION:
+
+			if(sial_ispartial(t)) {
+
+				sial_error("Invalid type specified");
+			}
+			size=t->size;
+
+		break;
+		case V_BASE: case V_STRING:
+			size=t->size;
+		break;
+		
+		default: size=0;
+	}
+
+	sial_defbtype(v, (ull)size);
+
+	sial_curpos(&pos, 0);
+
+	if(v2) sial_freeval(v2);
+
+	return v;
+	
+}
+
+static void
+sial_freesnode(snode_t*sn)
+{
+	if(sn->type == SN_TYPE) sial_free(sn->p);
+	else NODE_FREE(sn->p);
+	sial_free(sn);
+}
+
+node_t*
+sial_sizeof(void *p, int type)
+{
+node_t*n=sial_newnode();
+snode_t*sn=sial_alloc(sizeof(snode_t));
+
+	n->exe=(xfct_t)sial_exesnode;
+	n->free=(ffct_t)sial_freesnode;
+	n->data=sn;
+	sn->type=type;
+	sn->p=p;
+	sial_setpos(&sn->pos);
+	return n;
+}
+
+node_t*
+sial_newnum(char *buf)
+{
+int type;
+unsigned long long val;
+
+	type=B_SL;
+
+	/* get the value of this constant. Could be hex, octal or dec. */
+	if(buf[0]=='0') {
+
+		if(buf[1]=='x') {
+
+			if(!sscanf(buf, "%llx", &val)) goto error;
+
+		} else {
+
+			if(!sscanf(buf,"%llo", &val)) goto error;
+		}
+
+	} else {
+
+		if(!sscanf(buf,"%lld", &val)) goto error;
+
+	}
+
+	if(val & 0xffffffff00000000ll) type=B_SLL;
+	
+	/* treat the long and long long attributes */
+	{ 
+		int l=strlen(buf);
+
+		if(l>1) {
+
+			if(buf[l-1]=='l' || buf[l-1]=='L') {
+
+				if(l>2) {
+
+					if(sial_defbsize()==8 || buf[l-2]=='l' || buf[l-2]=='L') {
+
+						type=B_SLL;
+
+					}
+					else type=B_SL;
+
+				}
+
+			}
+		}
+	}
+	{
+	node_t*n=sial_makenum(type, val);
+		TAG(n->data);
+		return n;
+	}
+error:
+	sial_error("Oops! NUMBER");
+	return 0;
+}
--- crash/extensions/libsial/sial_str.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_str.c	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <ctype.h>
+#include <string.h>
+#include "sial.h"
+
+/*
+        Create a new string node from a string.
+*/
+
+value_t *
+sial_setstrval(value_t *val, char *buf)
+{
+char *newbuf=sial_strdup(buf);
+
+	val->v.data=(void*)newbuf;
+	val->type.type=V_STRING;
+	val->type.size=strlen(buf)+1;
+	val->set=0;
+	return val;
+}
+
+value_t *
+sial_makestr(char *s)
+{
+	return sial_setstrval(sial_newval(), s);
+}
+
+static value_t*
+sial_exestr(char *buf)
+{
+value_t *v=sial_newval();
+
+	sial_setstrval(v, buf);
+	return v;
+}
+
+void
+sial_freestrnode(char *buf)
+{
+	sial_free(buf);
+}
+
+node_t*
+sial_allocstr(char *buf)
+{
+node_t*n=sial_newnode();
+
+        n->exe=(xfct_t)sial_exestr;
+        n->free=(ffct_t)sial_freestrnode;
+        n->data=buf;
+	sial_setpos(&n->pos);
+
+	return n;
+}
+
+node_t*
+sial_strconcat(node_t*n1, node_t*n2)
+{
+char *newbuf=sial_alloc(strlen(n1->data)+strlen(n2->data)+1);
+
+	strcpy(newbuf, n1->data);
+	strcat(newbuf, n2->data);
+	sial_free(n1->data);
+	n1->data=newbuf;
+	sial_freenode(n2);
+	return n1;
+}
+
+static int
+is_valid(int c, int base)
+{
+	switch(base)
+	{
+		case 16: return (c>='0' && c<='9') || (toupper(c) >= 'A' && toupper(c) <= 'F');
+		case 10: return (c>='0' && c<='9');
+		case 8:  return (c>='0' && c<='7');
+	}
+	return 0;
+}
+
+/* extract a numeric value from the input stream */
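+/* Used for escape sequences inside string literals, e.g. "\x41" is read
+   via sial_getnum(16) and "\012" via sial_getnum(8). (Examples only.) */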
+static int sial_getnum(int base)
+{
+int val=0;
+	while(1)
+	{
+	char c=sial_input(), C;
+
+		C=toupper(c);
+		if(is_valid(C, base)) {
+
+			val=val*base;
+			val+=(C>='A')? C-'A'+10 : C-'0';
+		}
+		else
+		{
+			sial_unput(c);
+			break;
+		}
+	}
+	return val;
+}
+
+int
+sial_getseq(int c)
+{
+int i;
+static struct {
+	int code;
+	int value;
+} seqs[] = {
+	{ 'n', '\n' },
+	{ 't', '\t' },
+	{ 'f', '\f' },
+	{ 'r', '\r' },
+	{ 'n', '\n' },
+	{ 'v', '\v' },
+	{ '\\', '\007' },
+};
+	for(i=0;i<sizeof(seqs)/sizeof(seqs[0]);i++) {
+
+		if(seqs[i].code==c) return seqs[i].value;
+	}
+	return c;
+}
+
+node_t*
+sial_newstr()
+{
+int maxl=S_MAXSTRLEN;
+char *buf=sial_alloc(maxl);
+int iline=sial_line(0);
+int i, c;
+
+	/* let the input function know we want everything from the
+	   input stream. Comments and all... */
+	sial_rawinput(1);
+
+	for(i=0;i<maxl;i++) {
+
+		switch(c=sial_input()) {
+
+		case '\\': /* escape sequence */
+			switch(c=sial_input()) {
+			case 'x': /* hex value */
+				buf[i]=sial_getnum(16);
+			break;
+			case '0': /* octal value */
+				buf[i]=sial_getnum(8);
+			break;
+			default : 
+				if(isdigit(c))
+				{
+					sial_unput(c);
+					buf[i]=sial_getnum(10);
+				}
+				else
+				{
+					buf[i]=sial_getseq(c); 
+				}
+			break;
+
+			}
+		break;
+		case '"': /* we're finished */
+		{
+			buf[i]='\0';
+			sial_rawinput(0);
+			return sial_allocstr(buf);
+
+		}
+		case (-1):
+			sial_error("Unterminated string at line %d", iline);
+		break;
+		default:
+			buf[i]=c;
+		break;
+
+		}
+	}
+	sial_error("String too long at %d", iline);
+	return NULLNODE;
+}
--- crash/extensions/libsial/sial_input.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_input.c	2007-10-30 11:13:53.000000000 -0400
@@ -0,0 +1,802 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <string.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <setjmp.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdio.h>
+#include "sial.h"
+
+char *sialpp_create_buffer(void *, int);
+void sialpp_switch_to_buffer(void *);
+void sialpp_delete_buffer(void *);
+
+typedef void fdone(void *data);
+extern void* sial_create_buffer(FILE *, int);
+typedef struct inbuf_s {
+	srcpos_t pos;		/* current filename,line,col */
+	int cursor;		/* position of next input() character */
+	int len;		/* length of the buffer */
+	char *buf;		/* buffer */
+	void *data;		/* opaque data for callback */
+	void *mac;		/* for nested defines substitutions */
+	fdone *donefunc;	/* function to call when done with buffer */
+	int space;
+	int eofonpop;		/* terminate parsing at end of buffer ? */
+#if linux
+	void* yybuf;
+#endif
+
+} inbuf_t;
+
+void sial_switch_to_buffer(void *);
+void sial_delete_buffer(void *);
+#define MAXIN 20
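+/* inlist is a stack of nested input buffers: #include'd files, macro
+   expansions and #if/#elif expressions are all pushed on top of the
+   current buffer and popped when exhausted. */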
+static inbuf_t inlist[MAXIN];
+static inbuf_t *in=0;
+static int nin=0;
+static int eol=0, virgin=1;
+#if linux
+static int inpp=0;
+#endif
+
+extern void *sial_getmac(char *, int);
+
+/* this function is called by the macro functions to set
+   and test the current buffer level in order to take care of
+   embedded macros with the same parameter names.
+   see sial_getmac().
+*/ 
+void *sial_getcurmac(void) 
+{ 
+	return in ? in->mac : 0 ;
+}
+
+static void
+sial_pusherror(void)
+{
+	sial_error("Too many level of input stream");
+}
+
+/*
+	Push a buffer onto the parser input stream.
+*/
+void
+sial_pushbuf(char *buf, char *fname, void (*vf)(void *), void *d, void *m)
+{
+fdone *f=(fdone*)vf;
+
+	if(nin==MAXIN) sial_pusherror();
+
+	/* if we are pushing a macro then use upper level coordinates */
+	if(fname) {
+
+		inlist[nin].pos.line=1;
+		inlist[nin].pos.col=1;
+		inlist[nin].pos.file=fname;
+
+	} else sial_setpos(&inlist[nin].pos);
+
+	/* set it */
+	if(nin) {
+
+		sial_curpos(&inlist[nin].pos, &inlist[nin-1].pos);
+
+	} else {
+
+		sial_curpos(&inlist[nin].pos, 0);
+
+	}
+
+	inlist[nin].buf=buf;
+	inlist[nin].donefunc=f;
+	inlist[nin].space=0;
+	inlist[nin].data=d;
+	inlist[nin].mac=m;
+	inlist[nin].cursor=0;
+	inlist[nin].len=strlen(buf);
+	inlist[nin].eofonpop=0;
+#if linux
+	if(inpp) {
+		inlist[nin].yybuf=sialpp_create_buffer(0, inlist[nin].len);
+		sialpp_switch_to_buffer(inlist[nin].yybuf);
+	}else{
+		inlist[nin].yybuf=sial_create_buffer(0, inlist[nin].len);
+		sial_switch_to_buffer(inlist[nin].yybuf);
+	}
+#endif
+	in=&inlist[nin];
+	nin++;
+}
+
+/* read the rest of the "#include" line from the parser input stream,
+   open the corresponding file, and push its contents onto the parser input
+   stream.
+*/
+int
+sial_pushfile(char *name)
+{
+struct stat s;
+char *fname;
+
+	if(nin==MAXIN) sial_pusherror();
+	
+	fname=sial_fileipath(name);
+
+	if(fname) {
+
+		if(!stat(fname, &s)) {
+	
+			char *buf=sial_alloc(s.st_size+1);
+			int fd;
+
+			if((fd=open(fname, O_RDONLY))==-1) {
+
+					sial_msg("%s: %s", fname, strerror(errno));
+
+			}
+			else {
+
+				if(read(fd, buf, s.st_size) != s.st_size) {
+
+					if(errno != EISDIR) 
+						sial_msg("%s: read error : %s", fname, strerror(errno));
+
+				}
+				else {
+
+					
+					buf[s.st_size]='\0';
+					sial_pushbuf(buf, fname, sial_free, buf, 0);
+					close(fd);
+					return 1;
+
+				}
+				close(fd);
+			}
+
+			sial_free(buf);
+
+		}
+		sial_free(fname);
+	}
+	return 0;
+
+}
+
+/*
+	Done with the current buffer.
+	Go back to previous on the stack.
+*/
+static int
+sial_popin(void)
+{
+
+	if(eol || !nin) {
+
+		if(!nin) in=0;
+		return 1;
+	
+	} else {
+
+		nin--;
+
+		/* call back */
+		if(inlist[nin].donefunc) {
+
+			inlist[nin].donefunc(inlist[nin].data);
+		}
+		if(inlist[nin].eofonpop) {
+
+			eol=1;
+#if linux
+			inpp=0;
+#endif
+		}
+		if(!nin) in=0;
+		else {
+
+			in=&inlist[nin-1];
+			if(!eol) {
+#if linux
+				if(inpp) {
+					sialpp_switch_to_buffer(inlist[nin-1].yybuf);
+					sialpp_delete_buffer(inlist[nin].yybuf);
+				} else {
+					sial_switch_to_buffer(inlist[nin-1].yybuf);
+					sial_delete_buffer(inlist[nin].yybuf);
+				}
+#endif
+			}
+			sial_curpos(&in->pos, 0);
+		}
+		return 0;
+	}
+}
+
+/*
+  With linux we need to use the wrap function
+  so that the flex buffer stuff is kept in the game.
+*/
+int
+sialwrap(void) 
+{ 
+	return sial_popin(); 
+}
+
+int
+sialppwrap(void)
+{ 
+	if(eol) return 1;
+	return sial_popin();
+}
+
+void
+sial_popallin(void)
+{
+	while(nin) {
+		eol=0;
+		sial_popin();
+	}
+}
+
+#define BLK_IFDEF	1
+#define BLK_IFNDEF	2
+#define BLK_IF		3
+#define BLK_ELIF	4
+#define BLK_ELSE	5
+
+typedef struct ifblk {
+	int type;	/* type of block */
+	int exprpos;    /* cursor to start of corresponding expression */
+	int bstart;	/* cursor position at start of block */
+	int dirlen;	/* length of the directive name */
+	int bend;	/* cursor position at end of block */
+	struct ifblk *next;
+} ifblk_t;
+
+static int
+sial_isif(int pos)
+{
+	if(
+	      (in->len-pos>6 && !strncmp(in->buf+pos, "ifndef", 6))
+	   || (in->len-pos>5 && !strncmp(in->buf+pos, "ifdef", 5)) 
+	   || (in->len-pos>2 && !strncmp(in->buf+pos, "if", 2)) 
+
+	) return 1;
+
+	return 0;
+}
+
+/*
+	Get directly to next block, skipping nested blocks.
+*/
+static int
+sial_nxtblk(int pos, int lev)
+{
+int virgin=0;
+
+	while(1) {
+
+		if(pos==in->len) {
+
+			sial_error("Block without endif");
+		}
+
+		if(virgin && in->buf[pos]=='#') {
+
+			pos++;
+
+			/* nested if ? */
+			if(in->buf[pos]=='i' && sial_isif(pos)) {
+
+				while(1) {
+					pos=sial_nxtblk(pos, lev+1);
+					if(in->len-pos>5 && !strncmp(in->buf+pos, "endif", 5)) break;
+				}
+
+			} else if(in->buf[pos]=='e') return pos;
+
+		} else if(in->buf[pos]=='\n') {
+
+			virgin=1;
+
+		} else if(in->buf[pos] != ' ' && in->buf[pos] != '\t') {
+
+			virgin=0;
+		}
+		pos++;
+	}
+}
+
+static ifblk_t *
+sial_getblklst(void)
+{
+ifblk_t *lst, *last;
+int doneelse=0, pos;
+
+	lst=sial_alloc(sizeof(ifblk_t));
+
+	lst->bstart=in->cursor-1;
+	if(!strncmp(in->buf+in->cursor, "ifdef", 5)) {
+
+		lst->type=BLK_IFDEF;
+		lst->exprpos=lst->bstart+6;
+		lst->dirlen=6;
+
+	} else if(!strncmp(in->buf+in->cursor, "ifndef", 6)){
+
+		lst->type=BLK_IFNDEF;
+		lst->exprpos=lst->bstart+7;
+		lst->dirlen=7;
+
+	} else {
+
+		lst->type=BLK_IF;
+		lst->exprpos=lst->bstart+3;
+		lst->dirlen=3;
+	}
+
+	last=lst;
+	pos=in->cursor;
+
+	while(1) {
+
+		ifblk_t *new=sial_alloc(sizeof(ifblk_t));
+
+		pos=sial_nxtblk(pos, 0);
+
+		last->bend=pos-2;
+		new->bstart=pos-1;
+		if(!strncmp(in->buf+pos, "elif", 4)) {
+
+			if(doneelse) {
+
+				sial_error("Additional block found after #else directive");
+			}
+			new->type=BLK_ELIF;
+			new->exprpos=new->bstart+5;
+			new->dirlen=5;
+
+		} else if(!strncmp(in->buf+pos, "else", 4)) {
+
+			if(doneelse) {
+
+				sial_error("#else already done");
+			}
+			new->type=BLK_ELSE;
+			new->exprpos=new->bstart+5;
+			new->dirlen=5;
+			doneelse=1;
+
+		} else if(!strncmp(in->buf+pos, "endif", 5)) {
+
+			sial_free(new);
+			last->next=0;
+			break;
+		}
+		last->next=new;
+		last=new;
+	}
+	return lst;
+}
+
+/*
+	Zap a complete block.
+	We put spaces everywhere but over the newline.
+	Hey, it works. It's good enough for me.
+*/
+static void
+sial_zapblk(ifblk_t *blk)
+{
+int i;
+
+	for(i=blk->bstart;i<blk->bend;i++) {
+
+		if(in->buf[i]!='\n') in->buf[i]=' ';
+	}
+}
+
+int sial_eol(char c) { return (!c || c=='\n') ? 1 : 0; }
+
+/*
+	This function is called by sial_input() when a #if[def] is found.
+	We gather all blocks of the if/then/else into a list.
+	Parsing and execution of the expression is done only when needed.
+*/
+void sial_rsteofoneol(void)
+{
+	eol=0;
+	virgin=1;
+#if linux
+	inpp=0;
+#endif
+}
+
+void
+sial_zapif(void)
+{
+ifblk_t *lst=sial_getblklst();
+ifblk_t *last=lst;
+int b=0;
+
+	/* we scan the entire list until a condition is true or we
+	   reach #else or we reach the end */
+	while(lst) {
+
+		switch(lst->type) {
+
+			case BLK_IFDEF:
+			case BLK_IFNDEF:
+			{
+			char mname[MAX_SYMNAMELEN+1], c;
+			int i=0, j=lst->bstart+lst->dirlen;
+			int v;
+
+				/* get the macro name and see if it exists */
+				/* skip all white spaces */
+				while((c=in->buf[j]) == ' ' || c == '\t') if(c=='\n' || !c) {
+
+					sial_error("Macro name not found!");
+
+				} else j++;
+
+				/* get the constant or macro name */
+				while((c=in->buf[j]) != ' ' && c != '\t' && c != '(') {
+
+					if(c=='\n' || !c) break;
+
+					if(i==MAX_SYMNAMELEN) break;
+
+					mname[i++]=c;
+					j++;
+				}
+				mname[i]='\0';
+				lst->dirlen += (j-lst->bstart-lst->dirlen);
+				if(sial_getmac(mname,0)) v=1;
+				else v=0;
+				b=lst->type==BLK_IFDEF?v:!v;
+
+			}
+			break;
+
+			case BLK_IF: case BLK_ELIF:
+			{
+			node_t*n;
+			void sialpprestart(int);
+			void sialppparse(void);
+			char *expr=sial_getline();
+			int len=lst->dirlen;
+
+#if linux
+				sialpprestart(0);
+				inpp=1;
+#endif
+				lst->dirlen += (in->cursor-lst->exprpos-1);
+				sial_pushbuf(expr, 0, sial_free, expr, 0);
+				in->eofonpop=1;
+				in->cursor += len;
+				sialppparse();
+
+				sial_rsteofoneol();
+				eol=0;
+
+				/* get the resulting node_t*/
+				n=sial_getppnode();
+
+				/* execute it */
+				{
+					
+				int *exval;
+				jmp_buf exitjmp;
+				void *sa;
+				value_t *v;
+
+					sa=sial_setexcept();
+
+					if(!setjmp(exitjmp)) {
+
+						sial_pushjmp(J_EXIT, &exitjmp, &exval);
+						v=NODE_EXE(n);
+						sial_rmexcept(sa);
+						sial_popjmp(J_EXIT);
+						b=sial_bool(v);
+						sial_freeval(v);
+
+					} else {
+
+						sial_rmexcept(sa);
+						sial_parseback();
+					}
+				}
+			}
+			break;
+
+			case BLK_ELSE:
+			{
+
+				b=1;
+
+			}
+			break;
+		}
+
+		last=lst;
+		if(b) break;
+
+		/* count new lines */
+		{
+			while(in->cursor < lst->bend+1) {
+
+				if(sial_eol(in->buf[in->cursor]))
+					sial_line(1);
+				in->cursor++;
+			}
+	
+		}
+		lst=lst->next;
+	}
+
+	if(lst) {
+
+		/* remove the # directive itself */
+		memset(in->buf+lst->bstart, ' ', lst->dirlen);
+
+		/* zap all remaining blocks */
+		while((lst=lst->next)) { sial_zapblk(lst); last=lst; }
+	}
+
+	/* must remove the #endif */
+	memset(in->buf+last->bend+1, ' ', 6);
+}
+
+static int rawinput=0;
+void sial_rawinput(int on) { rawinput=on; }
+
+/*
+	Get the next character from the input stream stack.
+*/
+int
+sial_input(void) 
+{ 
+register char c;
+
+redo:
+
+	if(!in || eol) {
+
+		return 0;
+	}
+
+	if(in->cursor==in->len) {
+
+#if linux
+		return (-1);
+#else
+		sial_popin();
+		goto redo;
+#endif
+	}
+
+	c=in->buf[in->cursor++];
+	if(!rawinput) {
+		if(c=='\\') {
+
+			if(in->cursor==in->len) return c;
+			else if(in->buf[in->cursor]=='\n') {
+
+				sial_line(1);
+				in->cursor++;
+				goto redo;
+			}
+
+		} else if(c=='/') {
+
+			if(in->cursor==in->len) return c;
+			else if(in->buf[in->cursor]=='/') {
+
+				/* C++ style comment. Eat it. */
+				in->cursor++;
+				while(in->cursor<in->len) {
+
+					c=in->buf[in->cursor++];
+					if(c=='\n') { 
+						/* leave the newline in there */
+						in->cursor--;
+						break;
+					}
+				}
+				goto redo;
+
+			}else if(in->buf[in->cursor]=='*') {
+
+				/* C style comment, eat it */
+				in->cursor++;
+				while(in->cursor<in->len) {
+
+					c=in->buf[in->cursor++];
+					if(c=='*' && (in->cursor<in->len)) {
+
+						if(in->buf[in->cursor]=='/') {
+
+							in->cursor++;
+							break;
+
+						}
+
+					} else if(c=='/' && (in->cursor<in->len)) {
+
+						if(in->buf[in->cursor]=='*') {
+
+							sial_warning("Nested comment");
+
+						}
+
+					}
+					if(c=='\n') sial_line(1);
+				}
+				goto redo;
+			}
+
+		}else if(virgin && c=='#') {
+
+			char *p=in->buf+in->cursor;
+			char *end=in->buf+in->len;
+			int c=0;
+
+			/* skip white spaces '#      define ... ' */
+			while(p<(end-4) && (*p==' ' || *p=='\t')) { p++; c++; }
+
+			/* this must be a preprocessor command */
+			/* we trigger on the if, ifdef only. #define, #undef, #include are
+			   handled by the lexer */
+
+			if(!strncmp(p, "if", 2)) {
+
+				in->cursor += c;
+				sial_zapif();
+				/* zapif sets the cursor correctly */
+				goto redo;
+			}
+		}
+	}
+
+	if(c=='\n') {
+
+		virgin=1;
+		sial_line(1);
+
+	}else if(c != ' ' && c != '\t') {
+
+		virgin=0;
+
+	} 
+	else if(!rawinput){
+
+		register char c2=c;
+
+		/* return one white space for a group of them */
+		while((in->cursor < in->len) 
+			&& in->buf[in->cursor]==c2) in->cursor++;
+
+	}
+
+	return c;
+}
+
+char *
+sial_cursorp()
+{
+	if(!in) return 0;
+	return in->buf+in->cursor;
+}
+
+void
+sial_unput(char c)
+{
+
+	if(!c) return;
+	if(!nin) return;
+	if(!in->cursor) {
+
+		sial_error("Fatal unput error");
+
+	}
+	in->buf[--in->cursor]=c;
+	if(c=='\n') {
+
+		sial_line(-1);
+	}
+}
+
+/*
+	Get a single line from the parser stream.
+*/
+char *
+sial_getline()
+{
+char *buf2=0;
+
+	/* use the current input stream for that */
+	if(in) {
+
+		/* save the cursor */
+		int n=0, c;
+		char *buf=sial_alloc(in->len-in->cursor+1);
+
+		while(!sial_eol(c=sial_input()))
+			buf[n++]=c;
+		buf[n]='\0';
+		buf2=sial_alloc(n+2);
+		strcpy(buf2,buf);
+		buf2[n]=' ';
+		buf2[n+1]='\0';
+		sial_free(buf);
+		/* leave the newline there */
+		sial_unput(c);
+	}
+	return buf2;
+}
+
+
+/* read a complete line from the input stream */
+void
+sial_include(void)
+{
+char name[MAX_SYMNAMELEN+1];
+int n=0;
+int c;
+int found=0;
+
+	while((c=sial_input())) {
+
+		if(c=='"') {
+
+			if(!found) found++;
+			else break;
+			continue;
+		}
+
+		if(c=='<') {
+
+			found++;
+			continue;
+			
+		}
+		if(c=='>') break;
+		if(sial_eol(c)) {
+
+			sial_error("Unexpected EOL on #include");
+		}
+		if(found) {
+
+			if(n==MAX_SYMNAMELEN) {
+
+				sial_error("Filename too long");
+			}
+			name[n++]=c;
+
+		}
+	}
+	name[n]='\0';
+
+	/* flush the rest of the line */
+	while((c=sial_input())) {
+
+		if(sial_eol(c)) break;
+	}
+	sial_unput(c);
+	if(sial_fileipath(name)) {
+
+		sial_pushfile(name);
+
+	} else {
+
+		sial_msg("Include file not found: '%s' [include path is '%s']", name, sial_getipath());
+	}
+}
--- crash/extensions/libsial/mkbaseop.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/mkbaseop.c	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2000 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "sial.h"
+#include "sial.tab.h"
+/*
+	This utility generates an operator function table for the base types.
+	Each combination of operand type and operation needs a dedicated
+	function. We use a table defined herein to generate an indirect table
+	that is indexed (from within sial_op.c) using:
+
+	value_t * (func)(value_t *v1, value_t *v2) = table[type1][type2][op];
+*/
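+/*
+	For illustration, a generated entry looks roughly like this (sl + ul):
+
+	static void
+	op_ADD_sl_ul(value_t *v1,value_t *v2,value_t *ret)
+	{
+		ret->v.ul = v1->v.sl + v2->v.ul;
+		ret->type.type=v2->type.type;
+		ret->type.idx=v2->type.idx;
+		ret->type.size=v2->type.size;
+	}
+*/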
+static struct opss {
+	char *str;
+	char *acro;
+	int code;
+} opstbl[] = {
+	{ "+", "ADD", ADD },
+        { "-", "SUB", SUB },
+        { "/", "DIV", DIV },
+        { "*", "MUL", MUL },
+        { "^", "XOR", XOR },
+        { "%", "MOD", MOD },
+        { "|", "OR",  OR  },
+        { "&", "AND", AND },
+	{ "<<", "SHL", SHL },
+	{ ">>", "SHR", SHR },
+	{ "==", "EQ", EQ }, /* must be first bool */
+	{ ">", "GT", GT },
+	{ "<", "LT", LT },
+	{ ">=", "GE", GE },
+	{ "<=", "LE", LE },
+	{ "!=", "NE", NE },
+};
+
+static char *typtbl[] = { "sc", "uc", "ss", "us", "sl", "ul", "sll", "ull" };
+
+#define NOPS (sizeof(opstbl)/sizeof(opstbl[0]))
+#define NTYPS (sizeof(typtbl)/sizeof(typtbl[0]))
+
+int
+main()
+{
+int i,j,k;
+
+	printf("\
+#include \"sial.h\"\n\
+#include \"sial.tab.h\"\n\
+/**************************************************************\n\
+ This file is generated by a program.\n\
+ Check out and modify libsial/mkbaseop.c instead !\n\
+***************************************************************/\n");
+
+
+	/* create all the functions for all combinations */
+	for(i=0;i<NTYPS;i++) {
+	
+		for(j=0; j<NTYPS;j++) {
+
+			int bool=0;
+
+			for(k=0;k<NOPS;k++) {
+
+				if(opstbl[k].code==EQ) bool++;
+
+
+				if(!bool) {
+
+					printf(""
+"static void \n"
+"op_%s_%s_%s(value_t *v1,value_t *v2,value_t *ret)\n"
+"{\n"
+"	ret->v.%s = v1->v.%s %s v2->v.%s;\n"
+"	ret->type.type=%s->type.type;\n"
+"	ret->type.idx=%s->type.idx;\n"
+"	ret->type.size=%s->type.size;\n"
+"}\n", 
+					opstbl[k].acro, 
+					typtbl[i], 
+					typtbl[j], 
+					j>=i?typtbl[j]:typtbl[i], 
+					typtbl[i], 
+					opstbl[k].str,
+					typtbl[j],
+					j>=i?"v2":"v1",
+					j>=i?"v2":"v1",
+					j>=i?"v2":"v1");
+
+				} else {
+
+					printf(""
+"static void \n"
+"op_%s_%s_%s(value_t *v1,value_t *v2,value_t *ret)\n"
+"{\n"
+"	ret->v.%s = ( v1->v.%s %s v2->v.%s );\n"
+"	ret->type.type=V_BASE;\n"
+"	ret->type.idx=B_UL;\n"
+"	ret->type.size=4;\n"
+"}\n",
+					opstbl[k].acro, 
+					typtbl[i], 
+					typtbl[j], 
+					"ul",
+					typtbl[i], 
+					opstbl[k].str,
+					typtbl[j]);
+				}
+
+			}
+
+		}
+
+	}
+
+	/* create the array that the runtime will index
+	   to get a function pointer */
+
+	printf("void (*opfuncs[%d][%d][%d])()={\n", NTYPS, NTYPS, NOPS);
+
+	for(i=0;i<NTYPS;i++) {
+	
+		for(j=0; j<NTYPS;j++) {
+
+			printf("\t");
+
+			for(k=0;k<NOPS;k++) {
+
+				if(!(k % 6)) printf("\n\t");	/* break the line every 6 entries */
+				printf("op_%s_%s_%s, ", opstbl[k].acro, typtbl[i], typtbl[j]);
+
+			}
+			printf("\n");
+
+		}
+
+	}
+	printf("};\n");
+
+	/* output an ops lookup table */
+	printf("\nstatic int opslut[%d]={\n", NOPS);
+
+	for(i=0;i<NOPS;i++) {
+
+		printf("%s, ", opstbl[i].acro);
+
+	}
+	printf("};\n");
+
+	/* output the main op execution function */
+	printf("\n\
+void\n\
+sial_baseop(int op, value_t *v1, value_t *v2, value_t *ret)\n\
+{\n\
+int i;\n\
+\n\
+	for(i=0;i<%d;i++) {\n\
+\n\
+		if(opslut[i]==op) break;\n\
+\n\
+	}\n\
+	if(i==%d) sial_error(\"Oops!ops!\");\n\
+	(opfuncs[v1->type.idx][v2->type.idx][i])(v1,v2,ret);\n\
+}\n", NOPS, NOPS);
+	exit(0);
+}
--- crash/extensions/libsial/sial_type.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_type.c	2008-10-28 14:53:26.000000000 -0400
@@ -0,0 +1,1169 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include "sial.h"
+#include "sial.tab.h"
+#include <string.h>
+#include <errno.h>
+/*
+	This file contains functions that deal with types and
+	type-casting operators.
+*/
+#define B_SIZE_MASK	0x0007f0
+#define B_SIGN_MASK     0x00f000
+#define B_STOR_MASK	0x1f0000
+#define B_CHAR		0x000010
+#define B_SHORT		0x000020
+#define B_INT		0x000040
+#define B_LONG		0x000080
+#define B_LONGLONG	0x000100
+#define B_FLOAT		0x000200
+#define B_CONST		0x000400
+#define B_SIGNED	0x001000
+#define B_UNSIGNED	0x002000
+#define B_STATIC	0x010000
+#define B_REGISTER	0x020000
+#define B_VOLATILE	0x040000
+#define B_TYPEDEF	0x080000
+#define B_EXTERN	0x100000
+#define B_VOID		0x800000
+#define B_USPEC		0x000001 /* user specified sign */
+#define B_ENUM		0x000002 /* btype is from an enum */
+
+#define is_size(i)	((i)&B_SIZE_MASK)
+#define is_sign(i)	((i)&B_SIGN_MASK)
+#define is_stor(i)	((i)&B_STOR_MASK)
+#define issigned(v)	(v->type.typattr & B_SIGNED)
+#define vsize(v)	(is_size(v->type.typattr))
+
+static struct {
+	int btype;
+	int key;
+	char *name;
+} blut[] = {
+	{ B_VOID,	VOID ,		"void"},
+	{ B_TYPEDEF,	TDEF ,		"tdef"},
+	{ B_EXTERN,	EXTERN ,	"extern"},
+	{ B_STATIC,	STATIC ,	"static"},
+	{ B_VOLATILE,	VOLATILE ,	"volatile"},
+	{ B_CONST,	CONST ,		"const"},
+	{ B_REGISTER,	REGISTER ,	"register"},
+	{ B_UNSIGNED,	UNSIGNED ,	"unsigned"},
+	{ B_SIGNED,	SIGNED ,	"signed"},
+	{ B_CHAR,	CHAR,		"char" },
+	{ B_SHORT,	SHORT ,		"short"},
+	{ B_INT,	INT ,		"int"},
+	{ B_LONG,	LONG ,		"long"},
+	{ B_LONGLONG,	DOUBLE ,	"long long"},
+	{ B_FLOAT,	FLOAT ,		"float"},
+};
+
+type_t *
+sial_newtype()
+{
+	return sial_calloc(sizeof(type_t));
+}
+
+void
+sial_freetype(type_t* t)
+{
+	if(t->idxlst) sial_free(t->idxlst);
+	sial_free(t);
+}
+
+/* this function is called by the parser to merge the
+   storage information (held in a basetype) into the real type_t */
+type_t*
+sial_addstorage(type_t*t1, type_t*t2)
+{
+	t1->typattr |= is_stor(t2->typattr);
+	sial_freetype(t2);
+	return t1;
+}
+
+char *
+sial_ctypename(int type)
+{
+	switch(type) {
+
+		case V_TYPEDEF: return "typedef";
+		case V_STRUCT: return "struct";
+		case V_UNION: return "union";
+		case V_ENUM: return "enum";
+		default: return "???";
+	}
+}
+
+int sial_isstatic(int atr) { return atr & B_STATIC; }
+int sial_isenum(int atr) { return atr & B_ENUM; }
+int sial_isconst(int atr) { return atr & B_CONST; }
+int sial_issigned(int atr) { return atr & B_SIGNED; }
+int sial_istdef(int atr) { return atr & B_TYPEDEF; }
+int sial_isxtern(int atr) { return atr & B_EXTERN; }
+int sial_isvoid(int atr) { return atr & B_VOID; }
+int sial_isstor(int atr) { return is_stor(atr); }
+int sial_is_struct(int ctype) { return ctype==V_STRUCT; }
+int sial_is_enum(int ctype) { return ctype==V_ENUM; }
+int sial_is_union(int ctype) { return ctype==V_UNION; }
+int sial_is_typedef(int ctype) { return ctype==V_TYPEDEF; }
+
+/* type setting */
+int sial_type_gettype(type_t*t) { return t->type; }
+void sial_type_settype(type_t*t, int type) { t->type=type; }
+void sial_type_setsize(type_t*t, int size) { t->size=size; }
+int  sial_type_getsize(type_t*t) { return t->size; }
+void sial_type_setidx(type_t*t, ull idx) { t->idx=idx; }
+ull sial_type_getidx(type_t*t) { return t->idx; }
+void sial_type_setidxlst(type_t*t, int* idxlst) { t->idxlst=idxlst; }
+void sial_type_setref(type_t*t, int ref, int type) { t->ref=ref; t->rtype=type; }
+void sial_type_setfct(type_t*t, int val) { t->fct=val; }
+void sial_type_mkunion(type_t*t) { t->type=V_UNION; }
+void sial_type_mkenum(type_t*t) { t->type=V_ENUM; }
+void sial_type_mkstruct(type_t*t) { t->type=V_STRUCT; }
+void sial_type_mktypedef(type_t*t) { t->type=V_TYPEDEF; }
+
+static int defbtype=B_LONG|B_SIGNED;
+static int defbidx=B_SL;
+static int defbsize=4;
+static int defbsign=B_SIGNED;
+int sial_defbsize() { return defbsize; }
+
+char *
+sial_getbtypename(int typattr)
+{
+int i;
+char *name=sial_alloc(200);
+
+	name[0]='\0';
+	for(i=0;i<sizeof(blut)/sizeof(blut[0]);i++) {
+
+		/* skip sign attr. if it is the default */
+		if(is_sign(blut[i].btype)) {
+
+			if(!(typattr & B_USPEC)) continue;
+			if(typattr & B_INT) {
+				if(blut[i].btype==B_SIGNED) continue;
+			} else if(typattr & B_CHAR) {
+				if(blut[i].btype == defbsign) continue;
+			} else if(blut[i].btype==B_UNSIGNED) continue;
+		}
+
+		if(typattr & blut[i].btype) {
+
+			strcat(name, blut[i].name);
+			if(i<(sizeof(blut)/sizeof(blut[0]))-1) {
+
+				strcat(name, " ");
+			}
+		}
+	}
+	return name;
+}
+
+/* promote a random base or ref into a ull */
+ull
+unival(value_t *v)
+{
+	if(v->type.type==V_REF) {
+
+		return TYPE_SIZE(&v->type)==4 ? (ull)(v->v.ul) : v->v.ull;
+
+	} else switch(v->type.idx) {
+
+                case B_SC: return (ull)(v->v.sc);
+                case B_UC: return (ull)(v->v.uc);
+                case B_SS: return (ull)(v->v.ss);
+                case B_US: return (ull)(v->v.us);
+                case B_SL: return (ull)(v->v.sl);
+                case B_UL: return (ull)(v->v.ul);
+                case B_SLL: return (ull)(v->v.sll);
+                case B_ULL: return (ull)(v->v.ull);
+                default: sial_error("Oops univ()[%d]", TYPE_SIZE(&v->type)); break;
+        }
+        return 0;
+}
+
+void
+sial_duptype(type_t*t, type_t*ts)
+{
+	memmove(t, ts, sizeof(type_t));
+	if(ts->idxlst) {
+
+		t->idxlst=sial_calloc(sizeof(int)*(MAXIDX+1));
+		memmove(t->idxlst, ts->idxlst, sizeof(int)*(MAXIDX+1));
+	}
+}
+
+#define asarray(v) (v->arr!=v->arr->next)
+
+/*
+	Duplicate a value_t.
+	On duplication we verify the value_ts involved.
+	This makes it possible to pass arrays to subfunctions
+	and to override specific value_ts that also have arrays attached
+	to them.
+*/
+void
+sial_dupval(value_t *v, value_t *vs)
+{
+int isvoid=(v->type.typattr & B_VOID);
+
+	/* if both have an attached array ... fail */
+	if(asarray(v) && asarray(vs)) {
+
+		sial_error("Can't override array");
+
+	}
+	/* when we are attaching a new array to the destination value_t
+	   we need to add the destination reference count to the source */
+	if(asarray(v)) {
+
+		array_t*a=v->arr;
+
+		/* preserve the array across the freedata and memmove */
+		v->arr=0;
+		sial_freedata(v);
+
+		/* copy the new value_t over it */
+		memmove(v, vs, sizeof(value_t));
+
+		/* and restore the array_t*/
+		v->arr=a;
+
+	} else {
+
+		sial_refarray(vs, 1);
+		sial_freedata(v);
+		memmove(v, vs, sizeof(value_t));
+	} 
+
+	sial_duptype(&v->type, &vs->type);
+	sial_dupdata(v, vs);
+
+	/* preserve the void attribute across assignments */
+	v->type.typattr |= isvoid;
+}
+
+/*
+	clone a value_t.
+*/
+value_t *
+sial_cloneval(value_t *v)
+{
+value_t *nv=sial_alloc(sizeof(value_t));
+
+	memmove(nv, v, sizeof(value_t));
+	sial_refarray(v, 1);
+	sial_dupdata(nv, v);
+	return nv;
+}
+
+static signed long long 
+twoscomp(ull val, int nbits)
+{
+	return val | (0xffffffffffffffffll << nbits);
+	// XXX return (val-1)^0xffffffffll;
+}
+
+/*
+	Get a bit field value_t from system image or live memory.
+	We do all operations with a ull until the end.
+	Then we check the basetype size and sign and convert
+	appropriately.
+*/
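+/*
+	A worked example (illustrative): for a 3-bit field at bit offset 5
+	whose containing word reads 0xe0, mask = (1 << 3) - 1 = 7 and
+	val = (0xe0 >> 5) & 7 = 7.  If the field is signed, bit 2 of the
+	result is set, so twoscomp(7, 3) sign-extends it to -1; otherwise
+	the value stays 7.
+*/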
+void
+get_bit_value(ull val, int nbits, int boff, int size, value_t *v)
+{
+        ull mask;
+	int dosign=0;
+	int vnbits=size*8;
+
+	/* first get the value_t */
+        if (nbits >= 32) {
+                int upper_bits = nbits - 32;
+                mask = ((1 << upper_bits) - 1);
+                mask = (mask << 32) | 0xffffffff;
+        }
+        else {
+                mask = ((1 << nbits) - 1);
+        }
+        val = val >> boff;
+	val &= mask;
+
+	if(issigned(v)) {
+
+		/* get the sign bit */
+		if(val >> (nbits-1)) dosign=1;
+
+	}
+	switch(vsize(v)) {
+
+		case B_CHAR: {
+			if(dosign) {
+				v->v.sc=(signed char)twoscomp(val, nbits);
+			}
+			else {
+				v->v.uc=val;
+			}
+		}
+		break;
+		case B_SHORT: {
+			if(dosign) {
+				v->v.ss=(signed short)twoscomp(val, nbits);
+			}
+			else {
+				v->v.us=val;
+			}
+		}
+		break;
+		case B_LONG: 
+
+			if(sial_defbsize()==8) goto ll;
+
+		case B_INT: {
+			if(dosign) {
+				v->v.sl=(signed long)twoscomp(val, nbits);
+			}
+			else {
+				v->v.ul=val;
+			}
+		}
+		break;
+		case B_LONGLONG: {
+ll:
+			if(dosign) {
+				v->v.sll=(signed long long)twoscomp(val, nbits);
+			}
+			else {
+				v->v.ull=val;
+			}
+		}
+		break;
+		default:
+			sial_error("Oops get_bit_value_t...");
+		break;
+	}
+
+}
+/*
+	Set a bit field value_t. dvalue is the destination value_t as read
+	from either the system image or live memory.
+ */
+ull
+set_bit_value_t(ull dvalue, ull value, int nbits, int boff)
+{
+        ull mask;
+
+        if (nbits >= 32) {
+                int upper_bits = nbits - 32;
+                mask = ((1 << upper_bits) - 1);
+                mask = (mask << 32) | 0xffffffff;
+        }
+        else {
+                mask = ((1 << nbits) - 1);
+        }
+	/* strip out the current value_t */
+	dvalue &= ~(mask << boff);
+
+	/* put in the new one */
+        dvalue |= (value << boff);
+	return dvalue;
+}
+
+/* this function is called when we have determined the system's
+   default int size (64-bit vs 32-bit) */
+void 
+sial_setdefbtype(int size, int sign)
+{
+int idx=B_INT;
+
+	switch(size) {
+
+	case 1: defbtype=B_CHAR; idx=B_UC; break;
+	case 2: defbtype=B_SHORT;idx=B_US;  break;
+	case 4: defbtype=B_INT; idx=B_UL; break;
+	case 8: defbtype=B_LONGLONG; idx=B_ULL; break;
+
+	}
+	if(sign) defbsign = B_SIGNED;
+	else defbsign = B_UNSIGNED;
+	defbtype |= defbsign;
+	defbsize=size;
+	defbidx=idx;
+}
+
+static int
+getbtype(int token)
+{
+int i;
+
+	for(i=0;i<sizeof(blut)/sizeof(blut[0]);i++) {
+
+		if(blut[i].key==token) return blut[i].btype;
+	}
+
+	sial_error("token not found in btype lut [%d]", token);
+	return B_UNSIGNED;
+}
+
+int
+sial_isjuststatic(int attr)
+{
+int satr=is_stor(attr);
+
+	return (satr & ~B_STATIC) == 0;
+}
+
+value_t *sial_defbtypesize(value_t *v, ull i, int idx)
+{
+	v->type.type=V_BASE;
+	v->setfct=sial_setfct;
+	v->type.idx=idx;
+	v->mem=0;
+	switch(idx) {
+
+		case B_UC: case B_SC:
+			v->type.size=1;
+			v->v.uc=i;
+		break;
+		case B_US: case B_SS:
+			v->type.size=2;
+			v->v.us=i;
+		break;
+		case B_UL: case B_SL:
+			v->type.size=4;
+			v->v.ul=i;
+		break;
+		case B_ULL: case B_SLL:
+			v->type.size=8;
+			v->v.ull=i;
+		break;
+		default: sial_error("Oops defbtypesize!"); break;
+	}
+	return v;
+}
+
+value_t *
+sial_defbtype(value_t *v, ull i)
+{
+	v->type.typattr=defbtype;
+	return sial_defbtypesize(v, i, defbidx);
+}
+
+value_t *
+sial_makebtype(ull i)
+{
+value_t *v=sial_calloc(sizeof(value_t));
+
+	sial_defbtype(v, i);
+	sial_setarray(&v->arr);
+	TAG(v);
+	return v;
+}
+
+value_t *
+sial_newval()
+{
+value_t *v=sial_makebtype(0);
+
+	return v;
+}
+
+/* take the current basetypes and generate a unique index */
+static void
+settypidx(type_t*t)
+{
+int v1, v2, v3, size;
+
+	if(t->typattr & B_CHAR) {
+		size=1;
+		v1=B_SC; v2=B_UC; 
+		v3=(defbsign==B_SIGNED?B_SC:B_UC);
+	} else if(t->typattr & B_SHORT) {
+		size=2;
+		v1=B_SS; v2=B_US; v3=B_SS;
+	} else if(t->typattr & B_LONG) {
+		if(sial_defbsize()==4) {
+			size=4;
+			v1=B_SL; v2=B_UL; v3=B_SL;
+		} else goto ll;
+	} else if(t->typattr & B_INT) {
+go:
+		size=4;
+		v1=B_SL; v2=B_UL; v3=B_SL;
+	} else if(t->typattr & B_LONGLONG) {
+ll:
+		size=8;
+		v1=B_SLL; v2=B_ULL; v3=B_SLL;
+	}
+	else goto go;
+
+	if(t->typattr & B_SIGNED) t->idx=v1;
+	else if(t->typattr & B_UNSIGNED) t->idx=v2;
+	else t->idx=v3;
+	t->size=size;
+}
+
+/* map a base type index back to its attribute bits */
+int
+sial_idxtoattr(int idx)
+{
+int i;
+static struct {
+
+	int idx;
+	int attr;
+
+} atoidx[] = {
+
+	{B_SC,  B_SIGNED  | B_CHAR}, 
+	{B_UC,  B_UNSIGNED| B_CHAR}, 
+	{B_SS,  B_SIGNED  | B_SHORT}, 
+	{B_US,  B_UNSIGNED| B_SHORT}, 
+	{B_SL,  B_SIGNED  | B_LONG}, 
+	{B_UL,  B_UNSIGNED| B_LONG}, 
+	{B_SLL, B_SIGNED  | B_LONGLONG}, 
+	{B_ULL, B_UNSIGNED| B_LONGLONG}, 
+};
+
+	for(i=0; i < sizeof(atoidx)/sizeof(atoidx[0]); i++) {
+
+		if(atoidx[i].idx==idx)  return atoidx[i].attr;
+	}
+	sial_error("Oops sial_idxtoattr!");
+	return 0;
+}
+
+void
+sial_mkvsigned(value_t*v)
+{
+	v->type.typattr &= ~B_SIGN_MASK;
+	v->type.typattr |= B_SIGNED;
+	settypidx(&v->type);
+}
+
+/* if there's no sign set the default */
+void
+sial_chksign(type_t*t)
+{
+	if(sial_isvoid(t->typattr)) return;
+	if(!is_sign(t->typattr)) {
+
+		/* char is compile-time dependent */
+		if(t->idx==B_SC || t->idx==B_UC) t->typattr |= defbsign;
+		/* all other sizes are signed by default */
+		else t->typattr |= B_SIGNED;
+	}
+	settypidx(t);
+}
+
+/* if there's no size specification, make it an INT */
+void
+sial_chksize(type_t*t)
+{
+	if(!sial_isvoid(t->typattr) && !is_size(t->typattr)) sial_addbtype(t, INT);
+}
+
+/* create a new base type element */
+type_t*
+sial_newbtype(int token)
+{
+int btype;
+type_t*t=sial_newtype();
+
+	if(!token) btype=defbtype;
+	else {
+
+		btype=getbtype(token);
+		if(is_sign(btype)) btype |= B_USPEC;
+	}
+	t->type=V_BASE;
+	t->typattr=btype;
+	settypidx(t);
+	TAG(t);
+	return t;
+}
+
+/* set the default sign on a type if the user didn't specify one and it's not an int */
+#define set_base_sign(a) if(!(base & (B_USPEC|B_INT))) base = (base ^ is_sign(base)) | a
+
+/*
+		char	short	int	long	longlong
+char		XXX	XXX	XXX	XXX	XXX
+short		XXX	XXX	OOO	XXX	XXX
+int 		XXX	OOO	XXX	OOO	OOO
+long		XXX	XXX	OOO	OOO	XXX
+longlong	XXX	XXX	OOO	XXX	XXX
+
+   the parser lets you specify any of the B_ types. It's here that we
+   have to check things out
+
+*/
+type_t*
+sial_addbtype(type_t*t, int newtok)
+{
+int btype=getbtype(newtok);
+int base=t->typattr;
+
+	/* size specification. Check for 'long long'; any other
+	   combination of sizes is invalid, as is 'long long long' */
+	if(is_size(btype)) {
+
+		int ibase=base;
+
+		switch(btype) {
+
+			case B_LONG: {
+
+
+				if(!(base & (B_CHAR|B_SHORT))) {
+
+					set_base_sign(B_UNSIGNED);
+
+					if(base & B_LONG || sial_defbsize()==8) {
+
+						ibase &= ~B_LONGLONG;
+						base |= B_LONGLONG;
+						base &= ~B_LONG;
+
+					} else {
+
+						base |= B_LONG;
+					}
+				}
+				break;
+			}
+			case B_INT: {
+
+				/*
+				 * This is a bit of a hack to circumvent the
+				 * problem that "long int" or "long long int"
+				 * is a valid statement in C.
+				 */
+				if(!(base & (B_INT|B_CHAR|B_LONG|B_LONGLONG))) {
+
+					set_base_sign(B_SIGNED);
+					base |= B_INT;
+				}
+				if (base & (B_LONG|B_LONGLONG))
+					ibase = 0;
+				break;
+			}
+			case B_SHORT: {
+
+				if(!(base & (B_SHORT|B_CHAR|B_LONG|B_LONGLONG))) {	
+
+					base |= B_SHORT;
+					set_base_sign(B_UNSIGNED);
+				}
+
+			}
+			case B_CHAR: {
+
+				if(!(base & (B_CHAR|B_SHORT|B_INT|B_LONG|B_LONGLONG))) {	
+
+					base |= B_CHAR;
+					set_base_sign(defbsign);
+				}
+
+			}
+		}
+
+		if(ibase == base) {
+
+			sial_warning("Invalid combination of sizes");
+
+		}
+	
+	} else if(is_sign(btype)) {
+
+		if(base & B_USPEC) {
+
+			if(is_sign(btype) == is_sign(base))
+
+				sial_warning("duplicate type specifier");
+
+			else
+
+				sial_error("invalid combination of type specifiers");
+		}
+		/* always keep last found signed specification */
+		base ^= is_sign(base);
+		base |= btype;
+		base |= B_USPEC;
+
+	} else if(is_stor(btype)) {
+
+		if(is_stor(base)) {
+
+			sial_warning("Supplemental storage class ignored");
+
+		}
+		else base |= btype;
+	}
+	t->typattr=base;
+	settypidx(t);
+	return t;
+}
+
+/* this function gets called back from the API when the user needs to parse
+   a type declaration, e.g. when a dwarf typedef returns a type string */
+
+void
+sial_pushref(type_t*t, int ref)
+{
+	if(t->type==V_REF) {
+
+		t->ref += ref;
+
+	} else {
+
+		t->ref=ref;
+
+		if(ref) {
+
+			t->rtype=t->type;
+			t->type=V_REF;
+		}
+	}
+}
+void
+sial_popref(type_t*t, int ref)
+{
+
+	if(!t->ref) return;
+
+	t->ref-=ref;
+
+	if(!t->ref) {
+
+		t->type=t->rtype;
+	}
+}
+
+typedef struct {
+	int battr;
+	char *str;
+} bstr;
+static bstr btypstr[] = {
+	{CHAR,		"char"},
+	{SHORT,		"short"},
+	{INT,		"int"},
+	{LONG,		"long"},
+	{DOUBLE,	"double"},
+	{SIGNED,	"signed"},
+	{UNSIGNED,	"unsigned"},
+	{STATIC,	"static"},
+	{REGISTER,	"register"},
+	{VOLATILE,	"volatile"},
+	{VOID,		"void"},
+};
+int
+sial_parsetype(char *str, type_t*t, int ref)
+{
+char *p;
+char *tok, *pend;
+int ctype=0, i, first, found;
+type_t*bt=0;
+
+	/* if it's a simple unnamed ctype return 0 */
+        if(!strcmp(str, "struct")) { t->type=V_STRUCT; return 0; }
+        if(!strcmp(str, "enum"))   { t->type=V_ENUM; return 0; }
+        if(!strcmp(str, "union"))  { t->type=V_UNION; return 0; }
+
+	p=sial_strdup(str);
+
+	/* get the level of reference (count trailing '*'s) */
+	for(pend=p+strlen(p)-1; pend>=p; pend--) {
+
+		if(*pend==' ' || *pend == '\t') continue;
+		if(*pend == '*' ) ref ++;
+		else break;
+
+	}
+	*++pend='\0';
+
+again:
+	tok=strtok(p," ");
+	if(!strcmp(tok, "struct")) {
+
+		ctype=V_STRUCT;
+
+	} else if(!strcmp(tok, "union")) {
+
+		ctype=V_UNION;
+
+	} else if(!strcmp(tok, "enum")) {
+		sial_free(p);
+		p=(char*)sial_alloc(strlen("unsigned int") + 1);
+		/* force enum type into unsigned int type for now */
+		strcpy(p, "unsigned int");
+		goto again;
+
+	}
+	if(ctype) {
+
+		char *name=strtok(NULL, " \t");
+		bt=sial_getctype(ctype, name, 1);
+
+		/* we accept an unknown struct reference if it's a pointer to it */
+		/* the user will probably cast it to something else anyway... */
+		if(!bt) {
+
+			if(ref) {
+
+				bt=(type_t*)sial_getvoidstruct(ctype);
+
+			} else {
+
+				sial_error("Unknown Struct/Union/Enum %s", name);
+
+			}
+		}
+
+		sial_duptype(t, bt);
+		sial_freetype(bt);
+		sial_pushref(t, ref);
+		sial_free(p);
+		return 1;
+	}
+
+	/* this must be a basetype_t*/
+	first=1;
+	do {
+		found=0;
+		for(i=0;i<sizeof(btypstr)/sizeof(btypstr[0]) && !found;i++) {
+
+			if(!strcmp(tok, btypstr[i].str)) {
+
+				found=1;
+				if(first) {
+					first=0;
+					bt=sial_newbtype(btypstr[i].battr);
+				}
+				else {
+
+					sial_addbtype(bt, btypstr[i].battr);
+
+				}
+
+			}
+
+		}
+		if(!found) break;
+	
+	} while((tok=strtok(0, " \t")));
+
+	/* if both tok and bt are set, that means there was a bad token */
+	if(bt && tok) {
+
+		sial_error("Oops typedef expansion![%s]",tok);
+
+	}
+	/* could be a typedef */
+	if(!bt) {
+
+		int ret=0;
+
+		if((bt=sial_getctype(V_TYPEDEF, tok, 1))) {
+
+			sial_duptype(t, bt);
+			sial_freetype(bt);
+			sial_free(p);
+			return ret;
+
+		}
+		sial_free(p);
+		return ret;
+
+	}
+	else if(bt) {
+
+		/* make sure we have signed it and sized it */
+		sial_chksign(bt);
+		sial_chksize(bt);
+
+		sial_duptype(t, bt);
+		sial_freetype(bt);
+		sial_pushref(t, ref);
+		sial_free(p);
+		return 1;
+
+	}
+	sial_free(p);
+	return 0;
+}
+
+type_t*
+sial_newcast(var_t*v)
+{
+type_t*type=sial_newtype();
+
+	sial_duptype(type, &v->next->v->type);
+	sial_freesvs(v);
+	return type;
+}
+
+typedef struct cast {
+
+	type_t*t;
+	node_t*n;
+	srcpos_t pos;
+
+} cast;
+
+/* make sure we do the proper casting */
+void
+sial_transval(int s1, int s2, value_t *v, int issigned)
+{
+vu_t u;
+
+	if(s1==s2) return;
+
+	if(issigned) {
+
+		switch(s1) {
+			case 1:
+				switch(s2) {
+					case 2:
+						u.us=v->v.sc;
+					break;
+					case 4:
+						u.ul=v->v.sc;
+					break;
+					case 8:
+						u.ull=v->v.sc;
+					break;
+				}
+			break;
+			case 2:
+				switch(s2) {
+					case 1:
+						u.uc=v->v.ss;
+					break;
+					case 4:
+						u.ul=v->v.ss;
+					break;
+					case 8:
+						u.ull=v->v.ss;
+					break;
+				}
+			break;
+			case 4:
+				switch(s2) {
+					case 2:
+						u.us=v->v.sl;
+					break;
+					case 1:
+						u.uc=v->v.sl;
+					break;
+					case 8:
+						u.ull=v->v.sl;
+					break;
+				}
+			break;
+			case 8:
+				switch(s2) {
+					case 2:
+						u.us=v->v.sll;
+					break;
+					case 4:
+						u.ul=v->v.sll;
+					break;
+					case 1:
+						u.uc=v->v.sll;
+					break;
+				}
+			break;
+		}
+
+	} else {
+
+		switch(s1) {
+			case 1:
+				switch(s2) {
+					case 2:
+						u.us=v->v.uc;
+					break;
+					case 4:
+						u.ul=v->v.uc;
+					break;
+					case 8:
+						u.ull=v->v.uc;
+					break;
+				}
+			break;
+			case 2:
+				switch(s2) {
+					case 1:
+						u.uc=v->v.us;
+					break;
+					case 4:
+						u.ul=v->v.us;
+					break;
+					case 8:
+						u.ull=v->v.us;
+					break;
+				}
+			break;
+			case 4:
+				switch(s2) {
+					case 2:
+						u.us=v->v.ul;
+					break;
+					case 1:
+						u.uc=v->v.ul;
+					break;
+					case 8:
+						u.ull=v->v.ul;
+					break;
+				}
+			break;
+			case 8:
+				switch(s2) {
+					case 2:
+						u.us=v->v.ull;
+					break;
+					case 4:
+						u.ul=v->v.ull;
+					break;
+					case 1:
+						u.uc=v->v.ull;
+					break;
+				}
+			break;
+		}
+	}
+	memmove(&v->v, &u, sizeof(u));
+	if(v->type.type!=V_REF) v->type.size=s2;
+}
+
+value_t *
+sial_execast(cast *c)
+{
+/* we execute the expression node_t*/
+value_t *v=NODE_EXE(c->n);
+
+	/* ... and validate the type cast */
+	if(v->type.type != V_REF && v->type.type != V_BASE) {
+
+		sial_rerror(&c->pos, "Invalid typecast");
+
+	}
+	else {
+
+		int vsize=TYPE_SIZE(&v->type);
+		int issigned=sial_issigned(v->type.typattr);
+
+		/* Now, just copy the cast type over the current type_t*/
+		sial_duptype(&v->type, c->t);
+
+		/* Take into account the size of the two objects */
+		sial_transval(vsize, TYPE_SIZE(c->t), v, issigned);
+	}
+	return v;
+}
+
+void
+sial_freecast(cast *c)
+{
+	NODE_FREE(c->n);
+	sial_freetype(c->t);
+	sial_free(c);
+}
+
+node_t*
+sial_typecast(type_t*type, node_t*expr)
+{
+	if(type->type==V_STRING) {
+		
+		sial_error("Cannot cast to a 'string'");
+		return 0;
+
+	} else {
+
+		node_t*n=sial_newnode();
+		cast *c=sial_alloc(sizeof(cast));
+
+		c->t=type;
+		c->n=expr;
+		n->exe=(xfct_t)sial_execast;
+		n->free=(ffct_t)sial_freecast;
+		n->data=c;
+		sial_setpos(&c->pos);
+		return n;
+	}
+}
+
+/*
+	Validate type conversions on function calls and assignments.
+*/
+void
+sial_chkandconvert(value_t *vto, value_t *vfrm)
+{
+type_t*tto=&vto->type;
+type_t*tfrm=&vfrm->type;
+
+	if(tto->type == tfrm->type) {
+
+		if(tto->type == V_BASE) {
+
+			int attr=tto->typattr;
+			int idx=tto->idx;
+
+			sial_transval(tfrm->size, tto->size, vfrm, sial_issigned(vfrm->type.typattr));
+			sial_dupval(vto, vfrm);
+			tto->typattr=attr;
+			tto->idx=idx;
+			return;
+
+		} else if(tto->type == V_REF) {
+
+			if(sial_isvoid(tto->typattr) || sial_isvoid(tfrm->typattr)) goto dupit;
+
+			if(tto->ref == tfrm->ref && tto->rtype == tfrm->rtype) {
+
+				if(is_ctype(tto->rtype)) {
+
+					if(tto->idx == tfrm->idx || sial_samectypename(tto->rtype, tto->idx, tfrm->idx))
+						goto dupit;
+
+				} else if(tto->size == tfrm->size) {
+
+					int attr=tto->typattr;
+					sial_dupval(vto, vfrm);
+					tto->typattr=attr;
+					return;
+				}
+			}
+		}
+		/* Allow assignments between enums of the same type */
+		else if(is_ctype(tto->type) || tto->type == V_ENUM) {
+
+			/* same structure  type_t*/
+			if(tto->idx == tfrm->idx || sial_samectypename(tto->type, tto->idx, tfrm->idx))
+				goto dupit;
+		}
+		else if(tto->type == V_STRING) goto dupit;
+
+	} 
+	else if((tto->type == V_ENUM && tfrm->type == V_BASE) ||
+			(tto->type == V_BASE && tfrm->type == V_ENUM)) {
+		/* convert type from or to enum */
+		int attr=tto->typattr;
+		int idx=tto->idx;
+
+		sial_transval(tfrm->size, tto->size, vfrm, sial_issigned(vfrm->type.typattr));
+		sial_dupval(vto, vfrm);
+		tto->typattr=attr;
+		tto->idx=idx;
+		return;
+	}
+        // support NULL assignment to pointer
+        else if(tto->type == V_REF && tfrm->type == V_BASE && !sial_getval(vfrm)) return;
+	sial_error("Invalid type conversion");
+
+dupit:
+	sial_dupval(vto, vfrm);
+}
+
--- crash/extensions/libsial/sial_api.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_api.h	2007-10-30 11:13:53.000000000 -0400
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/* minor and major version number */
+#define S_MAJOR 3
+#define S_MINOR 0
+
+#define MAX_SYMNAMELEN  100
+#define MAXIDX		20
+
+/* abi values */
+#define ABI_MIPS	1
+#define ABI_INTEL_X86	2
+#define ABI_INTEL_IA	3
+#define ABI_S390        4
+#define ABI_S390X       5
+#define ABI_PPC64	6
+
+/* types of variables */
+#define V_BASE          1
+#define V_STRING        2
+#define V_REF           3
+#define V_ENUM          4
+#define V_UNION         5
+#define V_STRUCT        6
+#define V_TYPEDEF       7
+#define V_ARRAY         8
+
+#define ENUM_S		struct enum_s
+#define DEF_S		struct def_s
+#define MEMBER_S	struct member_s
+#define TYPE_S		struct type_s
+#define VALUE_S		struct value_s
+#define ARRAY_S		struct array_s
+#define NODE_S		struct node_s
+#define IDX_S		struct idx_s
+#define VAR_S		struct var_s
+
+ENUM_S;
+DEF_S;
+MEMBER_S;
+TYPE_S;
+VALUE_S;
+ARRAY_S;
+NODE_S;
+IDX_S;
+VAR_S;
+
+#if linux
+#include <stdint.h>
+typedef uint64_t ull;
+typedef uint32_t ul;
+#else
+typedef unsigned long long ull; 
+typedef unsigned long ul;
+#endif
+
+/* The API callback function table */
+typedef struct {
+
+        int (*getmem)(ull, void *, int);	/* read from system image */
+        int (*putmem)(ull, void *, int);	/* write to system image */
+	char* (*member)(char *, ull, TYPE_S *	/* get type and positional information ... */
+		, MEMBER_S *, ull *lidx); 	/* ... about the member of a structure */
+	int (*getctype)(int ctype, char *	/* get struct/union type information */
+		, TYPE_S*); 
+	char* (*getrtype)(ull, TYPE_S *);		/* get complex type information */
+	int (*alignment)(ull);			/* get alignment value for a type */
+	int (*getval)(char *, ull *);		/* get the value of a system variable */
+	ENUM_S* (*getenum)(char *name);		/* get the list of symbols for an enum type */
+	DEF_S*  (*getdefs)(void);		/* get the list of compiler pre-defined macros */
+	uint8_t (*get_uint8)(void*);
+	uint16_t (*get_uint16)(void*);
+	uint32_t (*get_uint32)(void*);
+	uint64_t (*get_uint64)(void*);
+	char* (*findsym)(char*);
+} apiops; 
+
+/*
+	Builtin API defines....
+*/
+/* call this function to install a new builtin 
+
+   proto is the function prototype ex:
+   struct proc* mybuiltin(int flag, char *str);
+
+   "mybuiltin" will be the sial name for the function.
+   "fp" is the pointer to the builtin function code.
+
+*/
+typedef VALUE_S* bf_t(VALUE_S*, ...);
+typedef struct btspec {
+	char *proto;
+	bf_t *fp;
+} btspec_t;
+
+/* dso entry points */
+#define BT_SPEC_TABLE   btspec_t bttlb[]
+#define BT_SPEC_SYM    "bttlb"
+#define BT_INIDSO_FUNC  int btinit
+#define BT_INIDSO_SYM  "btinit"
+#define BT_ENDDSO_FUNC  void btend
+#define BT_ENDDSO_SYM  "btend"
+
+/* maximum number of parameters that can be passed to a builtin */
+#define BT_MAXARGS	20
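+
+/*
+   A minimal sketch of a builtin registration (the function name, the
+   prototype string and the NULL terminator entry below are illustrative
+   assumptions, not part of the API):
+
+	static VALUE_S *my_add(VALUE_S *v1, VALUE_S *v2)
+	{
+		return sial_makebtype(sial_getval(v1) + sial_getval(v2));
+	}
+
+	BT_SPEC_TABLE = {
+		{ "int my_add(int a, int b)", (bf_t*)my_add },
+		{ 0, 0 }
+	};
+
+   or, from an embedding application, directly via:
+
+	sial_builtin("int my_add(int a, int b)", (bf_t*)my_add);
+*/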
+
+extern apiops *sial_ops;
+#define API_GETMEM(i, p, n)	((sial_ops->getmem)((i), (p), (n)))
+#define API_PUTMEM(i, p, n)	((sial_ops->putmem)((i), (p), (n)))
+#define API_MEMBER(n, i, tm, m, l)	((sial_ops->member)((n), (i), (tm), (m), (l)))
+#define API_GETCTYPE(i, n, t)	((sial_ops->getctype)((i), (n), (t)))
+#define API_GETRTYPE(i, t)	((sial_ops->getrtype)((i), (t)))
+#define API_ALIGNMENT(i)	((sial_ops->alignment)((i)))
+#define API_GETVAL(n, v)	((sial_ops->getval)((n), (v)))
+#define API_GETENUM(n)		((sial_ops->getenum)(n))
+#define API_GETDEFS()		((sial_ops->getdefs)())
+#define API_GET_UINT8(ptr)	((sial_ops->get_uint8)(ptr))
+#define API_GET_UINT16(ptr)	((sial_ops->get_uint16)(ptr))
+#define API_GET_UINT32(ptr)	((sial_ops->get_uint32)(ptr))
+#define API_GET_UINT64(ptr)	((sial_ops->get_uint64)(ptr))
+#define API_FINDSYM(p)		((sial_ops->findsym)(p))
+
+#if linux
+#	if __LP64__
+#		define sial_getptr(v, t) 	((t*)sial_getval(v))
+#	else
+#		define sial_getptr(v, t) 	((t*)(ul)sial_getval(v))
+#	endif
+#else
+#	if (_MIPS_SZLONG == 64)
+#		define sial_getptr(v, t) 	((t*)sial_getval(v))
+#	else
+#		define sial_getptr(v, t) 	((t*)(ul)sial_getval(v))
+#	endif
+#endif
+
+/* startup function */
+int	 sial_open(void);		/* initialize a session with sial */
+void	 sial_apiset(apiops *, int, int, int);/* define the API for a connection */
+void	 sial_setofile(void *);		/* sial should output messages to this file */
+void	*sial_getofile(void);		/* where is sial currently outputting */
+void	 sial_setmpath(char *p);	/* set the search path for sial scripts */
+void	 sial_setipath(char *p);	/* set the search path for sial include files  */
+VAR_S	*sial_builtin(char *proto, bf_t);/* install a builtin function */
+int      sial_cmd(char *name, char **argv, int argc); /* execute a command w/ args */
+
+/* load/unload of script files and directories */
+ull	 sial_load(char *);		/* load/parse a file */
+ull	 sial_unload(char *);		/* load/parse a file */
+void	 sial_loadall(void);		/* load all files found in set path */
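+
+/*
+   Typical embedding sequence (a sketch; the abi/size/sign arguments to
+   sial_apiset() and the paths shown are placeholder assumptions, check
+   the embedding code for the real values):
+
+	sial_open();
+	sial_apiset(&my_ops, ABI_INTEL_X86, 8, 0);
+	sial_setofile(stdout);
+	sial_setmpath("/usr/share/sial");
+	sial_loadall();
+	sial_cmd("mycmd", argv, argc);
+*/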
+
+/* variables associated functions */
+VAR_S	*sial_newvar(char *);		/* create a new static/auto variable */
+void	*sial_add_globals(VAR_S*);	/* add a set of variable to the globals context */
+VAR_S	*sial_newvlist(void);		/* create a root for a list of variables */
+
+int	 sial_tryexe(char *, char**, int);/* try to execute a function */
+int	 sial_parsetype(char*, TYPE_S *, int);/* parse a typedef line */
+ull	 sial_exefunc(char *, VALUE_S **);/* to execute a function defined in sial */
+
+/* help related function */
+void	 sial_showallhelp(void);	/* display help info for all commands */
+int	 sial_showhelp(char *);		/* display help info for a single command */
+
+/* allocation related function */
+void	*sial_alloc(int);		/* allocate some memory */
+void	*sial_calloc(int);		/* allocate some zero-filled memory */
+void	 sial_free(void*);		/* free it */
+char	*sial_strdup(char*);		/* equivalent of strdup() returns sial_free'able char */
+void	*sial_dupblock(void *p);	/* duplicate the contents of a block of allocated memory */
+void	*sial_realloc(void *p, int size);	/* reallocate a block */
+void	 sial_maketemp(void *p);	/* put a block on the temp list */
+void	 sial_freetemp(void);		/* free the temp list */
+VALUE_S	*sial_makebtype(ull);		/* create a default base type value (int) */
+
+/* handle values */
+VALUE_S	*sial_newval(void);		/* get a new placeholder for a value */
+void	 sial_freeval(VALUE_S *);	/* free a value* and associated structs */
+VALUE_S	*sial_makestr(char *);		/* create a string value */
+ull	 sial_getval(VALUE_S*);		/* transform a random value to a ull */
+VALUE_S	*sial_cloneval(VALUE_S *);	/* make a clone of a value */
+
+/* array related */
+/* add a new array element to a value */
+void	 sial_addvalarray(VALUE_S*v, VALUE_S*idx, VALUE_S*val);
+/* return the value associated with a int index */
+VALUE_S	*sial_intindex(VALUE_S *, int);	
+/* return the value associated with a 'string' index */
+VALUE_S	*sial_strindex(VALUE_S *, char *);
+/* set the value of an array element */
+void	 sial_setarrbval(ARRAY_S*, int);	
+/* get the array element corresponding to an index */
+ARRAY_S	*sial_getarrval(ARRAY_S**, VALUE_S*);
+/* add an (index, value) element to an array */
+ARRAY_S	*sial_addarrelem(ARRAY_S**, VALUE_S*, VALUE_S*); 
+
+/* type manipulation */
+int sial_is_struct(int);
+int sial_is_enum(int);
+int sial_is_union(int);
+int sial_is_typedef(int);
+int sial_type_gettype(TYPE_S*t);
+int sial_chkfname(char *fname, void *vfd);
+int sial_loadunload(int load, char *name, int silent);
+
+void sial_type_settype(TYPE_S*t, int type);
+void sial_setcallback(void (*scb)(char *, int));
+void sial_vilast(void);
+void sial_vi(char *fname, int file);
+void sial_type_setsize(TYPE_S*t, int size);
+int sial_type_getsize(TYPE_S*t);
+void sial_type_setidx(TYPE_S*t, ull idx);
+ull sial_type_getidx(TYPE_S*t);
+void sial_type_setidxlst(TYPE_S*t, int *idxlst);
+void sial_type_setref(TYPE_S*t, int ref, int type);
+void sial_type_setfct(TYPE_S*t, int val);
+void sial_type_mkunion(TYPE_S*t);
+void sial_type_mkenum(TYPE_S*t);
+void sial_type_mkstruct(TYPE_S*t);
+void sial_type_mktypedef(TYPE_S*t);
+TYPE_S*sial_newtype(void);
+void sial_freetype(TYPE_S*t);
+TYPE_S*sial_getctype(int ctype_t, char *name, int silent);
+void sial_type_free(TYPE_S* t);
+void sial_pushref(TYPE_S*t, int ref);
+void sial_duptype(TYPE_S*to, TYPE_S*from);
+int sial_defbsize(void);
+TYPE_S*sial_newbtype(int token);
+void sial_setdbg(unsigned int lvl);
+unsigned int sial_getdbg(void);
+void sial_setname(char *name);
+char *sial_getname(void);
+void sial_setclass(char *class);
+char **sial_getclass(void);
+
+/* struct member functions */
+void sial_member_soffset(MEMBER_S*m, int offset);
+void sial_member_ssize(MEMBER_S*m, int size);
+void sial_member_sfbit(MEMBER_S*m, int fbit);
+void sial_member_snbits(MEMBER_S*m, int nbits);
+void sial_member_sname(MEMBER_S*m, char *name);
+
+/* enums */
+ENUM_S* sial_add_enum(ENUM_S* e, char* name, int val);
+/* defines */
+DEF_S*	sial_add_def(DEF_S* d, char *name, char *val);
+
+/* error handling */
+/* display error w/ file/line coordinates */
+/* does not return */
+void sial_error(char *, ...);
+/* display warning w/ file/line coordinates */
+void sial_warning(char *, ...);
+/* display a message and continue */
+void sial_msg(char *, ...);
+/* display a debug message */
+#define DBG_TYPE            0x00000001
+#define DBG_STRUCT          0x00000002
+#define DBG_NAME            0x10000000  // 
+#define DBG_ALL             0x0fffffff
+void sial_dbg(int class, int level, char *, ...);
+void sial_dbg_named(int class, char *name, int level, char *, ...);
+
+/* parsers debug flags */
+extern int sialdebug, sialppdebug;
--- crash/extensions/libsial/sial_var.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_var.c	2007-10-30 11:13:53.000000000 -0400
@@ -0,0 +1,1320 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <stdio.h>
+#include <setjmp.h>
+#include <string.h>
+#include "sial.h"
+
+/*
+	Get an existing variable from the current set.
+*/
+
+/* variable lists for the different scopes */
+typedef struct {
+	int type;
+	var_t*svs;
+} svlist;
+
+typedef struct glo {
+	struct glo *next;
+	var_t*vv;
+} glo;
+
+/*
+	Free indexes specifications.
+*/
+void
+sial_freeidx(idx_t *idx)
+{
+int i;
+
+	for(i=0;i<idx->nidx;i++) {
+
+		if(idx->idxs[i]) NODE_FREE(idx->idxs[i]);
+	}
+	sial_free(idx);
+}
+
+/*
+	Free a variable declaration structure.
+*/
+void
+sial_freedvar(dvar_t*dv)
+{
+	if(!dv) return;
+	if(--dv->refcount) return;
+	if(dv->name) sial_free(dv->name);
+	if(dv->idx) sial_freeidx(dv->idx);
+	if(dv->init) NODE_FREE(dv->init);
+	if(dv->fargs) sial_freesvs(dv->fargs);
+	sial_free(dv);
+}
+
+void
+sial_setarray(array_t**arpp)
+{
+array_t*arp=*arpp;
+
+	if(!arp) {
+
+		arp=sial_calloc(sizeof(array_t));
+		TAG(arp);
+		arp->next=arp->prev=arp;
+		arp->ref=1;
+		*arpp=arp;
+	}
+}
+
+/*
+	This is the main variable declaration function.
+	We support the global scope attribute, which makes the declared
+	variable accessible to all functions from all scripts.
+
+	By default the scope of a variable is either the statement block
+	where it was declared (or first used):
+	{
+	int var;
+	 ...
+	}
+	in which case its scope is the block itself,
+
+	or the file, if it was declared outside of a function.
+
+	Storage is by default 'automatic' and can be made permanent
+	by using the 'static' keyword in the declaration.
+	'Volatile' and 'register' storage classes are supported but
+	have no effect.
+*/
+var_t*
+sial_vardecl(dvar_t*dv, type_t*t)
+{
+var_t*vlist=sial_newvlist();
+var_t*var;
+
+	/* type *and* dv can have ref counts. First comes from typedef parsing
+	   second comes from the declaration itself */
+	dv->ref += t->ref;
+
+	/* add one level of ref for arrays */
+	if(dv->idx) dv->ref++;
+
+	/* reset ref level for tests below */
+	sial_popref(t, t->ref);
+
+	TAG(vlist);
+
+	if(!t->type) {
+
+		int sto=sial_isstor(t->typattr);
+
+		sial_freetype(t);
+		t=sial_newbtype(0);
+		t->typattr |= sto;
+	}
+	else if(t->type==V_BASE && !dv->ref) {
+
+		sial_chksign(t);
+		sial_chksize(t);
+	}
+
+	/* is this a new typedef declaration ? */
+	/* typedef is considered just like any other storage class */
+	if(sial_istdef(t->typattr)) {
+
+		sial_tdef_decl(dv, t);
+		return 0;
+	}
+
+	while(dv) {
+        
+                /* disallow var names that match against already defined vars */
+                if(dv->name[0]) {
+                    type_t *t=sial_getctype(V_TYPEDEF, dv->name, 1);
+                    if(t) {
+                    
+                        sial_freetype(t);
+                        sial_warning("Variable '%s' already defined as typedef.\n", dv->name);
+                    }
+                }
+
+		/* 
+		   some sanity checks here that apply to both var and struct 
+		   declarations 
+		*/
+		if(is_ctype(t->type) && !dv->ref) {
+
+			if(dv->name[0]) {
+
+				if(!instruct) {
+
+					if(!sial_isxtern(t->typattr)) {
+
+						sial_freesvs(vlist);
+						sial_error("struct/union instances not supported, please use pointers");
+					}
+
+				} else if(sial_ispartial(t)) {
+
+					sial_freesvs(vlist);
+					sial_error("Reference to incomplete type");
+				}
+			}
+		}
+		if(dv->nbits) { 
+
+			if(t->type != V_BASE) {
+
+				sial_freesvs(vlist);
+				sial_error("Bit fields can only be of integer type");
+
+			}
+			if(dv->idx) {
+
+				sial_freesvs(vlist);
+				sial_error("An array of bits ? Come on...");
+			}
+		}
+
+		var=sial_newvar(dv->name);
+
+		t->fct=dv->fct;
+		sial_duptype(&var->v->type, t);
+		sial_pushref(&var->v->type, dv->ref);
+
+		var->dv=dv;
+
+		TAG(var);
+
+		if(t->type == V_STRING) {
+
+			sial_setstrval(var->v, "");
+
+		} 
+
+		sial_setpos(&dv->pos);
+
+		sial_enqueue(vlist, var);
+
+		dv=dv->next;
+	}
+	sial_free(t);
+	TAG(vlist);
+	return vlist;
+}
+
+dvar_t*
+sial_newdvar(node_t*v)
+{
+dvar_t*dv;
+
+	dv=sial_alloc(sizeof(dvar_t));
+	memset(dv, 0, sizeof(dvar_t));
+	if(v) {
+		dv->name=NODE_NAME(v);
+		NODE_FREE(v);
+
+	} else {
+
+		dv->name=sial_alloc(1);
+		dv->name[0]='\0';
+	}
+	dv->refcount=1;
+	sial_setpos(&dv->pos);
+	return dv;
+}
+
+dvar_t*
+sial_dvarini(dvar_t*dv, node_t*init)
+{
+	dv->init=init;
+	return dv;
+}
+
+dvar_t*
+sial_dvarptr(int ref, dvar_t*dv)
+{
+	dv->ref+=ref;
+	return dv;
+}
+
+dvar_t*
+sial_dvaridx(dvar_t*dv, node_t*n)
+{
+	if(!dv->idx) {
+
+		dv->idx=sial_alloc(sizeof(idx_t));
+		dv->idx->nidx=0;
+	}
+	dv->idx->idxs[dv->idx->nidx++]=n;
+	return dv;
+}
+
+dvar_t*
+sial_dvarfld(dvar_t*dv, node_t*n)
+{
+
+	if(n) {
+
+		value_t *va=sial_exenode(n);
+
+		/* get the value_t for the bits */
+		if(!va) dv->nbits=0;
+		else {
+			dv->nbits=unival(va);
+			sial_freeval(va);
+		}
+		NODE_FREE(n);
+
+	} else dv->nbits=0;
+
+	dv->bitfield=1;
+	return dv;
+}
+
+dvar_t*
+sial_dvarfct(dvar_t*dv, var_t*fargs)
+{
+	dv->fct=1;
+	dv->fargs=fargs;
+	return dv;
+}
+
+dvar_t*
+sial_linkdvar(dvar_t*dvl, dvar_t*dv)
+{
+dvar_t*v;
+
+	/* need to keep declaration order for variable initialization */
+	if(dv) {
+
+		for(v=dvl; v->next; v=v->next);
+		dv->next=0;
+		v->next=dv;
+	}
+	return dvl;
+}
+
+idx_t *
+sial_newidx(node_t*n)
+{
+idx_t *idx;
+
+	if(!instruct) {
+
+		sial_error("Array supported only in struct/union declarations");
+	}
+	idx=sial_alloc(sizeof(idx_t));
+	idx->nidx=1;
+	idx->idxs[0]=n;
+	return idx;
+}
+
+idx_t *
+sial_addidx(idx_t *idx, node_t*n)
+{
+	if(idx->nidx==MAXIDX) {
+
+		sial_error("Maximum number of dimension is %d", MAXIDX);
+	}
+	idx->idxs[idx->nidx++]=n;
+	return idx;
+}
+
+static svlist svs[S_MAXDEEP];
+static glo *globs=0;
+int svlev=0;
+
+void
+sial_refarray(value_t *v, int inc)
+{
+array_t*ap, *na;
+
+	if(!v->arr) return;
+	v->arr->ref+=inc;
+	if(v->arr->ref == 0) {
+
+		/* free all array element. */
+		for(ap=v->arr->next; ap!=v->arr; ap=na) {
+
+			na=ap->next;
+			sial_freeval(ap->idx);
+			sial_freeval(ap->val);
+			sial_free(ap);
+		}
+		sial_free(v->arr);
+		v->arr=0;
+
+	} else {
+
+		/* do the same to all sub arrays */
+		for(ap=v->arr->next; ap!=v->arr; ap=na) {
+
+			na=ap->next;
+			sial_refarray(ap->val, inc);
+		}
+	}
+		
+}
+
+void
+sial_freedata(value_t *v)
+{
+	
+	if(is_ctype(v->type.type) || v->type.type == V_STRING) {
+
+		if(v->v.data) sial_free(v->v.data);
+		v->v.data=0;
+
+	}
+	sial_refarray(v, -1);
+}
+
+void
+sial_dupdata(value_t *v, value_t *vs)
+{
+
+	if(is_ctype(vs->type.type) || vs->type.type == V_STRING) {
+
+		v->v.data=sial_alloc(vs->type.size);
+		memmove(v->v.data, vs->v.data, vs->type.size);
+	}
+}
+
+void
+sial_freeval(value_t *v)
+{
+	if(!v) return;
+	sial_freedata(v);
+	sial_free(v);
+}
+
+
+void
+sial_freevar(var_t*v)
+{
+
+	if(v->name) sial_free(v->name);
+	sial_freeval(v->v);
+	sial_freedvar(v->dv);
+	sial_free(v);
+}
+
+void 
+sial_enqueue(var_t*vl, var_t*v)
+{
+	v->prev=vl->prev;
+	v->next=vl;
+	vl->prev->next=v;
+	vl->prev=v;
+}
+
+void
+sial_dequeue(var_t*v)
+{
+	v->prev->next=v->next;
+	v->next->prev=v->prev;
+	v->next=v->prev=v;
+}
+
+/*
+	This function is called to validate variable declarations.
+	No array declarations for variables (this can only be checked in
+	sial_stat_decl() and sial_file_decl() using the idx field of the var struct).
+	The same goes for nbits: bit fields are only valid in struct declarations.
+*/
+void
+sial_validate_vars(var_t*svs)
+{
+var_t*v, *next;
+
+	if(!svs) return;
+
+	for(v=svs->next; v!=svs; v=next) {
+
+		next=v->next;
+
+		/* just remove extern variables */
+		if(sial_isxtern(v->v->type.typattr)) {
+
+			sial_dequeue(v);
+			sial_freevar(v);
+
+		} else {
+
+			if(v->dv->idx) {
+
+				sial_freesvs(svs);
+				sial_error("Array instanciations not supported.");
+
+			} 
+			if(v->dv->nbits) {
+
+				sial_freesvs(svs);
+				sial_error("Syntax error. Bit field unexpected.");
+			}
+		}
+	}
+}
+
+var_t*
+sial_inlist(char *name, var_t*vl)
+{
+var_t*vp;
+
+	if(vl) {
+
+		for(vp=vl->next; vp!=vl; vp=vp->next) {
+
+			if(!strcmp(name, vp->name)) {
+
+				return vp;
+
+			}
+
+		}
+	}
+	return 0;
+}
+
+static var_t*apiglobs;
+
+void
+sial_setapiglobs(void)
+{
+	apiglobs=sial_newvlist();
+	sial_add_globals(apiglobs);
+}
+
+static var_t*
+sial_inglobs(char *name)
+{
+var_t*vp;
+glo *g;
+
+	for(g=globs; g; g=g->next) {
+
+		if((vp=sial_inlist(name, g->vv))) return vp;
+	}
+	return 0;
+}
+
+
+void
+sial_chkglobsforvardups(var_t*vl)
+{
+var_t*v;
+
+	if(!vl) return;
+
+	for(v=vl->next; v != vl; v=v->next) {
+
+		var_t*vg;
+
+		if(v->name[0] && (vg=sial_inglobs(v->name))) {
+
+			/* if this is a prototype declaration then skip it */
+			if(v->dv && v->dv->fct) continue;
+
+			sial_rerror(&v->dv->pos, "Duplicate declaration of variable '%s', defined at %s:%d"
+				, v->name, vg->dv->pos.file, vg->dv->pos.line);
+		}
+	}
+}
+
+/*
+   This function scans a list of variables and looks for those that have not been initialized yet.
+   Globals, statics and autos all get initialized through here.
+*/
+static void
+sial_inivars(var_t*sv)
+{
+var_t*v;
+
+	if(!sv) return;
+
+	for(v=sv->next; v!=sv; v=v->next) {
+
+		/* check if we need to initialize it */
+		if(!v->ini && v->dv && v->dv->init) {
+
+			value_t *val;
+			srcpos_t pos;
+
+			sial_curpos(&v->dv->pos, &pos);
+
+			if((val=sial_exenode(v->dv->init))) {
+
+				sial_chkandconvert(v->v, val);
+				sial_freeval(val);
+				v->ini=1;
+
+			} else {
+
+				sial_rwarning(&v->dv->pos, "Error initializing '%s'", v->name);
+			}
+			sial_curpos(&pos, 0);
+		}
+	}
+}
+
+/* return the last set of globals */
+var_t*
+sial_getcurgvar()
+{
+	if(!globs) return 0;
+	return globs->vv;
+}
+
+void *
+sial_add_globals(var_t*vv)
+{
+glo *ng=sial_alloc(sizeof(glo));
+
+	sial_inivars(vv);
+	ng->vv=vv;
+	ng->next=globs;
+	sial_chkglobsforvardups(vv);
+	globs=ng;
+	return ng;
+}
+
+void
+sial_rm_globals(void *vg)
+{
+glo *g=(glo*)vg;
+
+	if(globs) {
+
+		if(globs==g) globs=g->next;
+		else {
+
+			glo *gp;
+
+			for(gp=globs; gp; gp=gp->next) {
+
+				if(gp->next==g) {
+
+					gp->next=g->next;
+
+				}
+
+			}
+		}
+		sial_free(g);
+	}
+}
+
+
+
+/*
+	This is where we implement the variable scoping.
+*/
+var_t*
+sial_getvarbyname(char *name, int silent, int local)
+{
+var_t*vp;
+int i, aidx=0;
+ull apiv;
+
+	for(i=svlev-1; i>=0; i--) {
+
+		if((vp=sial_inlist(name, svs[i].svs))) {
+
+			return vp;
+		}
+		if(svs[i].type==S_AUTO && !aidx) aidx=i;
+
+		/* when we get to the function we're finished */
+		if(svs[i].type==S_FILE) break;
+	}
+
+	/* haven't found any variable named like this one */
+	/* first check the globals */
+	if(!(vp=sial_inglobs(name))) {
+
+		int off=0;
+
+		/* check the API for a corresponding symbol */
+		/* Jump over possible leading "IMG_" prefix */
+		if(!strncmp(name, "IMG_", 4)) off=4;
+		if(!local && API_GETVAL(name+off, &apiv)) {
+
+			vp=sial_newvar(name);
+			vp->ini=1;
+
+			sial_defbtype(vp->v, apiv);
+			vp->v->mem=apiv;
+
+			/* put this on the global list */
+			sial_enqueue(apiglobs, vp);
+		}
+		else {
+
+			if(silent) return 0;
+			sial_error("Unknown variable [%s]", name);
+		}
+	}
+	return vp;
+}
+
+value_t *
+sial_exists(value_t *vname)
+{
+char *name=sial_getptr(vname, char);
+
+	return sial_defbtype(sial_newval(), (sial_getvarbyname(name, 1, 0) || sial_funcexists(name)));
+}
+
+/* get a new empty vlist */
+var_t*
+sial_newvlist()
+{
+var_t*p=sial_newvar("");
+	TAG(p);
+	TAG(p->name);
+	return p;
+}
+
+/* this is called when we duplicate a list of automatic variables */
+var_t*
+sial_dupvlist(var_t*vl)
+{
+var_t*nv=(var_t*)sial_newvlist(); /* new root */
+var_t*vp;
+
+	for(vp=vl->next; vp !=vl; vp=vp->next) {
+
+		var_t*v=sial_newvar(vp->name); /* new var_t*/
+
+		v->dv=vp->dv;
+		v->dv->refcount++;
+		v->ini=vp->ini;
+		sial_dupval(v->v, vp->v);
+
+		/* we start with a new array for automatic variable */
+		sial_refarray(v->v, -1);
+		v->v->arr=0;
+		sial_setarray(&v->v->arr);
+		
+		/* can't check ctypes for initialisation */
+		if(is_ctype(v->v->type.type)) v->ini=1;
+		sial_enqueue(nv, v);
+
+	}
+	return nv;
+}
+
+void
+sial_addtolist(var_t*vl, var_t*v)
+{
+	if(!v->name[0] || !sial_inlist(v->name, vl)) {
+
+		sial_enqueue(vl, v);
+
+	} else {
+
+		/* if this is a prototype declaration then skip it */
+		if(v->dv && v->dv->fct) return;
+
+		sial_error("Duplicate declaration of variable %s", v->name);
+	}
+}
+
+static void
+sial_chkforvardups(var_t*vl)
+{
+var_t*v;
+
+	if(!vl) return;
+
+	for(v=vl->next; v!=vl; v=v->next) {
+
+		var_t*v2=v->next;
+
+		for(v2=v->next; v2!=vl; v2=v2->next) {
+
+			if(v2->name[0] && !strcmp(v->name, v2->name)) {
+
+				sial_rerror(&v2->dv->pos, "Duplicate declaration of variable '%s'", v->name);
+
+			}
+		}
+	}
+}
+
+static int takeproto=0;
+void sial_settakeproto(int v) { takeproto=v; }
+
+
+/* 
+	This function scans a new list of declared variables
+	searching for static variables.
+*/
+void
+sial_addnewsvs(var_t*avl, var_t*svl, var_t*nvl)
+{
+var_t*v;
+
+	if(nvl) {
+
+		for(v=nvl->next; v!=nvl; ) {
+
+			var_t*next;
+
+			/* save next before sial_enqueue() trashes it ... */
+			next=v->next;
+
+			/* if this is an external variable or prototype function declaration
+			   skip it */
+			if((!takeproto && v->dv->fct && !v->dv->ref) || sial_isxtern(v->v->type.typattr)) {
+
+				v=next;
+				continue;
+			}
+
+			if(sial_isstatic(v->v->type.typattr)) {
+
+				sial_addtolist(svl, v);
+
+			} else {
+
+				sial_addtolist(avl, v);
+			}
+			/* with each new variable, check for duplicate declarations */
+			sial_chkforvardups(avl);
+			sial_chkforvardups(svl);
+
+			v=next;
+		}
+		/* discard nvl's root */
+		sial_freevar(nvl);
+	}
+}
+
+int
+sial_addsvs(int type, var_t*sv)
+{
+int curlev=svlev;
+
+	if(svlev==S_MAXDEEP) {
+
+		sial_error("Svars stack overflow");
+
+	} else {
+
+		svs[svlev].type=type;
+		svs[svlev].svs=sv;
+		svlev++;
+
+		/* perform automatic initializations */
+		sial_inivars(sv);
+		
+		/* if S_FILE then we are entering a function so start a new set of
+		   stack variables */
+		if(type == S_FILE ) {
+
+			(void)sial_addsvs(S_AUTO, (var_t*)sial_newvlist());
+
+		}
+	}
+	return curlev;
+}
+
+void
+sial_add_statics(var_t*var)
+{
+int i;
+
+	for(i=svlev-1;i>=0;i--) {
+
+		if(svs[i].type==S_FILE ) {
+
+			if(svs[i].svs)
+				sial_enqueue(svs[i].svs, var);
+			else
+				svs[i].svs=var;
+			return;
+
+		}
+	}
+	sial_rwarning(&var->dv->pos, "No static context for var %s.", var->name);
+}
+
+void sial_freesvs(var_t*v)
+{
+var_t*vp;
+
+	for(vp=v->next; vp != v; ) {
+
+		var_t*vn=vp->next;
+
+		sial_freevar(vp);
+
+		vp=vn;
+	}
+	sial_freevar(v);
+}
+
+int
+sial_getsvlev() { return svlev; }
+
+/* reset the current level of execution and free up any automatic
+   variables. */
+void
+sial_setsvlev(int newlev)
+{
+int lev;
+
+	for(lev=svlev-1; lev>=newlev; lev--) {
+
+			if(svs[lev].type==S_AUTO) {
+
+				sial_freesvs(svs[lev].svs);
+
+			}
+
+	}
+	svlev=newlev;
+}
+
+/*
+	called by the 'var in array' bool expression.
+*/
+int
+sial_lookuparray(node_t*vnode, node_t*arrnode)
+{
+value_t *varr=NODE_EXE(arrnode);
+array_t*ap, *apr=varr->arr;
+value_t *val;
+int b=0;
+
+	val=NODE_EXE(vnode);
+
+	if(apr) {
+
+		for(ap=apr->next; ap != apr; ap=ap->next) {
+
+			if(VAL_TYPE(ap->idx) == VAL_TYPE(val)) {
+
+				switch(VAL_TYPE(val)) {
+				case V_STRING:	b=(!strcmp(ap->idx->v.data, val->v.data)); break;
+				case V_BASE:	b=(unival(ap->idx)==unival(val)); break;
+				case V_REF:	
+					if(sial_defbsize()==4) 
+						b=(ap->idx->v.ul==val->v.ul);
+					else
+						b=(ap->idx->v.ull==val->v.ull);
+				break;
+				default:
+					sial_rerror(&vnode->pos, "Invalid indexing type %d", VAL_TYPE(val));
+				}
+				if(b) break;
+			}
+
+		}
+	}
+	sial_freeval(val);
+	sial_freeval(varr);
+	return b;
+}
+
+/*
+	The actual for(i in array) core...
+*/
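+/*
+	i.e. a sial script loop of the form (script code, not C; 'tasks' and
+	'pid' are hypothetical script variables):
+
+		for(pid in tasks) {
+			count = count + 1;
+		}
+
+	iterates 'pid' over every index stored in the 'tasks' array.
+*/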
+void
+sial_walkarray(node_t*varnode, node_t*arrnode, void (*cb)(void *), void *data)
+{
+value_t *v;
+value_t *av;
+array_t*ap, *apr;
+
+	sial_setini(varnode);
+	v=NODE_EXE(varnode);
+
+	av=NODE_EXE(arrnode);
+
+	if(av->arr) {
+
+		for(apr=av->arr, ap=apr->next; ap != apr; ap=ap->next) {
+
+			/* we set the value_t of the variable */
+			sial_setval(v,ap->idx);
+
+			(cb)(data);
+
+		}
+	}
+	sial_freeval(v);
+	sial_freeval(av);
+}
+
+/* scan the current array for a specific index and return its element.
+   XXX should use a hash table here for speed and scalability */
+array_t*
+sial_getarrval(array_t**app, value_t *idx)
+{
+array_t*ap, *apr;
+
+	/* sial_setarray(app); AAA comment out */
+	apr=*app;
+
+	for(ap=apr->next; ap != apr; ap=ap->next) {
+
+		if(ap->idx->type.type == idx->type.type) {
+
+		int b=0;
+
+			switch(idx->type.type) {
+			case V_STRING: b=(!strcmp(ap->idx->v.data, idx->v.data));
+			break;
+			case V_BASE: b=(unival(ap->idx)==unival(idx));
+			break;
+			case V_REF:	
+				if(sial_defbsize()==4) 
+					b=(ap->idx->v.ul==idx->v.ul);
+				else
+					b=(ap->idx->v.ull==idx->v.ull);
+			break;
+			default:
+				sial_error("Invalid index type %d", idx->type.type);
+			}
+
+			if(b) {
+
+				return ap;
+
+			}
+		}
+	}
+
+	/* we have not found this index, create one */
+	ap=(array_t*)sial_calloc(sizeof(array_t));
+	ap->idx=sial_makebtype(0);
+	sial_dupval(ap->idx, idx);
+
+	/* just give it an int value_t of 0 for now */
+	ap->val=sial_makebtype(0);
+
+	/* we must set the same reference count as the
+	   upper level array_t */
+	ap->val->arr->ref=apr->ref;
+
+	/* link it in */
+	ap->prev=apr->prev;
+	ap->next=apr;
+	apr->prev->next=ap;
+	apr->prev=ap;
+	ap->ref=0;
+	return ap;
+}
+
+value_t *
+sial_intindex(value_t *a, int idx)
+{
+value_t *v=sial_makebtype(idx);
+array_t*ap=sial_getarrval(&a->arr, v);
+
+	sial_dupval(v, ap->val);
+	return v;
+}
+
+value_t *
+sial_strindex(value_t *a, char *idx)
+{
+value_t *v=sial_makestr(idx);
+array_t*ap=sial_getarrval(&a->arr, v);
+
+	sial_dupval(v, ap->val);
+	return v;
+}
+
+
+void
+sial_setarrbval(array_t*a, int val)
+{
+	sial_defbtype(a->val, (ull)val);
+}
+
+array_t*
+sial_addarrelem(array_t**arr, value_t *idx, value_t *val)
+{
+array_t*na;
+
+	na=sial_getarrval(arr, idx);
+
+	/* copy new val over */
+	sial_freeval(na->val);
+	na->val=val;
+
+	return na;
+}
+
+/* insert a variable at the end of the list */
+static void
+sial_varinsert(var_t*v)
+{
+int i;
+
+	for(i=svlev-1;i>=0;i--) {
+
+		if(svs[i].type==S_AUTO) {
+
+			sial_enqueue(svs[i].svs, v);
+			break;
+		}
+	}
+}
+
+/* Duplicate and add a set of variables. Used to set up a function execution.
+   The new variables are the actual parameters of the function, so we mark
+   them as being initialized.
+*/
+void
+sial_add_auto(var_t*nv)
+{
+	nv->ini=1;
+	sial_varinsert(nv);
+}
+
+void
+sial_valindex(value_t *var, value_t *idx, value_t *ret)
+{
+	if(is_ctype(idx->type.type)) {
+
+		sial_error("Invalid indexing type");
+
+	} else {
+
+		array_t*a;
+
+		a=sial_getarrval(&var->arr, idx);
+
+		/* this is the first level of indexing through a variable */
+		sial_dupval(ret, a->val);
+		ret->set=1;
+		ret->setval=a->val;
+	}
+}
+
+void
+sial_addvalarray(value_t*v, value_t*idx, value_t*val)
+{
+	sial_addarrelem(&v->arr, idx, val);
+	sial_freeval(idx);
+}
+
+static void
+prtval(value_t*v)
+{
+value_t*fmt=sial_makestr("%?");
+
+	sial_printf(fmt, v, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0);
+	sial_freeval(fmt);
+}
+
+static void
+prlevel(char *name, value_t*root, int level)
+{
+ARRAY_S *arr;
+
+	for(arr=root->arr->next; arr != root->arr; arr=arr->next) {
+
+		printf("%*s%s[", level*3, "", name);
+		prtval(arr->idx);
+		printf("]=");
+		prtval(arr->val);
+		printf("\n");
+		prlevel(name, arr->val, level+1);
+	}
+}
+
+/* sial_prarr builtin */
+value_t*
+sial_prarr(value_t*vname, value_t*root)
+{
+char *name=sial_getptr(vname, char);
+	printf("%s=", name);
+	prtval(root);
+	printf("\n");
+	prlevel(name, root, 1);
+	return sial_makebtype(0);
+}
+
+var_t*
+sial_newvar(char *name)
+{
+var_t*v=sial_calloc(sizeof(var_t));
+char *myname=sial_alloc(strlen(name)+1);
+
+	TAG(myname);
+	strcpy(myname,name);
+	v->name=myname;
+	v->v=sial_makebtype(0);
+	v->v->setval=v->v;
+	v->next=v->prev=v;
+	return v;
+}
+
+
+typedef struct {
+	node_t*n;
+	char name[1];
+} vnode_t ;
+
+static int insizeof=0;
+void sial_setinsizeof(int v) { insizeof=v;}
+
+value_t *
+sial_exevar(void *arg)
+{
+vnode_t *vn = arg;
+value_t *nv;
+var_t*curv;
+srcpos_t pos;
+
+	sial_curpos(&vn->n->pos, &pos);
+
+	if(!(curv=sial_getvarbyname(vn->name, 0, 0))) {
+
+		sial_error("Oops! Var ref1.[%s]", vn->name);
+
+	}
+	if(!curv->ini && !insizeof) {
+
+		sial_error("Variable [%s] used before being initialized", curv->name);
+
+	}
+
+	nv=sial_newval();
+	sial_dupval(nv,curv->v);
+	nv->set=1;
+	nv->setval=curv->v;
+	nv->setfct=sial_setfct;
+
+	sial_curpos(&pos, 0);
+
+	return nv;
+}
+
+/* make sure a variable is flagged as being initialized */
+void
+sial_setini(node_t*n)
+{
+	if((void*)n->exe == (void*)sial_exevar) {
+
+		var_t*v=sial_getvarbyname(((vnode_t*)(n->data))->name, 0, 0);
+		v->ini=1;
+	}
+}
+
+
+/* get the name of a function through a variable */
+char *
+sial_vartofunc(node_t*name)
+{
+char *vname=NODE_NAME(name);
+value_t *val;
+
+	/* if the node is a general expression, then vname is 0 */
+	if(!vname) {
+
+		val=sial_exenode(name);
+
+	} else  {
+
+		var_t*v;
+		
+		v=sial_getvarbyname(vname, 1, 1);
+		if(!v) return vname;
+		val=v->v;
+	}
+
+	switch(val->type.type)
+	{
+		case V_STRING:
+		{
+		char *p=sial_alloc(val->type.size+1);
+			/* return the value_t of that string variable */
+			strcpy(p, val->v.data);
+			sial_free(vname);
+			return p;
+		}
+		default:
+			/* return the name of the variable itself */
+			sial_error("Invalid type for function pointer, expected 'string'.");
+			return vname;
+	}
+}
+
+char *
+sial_namevar(vnode_t*vn)
+{
+char *p;
+
+	p=sial_strdup(vn->name);
+	TAG(p);
+	return p;
+}
+
+static void
+sial_freevnode(vnode_t*vn)
+{
+	sial_free(vn);
+}
+
+/*
+        create or return existing variable node.
+*/      
+node_t*
+sial_newvnode(char *name)
+{
+node_t*n=sial_newnode();
+vnode_t*vn=sial_alloc(sizeof(vnode_t)+strlen(name)+1);
+
+	TAG(vn);
+
+	strcpy(vn->name, name);
+	n->exe=(xfct_t)sial_exevar;
+	n->free=(ffct_t)sial_freevnode;
+	n->name=(nfct_t)sial_namevar;
+	n->data=vn;
+	vn->n=n;
+
+	sial_setpos(&n->pos);
+
+        return n;
+}
+
+#define TO (*to)
+#define FRM (*frm)
+
+void
+sial_cparrelems(array_t**to, array_t**frm)
+{
+array_t*ap;
+
+	if(FRM) {
+
+		sial_setarray(to);
+		for(ap=FRM->next; ap!=FRM; ap=ap->next) {
+
+			array_t*na=sial_calloc(sizeof(array_t));
+
+			/* copy value_ts */
+			sial_dupval(na->idx, ap->idx);
+			sial_dupval(na->val, ap->val);
+
+			/* link it in */
+			na->prev=TO->prev;
+			na->next=TO;
+			TO->prev->next=na;
+			TO->prev=na;
+			na->ref=1;
+
+			/* copy that branch */
+			sial_cparrelems(&na->val->arr, &ap->val->arr);
+		}
+	}
+}
+
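
sial_cparrelems() above splices each copied element into a circular, doubly-linked list whose head points to itself when empty; the same idiom shows up in sial_newvar() (v->next=v->prev=v) and in the builtin argument lists later in this patch. Below is a minimal, self-contained sketch of that insertion pattern; the elem type and list_* names are invented for illustration and are not part of libsial.

#include <stdio.h>
#include <stdlib.h>

/* illustrative element: the head node itself carries no payload */
typedef struct elem {
	struct elem *next, *prev;
	int payload;
} elem;

/* initialize an empty circular list: head points to itself */
static void list_init(elem *head) { head->next = head->prev = head; }

/* append a new element just before the head, i.e. at the tail */
static void list_append(elem *head, int payload)
{
	elem *e = calloc(1, sizeof(elem));

	e->payload = payload;
	e->prev = head->prev;	/* the same four pointer updates   */
	e->next = head;		/* performed by sial_cparrelems()  */
	head->prev->next = e;
	head->prev = e;
}

int main(void)
{
	elem head, *p;

	list_init(&head);
	list_append(&head, 1);
	list_append(&head, 2);
	/* walk the list the way prlevel() walks root->arr (cleanup omitted) */
	for (p = head.next; p != &head; p = p->next)
		printf("%d\n", p->payload);
	return 0;
}

Using the head as a sentinel means insertion and traversal never have to special-case the empty list, which is why the loops above compare against the head pointer rather than against NULL.
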
--- crash/extensions/libsial/sial-lsed.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial-lsed	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,32 @@
+s/yyback/sialback/g
+s/yybgin/sialbgin/g
+s/yycrank/sialcrank/g
+s/yyerror/sialerror/g
+s/yyestate/sialestate/g
+s/yyextra/sialextra/g
+s/yyfnd/sialfnd/g
+s/yyin/sialin/g
+s/yyinput/sialinput/g
+s/yyleng/sialleng/g
+s/yylex/siallex/g
+s/yylineno/siallineno/g
+s/yylook/siallook/g
+s/yylsp/siallsp/g
+s/yylstate/siallstate/g
+s/yylval/siallval/g
+s/yymatch/sialmatch/g
+s/yymorfg/sialmorfg/g
+s/yyolsp/sialolsp/g
+s/yyout/sialout/g
+s/yyoutput/sialoutput/g
+s/yyprevious/sialprevious/g
+s/yysbuf/sialsbuf/g
+s/yysptr/sialsptr/g
+s/yysvec/sialsvec/g
+s/yytchar/sialtchar/g
+s/yytext/sialtext/g
+s/yytop/sialtop/g
+s/yyunput/sialunput/g
+s/yyvstop/sialvstop/g
+s/yywrap/sialwrap/g
+s/yydebug/sialdebug/g
--- crash/extensions/libsial/sial.y.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial.y	2007-10-30 11:13:53.000000000 -0400
@@ -0,0 +1,436 @@
+%{
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include "sial.h"
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <malloc.h>
+// to help resolve type name versus var name ambiguity...
+#define VARON  needvar=1;
+#define VAROFF needvar=0;
+static int sial_toctype(int);
+int sialerror(char *);
+%}
+
+%union {
+	node_t	*n;
+	char 	*s;
+	int	 i;
+	type_t	*t;
+	dvar_t	*d;
+	var_t	*v;
+}
+
+%token	<i>	STATIC DOBLK WHILE RETURN TDEF EXTERN VARARGS
+%token  <i>	CHAR SHORT FLOAT DOUBLE VOID INT UNSIGNED LONG SIGNED VOLATILE REGISTER STRTYPE CONST
+%token	<i>	BREAK CONTINUE DO FOR FUNC
+%token	<i>	IF PATTERN BASETYPE
+%token	<i>	STRUCT ENUM UNION
+%token	<i>	SWITCH CASE DEFAULT
+%token	<n>	ELSE CEXPR
+%token	<n>	VAR NUMBER STRING
+%token  <t>	TYPEDEF
+%token	<i>	'(' ')' ',' ';' '{' '}'
+
+%type	<n>	termlist term opt_term opt_termlist
+%type	<n>	stmt stmtlist expstmt stmtgroup
+%type	<n>	var opt_var c_string
+%type	<n>	for if while switch case caselist caseconstlist caseconst
+
+%type	<d>	dvar dvarlist dvarini
+
+%type	<v>	one_var_decl var_decl_list var_decl farglist decl_list
+
+%type	<t>	type ctype rctype btype_list tdef typecast
+%type	<t>	storage_list string type_decl
+%type	<t>	ctype_decl
+%type   <i>	btype storage ctype_tok print
+
+%right	<i>	ASSIGN ADDME SUBME MULME DIVME MODME ANDME XORME
+%right	<i>	ORME SHLME SHRME
+%right	<i>	'?'
+%left	<i>	IN
+%left	<i>	BOR
+%left	<i>	BAND
+%left	<i>	OR
+%left	<i>	XOR
+%left	<i>	AND
+%left	<i>	EQ NE
+%left	<i>	GE GT LE LT
+%left	<i>	SHL SHR
+%left	<i>	ADD SUB
+%left	<i>	MUL DIV MOD
+%left	<i>	PRINT PRINTO PRINTD PRINTX TAKE_ARR
+%right	<i>	ADROF PTRTO PTR UMINUS SIZEOF TYPECAST POSTINCR PREINCR POSTDECR PREDECR INCR DECR FLIP NOT
+%left	<i>	ARRAY CALL INDIRECT DIRECT
+
+%%
+
+file:
+	/* empty */
+	| fileobj
+	| file fileobj
+	;
+
+fileobj:
+	function
+	| var_decl ';'			{ sial_file_decl($1); }
+	| ctype_decl ';'		{ ; }
+	;
+
+function:
+	one_var_decl stmtgroup
+					{ sial_newfunc($1, $2); }
+	;
+
+
+for:
+	FOR '(' opt_termlist ';' opt_term ';' opt_termlist ')' expstmt
+					{ $$ = sial_newstat(FOR, 4, $3, $5, $7, $9); }
+	| FOR '(' var IN term ')' expstmt
+					{ $$ = sial_newstat(IN, 3, $3, $5, $7); }
+	;
+
+if:
+	IF '(' {VARON} term {VAROFF} ')'		{ $$ = $4; }
+	;
+
+switch :
+	SWITCH '(' {VARON} term {VAROFF} ')' '{' caselist '}'
+
+					{ $$ = sial_newstat(SWITCH, 2, $4, $8); }
+	;
+
+caselist:
+	case
+	| caselist case 		{ $$ = sial_addcase($1, $2); }
+	;
+
+case :
+	caseconstlist stmtlist		{ $$ = sial_newcase($1, $2); }
+	;
+
+caseconst:
+	CASE term ':'			{ $$ = sial_caseval(0, $2); }
+	| DEFAULT ':'			{ $$ = sial_caseval(1, 0); }
+	;
+
+caseconstlist:
+	caseconst
+	| caseconstlist caseconst	{ $$ = sial_addcaseval($1, $2); }
+	;
+
+opt_term:
+	/* empty */			{ $$ = 0; }
+	| term
+	;
+
+termlist:
+	  term
+	| termlist ',' term		{ $$ = sial_sibling($1, $3); }
+	;
+
+opt_termlist:
+	  /* empty */			{ $$ = 0; }
+	| termlist
+	;
+
+stmt:
+	  termlist ';'			{ $$ = sial_newstat(PATTERN, 1, $1); }
+	| while expstmt			{ $$ = sial_newstat(WHILE, 2, $1, $2); }
+	| switch
+	| for
+	| if expstmt ELSE expstmt	{ $$ = sial_newstat(IF, 3, $1, $2, $4); }
+	| if expstmt			{ $$ = sial_newstat(IF, 2, $1, $2); }
+	| DO expstmt WHILE '(' term ')' ';'
+					{ $$ = sial_newstat(DO, 2, $2, $5); }
+	| RETURN term ';'		{ $$ = sial_newstat(RETURN, 1, $2); }
+	| RETURN ';'			{ $$ = sial_newstat(RETURN, 1, NULLNODE); }
+	| BREAK ';'			{ $$ = sial_newstat(BREAK, 0); }
+	| CONTINUE ';'			{ $$ = sial_newstat(CONTINUE, 0); }
+	| ';'				{ $$ = 0; }
+	;
+
+stmtlist:
+	   /* empty */			{ $$ = 0; }
+	| stmt
+	| stmtgroup
+	| stmtlist stmt			{ $$ = sial_addstat($1, $2); }
+	| stmtlist stmtgroup		{ $$ = sial_addstat($1, $2); }
+	;
+
+stmtgroup:
+	'{' decl_list stmtlist '}'	{ $$ = sial_stat_decl($3, $2); }
+	| '{' stmtlist '}'  		{ $$ = sial_stat_decl($2, 0); }
+	;
+
+expstmt:
+	stmt
+	| stmtgroup
+	;
+
+term:
+
+	  term '?' term ':' term %prec '?'
+	 				{ $$ = sial_newop(CEXPR, 3, $1, $3, $5); }
+	| term BOR 	term		{ $$ = sial_newop(BOR, 2, $1, $3); }
+	| term BAND	term		{ $$ = sial_newop(BAND, 2, $1, $3); }
+	| NOT term			{ $$ = sial_newop(NOT, 1, $2); }
+	| term ASSIGN	term		{ $$ = sial_newop(ASSIGN, 2, $1, $3); }
+	| term EQ	term		{ $$ = sial_newop(EQ, 2, $1, $3); }
+	| term GE	term		{ $$ = sial_newop(GE, 2, $1, $3); }
+	| term GT	term		{ $$ = sial_newop(GT, 2, $1, $3); }
+	| term LE	term		{ $$ = sial_newop(LE, 2, $1, $3); }
+	| term LT	term		{ $$ = sial_newop(LT, 2, $1, $3); }
+	| term IN	term		{ $$ = sial_newop(IN, 2, $1, $3); }
+	| term NE	term		{ $$ = sial_newop(NE, 2, $1, $3); }
+	| '(' term ')'			{ $$ = $2; }
+	| term ANDME	term		{ $$ = sial_newop(ANDME, 2, $1, $3); }
+	| PTR term %prec PTRTO 		{ $$ = sial_newptrto($1, $2); }
+	| AND term %prec ADROF		{ $$ = sial_newadrof($2); }
+	| term OR	term		{ $$ = sial_newop(OR, 2, $1, $3); }
+	| term ORME	term		{ $$ = sial_newop(ORME, 2, $1, $3); }
+	| term XOR	term		{ $$ = sial_newop(XOR, 2, $1, $3); }
+	| term XORME	term		{ $$ = sial_newop(XORME, 2, $1, $3); }
+	| term SHR	term		{ $$ = sial_newop(SHR, 2, $1, $3); }
+	| term SHRME	term		{ $$ = sial_newop(SHRME, 2, $1, $3); }
+	| term SHL	term		{ $$ = sial_newop(SHL, 2, $1, $3); }
+	| term SHLME	term		{ $$ = sial_newop(SHLME, 2, $1, $3); }
+	| term ADDME	term		{ $$ = sial_newop(ADDME, 2, $1, $3); }
+	| term SUBME	term		{ $$ = sial_newop(SUBME, 2, $1, $3); }
+	| term MULME	term		{ $$ = sial_newop(MULME, 2, $1, $3); }
+	| term DIV	term		{ $$ = sial_newop(DIV, 2, $1, $3); }
+	| term DIVME	term		{ $$ = sial_newop(DIVME, 2, $1, $3); }
+	| term MODME	term		{ $$ = sial_newop(MODME, 2, $1, $3); }
+	| term MOD	term		{ $$ = sial_newop(MOD, 2, $1, $3); }
+	| term SUB	term		{ $$ = sial_newop(SUB, 2, $1, $3); }
+	| term ADD	term		{ $$ = sial_newop(ADD, 2, $1, $3); }
+	| term PTR term	%prec MUL	{ $$ = sial_newmult($1, $3, $2); }
+	| term AND term			{ $$ = sial_newop(AND, 2, $1, $3); }
+	| SUB term %prec UMINUS		{ $$ = sial_newop(UMINUS, 1, $2); }
+	| '~' term %prec FLIP		{ $$ = sial_newop(FLIP, 1, $2); }
+	| '+' term %prec UMINUS		{ $$ = $2; }
+	| term '(' ')' %prec CALL	{ $$ = sial_newcall($1, NULLNODE); }
+	| term '(' termlist ')' %prec CALL	{ $$ = sial_newcall($1, $3); }
+	| DECR term			{ $$ = sial_newop(PREDECR, 1, $2); }
+	| INCR term			{ $$ = sial_newop(PREINCR, 1, $2); }
+	| term DECR			{ $$ = sial_newop(POSTDECR, 1, $1); }
+	| term INCR			{ $$ = sial_newop(POSTINCR, 1, $1); }
+	| term INDIRECT var		{ $$ = sial_newmem(INDIRECT, $1, $3); }
+	| term INDIRECT tdef		{ $$ = sial_newmem(INDIRECT, $1, sial_tdeftovar($3)); } // resolve ambiguity
+	| term DIRECT var		{ $$ = sial_newmem(DIRECT, $1, $3); }
+	| term DIRECT tdef		{ $$ = sial_newmem(DIRECT, $1, sial_tdeftovar($3)); } // resolve ambiguity
+	| term  '[' term ']' %prec ARRAY	
+					{ $$ = sial_newindex($1, $3); }
+	| NUMBER
+	| c_string
+	| typecast term %prec TYPECAST	{ $$ = sial_typecast($1, $2); }
+	| SIZEOF '(' var_decl ')'
+					{ $$ = sial_sizeof(sial_newcast($3), 1); }
+	| SIZEOF term			{ $$ = sial_sizeof($2, 2); }
+	| print '(' var_decl ')' %prec SIZEOF	
+					{ $$ = sial_newptype($3); }
+	| print term %prec SIZEOF	{ $$ = sial_newpval($2, $1); }
+	| TAKE_ARR '(' term ',' term ')' { $$ = $3; /* sial_newtakearr($3, $5); */ }
+	| var
+	;
+
+print:
+	PRINT
+	| PRINTX
+	| PRINTO
+	| PRINTD
+	;
+
+typecast:
+        '(' var_decl ')'	 	{ $$ = sial_newcast($2); }
+	;
+
+var_decl_list:
+	var_decl ';'
+	| var_decl_list var_decl ';'	{ sial_addnewsvs($1, $1, $2); $$=$1; }
+	;
+
+decl_list:
+	ctype_decl ';'			{ $$ = 0; }
+	| var_decl ';'			{ $$ = $1; }
+	| decl_list var_decl ';'	{ $$=$1; if($1 && $2) sial_addnewsvs($1, $1, $2); }
+	| decl_list ctype_decl ';'	{ $$ = $1; }
+	;
+
+
+var_decl:
+	type_decl dvarlist	{ needvar=0; $$ = sial_vardecl($2, $1); }
+	;
+
+one_var_decl:
+	type_decl dvar	{ needvar=0; $$ = sial_vardecl($2, $1); }
+	;
+
+type_decl:
+	type				{ $$=$1; needvar++; }
+	| storage_list			{ $$=$1; needvar++; }
+	| type storage_list		{ $$=sial_addstorage($1, $2); needvar++; }
+	| storage_list type		{ $$=sial_addstorage($2, $1); needvar++; }
+	| type_decl PTR			{ $$=$1; sial_pushref($1, $2);; needvar++; }
+	| type_decl storage_list	{ $$=sial_addstorage($1, $2); needvar++; }
+	;
+
+type:
+	ctype
+	| tdef
+	| btype_list
+	| string
+	| ctype_decl
+	;
+
+ctype_decl:
+	ctype_tok var '{' {sial_startctype(sial_toctype($1),$2);instruct++;} var_decl_list '}'
+		 			{ instruct--; $$ = sial_ctype_decl(sial_toctype($1), $2, $5); }
+	| ctype_tok tdef '{' {sial_startctype(sial_toctype($1),lastv=sial_tdeftovar($2));instruct++;} var_decl_list '}'
+		 			{ instruct--; $$ = sial_ctype_decl(sial_toctype($1), lastv, $5); }
+	| ctype_tok var '{' dvarlist '}'
+		 			{ $$ = sial_enum_decl(sial_toctype($1), $2, $4); }
+	| ctype_tok tdef '{' dvarlist '}'
+		 			{ $$ = sial_enum_decl(sial_toctype($1), sial_tdeftovar($2), $4); }
+	;
+
+ctype:
+	rctype				{ $$ = $1; }
+	| ctype_tok '{' {instruct++;} var_decl_list '}'
+					{  instruct--; $$ = sial_ctype_decl(sial_toctype($1), 0, $4); }
+	| ctype_tok '{' dvarlist '}'
+					{  $$ = sial_enum_decl(sial_toctype($1), 0, $3); }
+	;
+
+farglist:
+	/* empty */			{ $$ = 0; }
+	| one_var_decl			{ $$ = $1; }
+	| farglist ',' one_var_decl	{ 
+						if(!$1) sial_error("Syntax error"); 
+						if($3) sial_addnewsvs($1, $1, $3); $$=$1; 
+					}
+	| farglist ',' VARARGS		{ 
+						if(!$1) sial_error("Syntax error"); 
+						sial_addtolist($1, sial_newvar(S_VARARG)); $$=$1; 
+					}
+	;
+	
+
+string:
+	STRTYPE                        { 
+						type_t *t;
+						t=sial_newtype(); 
+						t->type=V_STRING;
+						t->typattr=0;
+						$$ = t;
+					}
+	;
+
+rctype:
+	ctype_tok var 			{ $$ = sial_newctype(sial_toctype($1), $2); }
+	| ctype_tok tdef		{ $$ = sial_newctype(sial_toctype($1), sial_tdeftovar($2)); }
+	;
+
+ctype_tok:
+	STRUCT
+	| ENUM
+	| UNION
+	;
+
+btype_list:
+	btype				{ $$ = sial_newbtype($1); }
+	| btype_list btype		{ $$ = sial_addbtype($1, $2); }
+	;
+
+c_string:
+	STRING				{ $$ = $1; }
+	| c_string  STRING		{ $$ = sial_strconcat($1, $2); }
+	;
+
+btype:
+	LONG
+	| CHAR
+	| INT
+	| SHORT
+	| UNSIGNED
+	| SIGNED
+	| DOUBLE
+	| FLOAT
+	| VOID
+	;
+
+storage_list:
+	storage				{ $$ = sial_newbtype($1); }
+	| storage_list storage		{ sial_error("Only one storage class can be specified"); }
+	;
+
+storage:
+	STATIC
+	| VOLATILE
+	| REGISTER
+	| TDEF
+	| EXTERN
+	| CONST
+	;
+
+dvarlist:
+	dvarini				{ $$ = $1; }
+	| dvarlist ',' dvarini		{ $$ = sial_linkdvar($1, $3); }
+	;
+
+dvarini:
+	dvar				{ $$ = $1; }
+	| dvar ASSIGN  term		{ $$ = sial_dvarini($1, $3); }
+	;
+
+dvar:
+	opt_var				{ $$ = sial_newdvar($1); needvar=0; }
+	| ':' term 			{ $$ = sial_dvarfld(sial_newdvar(0), $2); }
+	| dvar ':' term 		{ $$ = sial_dvarfld($1, $3); }
+	| dvar '[' opt_term ']'		{ $$ = sial_dvaridx($1, $3); }
+	| PTR dvar			{ $$ = sial_dvarptr($1, $2); }
+	| dvar '(' ')'			{ $$ = sial_dvarfct($1, 0); }
+	| dvar '(' farglist ')'		{ $$ = sial_dvarfct($1, $3); }
+	| '(' dvar ')'			{ $$ = $2; }
+	;
+
+opt_var:
+	/* empty */			{ $$ = 0; }
+	| var				{ $$ = $1; }
+	;
+
+var:
+	VAR				{ $$ = $1; }
+	;	
+
+tdef:
+	TYPEDEF				{ $$ = $1; }
+	;
+
+while:
+	WHILE '(' {VARON} term {VAROFF} ')'  { $$ = $4; }
+	;
+
+%%
+
+static int
+sial_toctype(int tok)
+{
+	switch(tok) {
+	case STRUCT: return V_STRUCT;
+	case ENUM: return V_ENUM;
+	case UNION: return V_UNION;
+	default: sial_error("Oops sial_toctype!"); return 0;
+	}
+}
+
+/*
+	This file gets included into the yacc specs.
+	So the "sial.h" is already included 
+*/
+
+int sialerror(char *p) { sial_error(p); return 0; }
+
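
The grammar actions above all funnel into constructors of the form sial_newop(token, count, child...) and sial_newstat(token, count, child...). Those constructors are defined elsewhere in libsial; the sketch below only illustrates the variadic calling convention the actions rely on, with invented demo_* names standing in for the real node type and allocator.

#include <stdarg.h>
#include <stdio.h>

#define MAXKIDS 4
typedef struct demo_node {
	int op;				/* operator / statement token */
	int nkids;			/* how many children follow   */
	struct demo_node *kids[MAXKIDS];/* children, in grammar order */
} demo_node;

/* collect 'nkids' node pointers the way sial_newop(op, n, ...) is
   called from the yacc actions above (tiny static pool, demo only) */
static demo_node *demo_newop(int op, int nkids, ...)
{
	static demo_node pool[64];
	static int used;
	demo_node *n = &pool[used++];
	va_list ap;
	int i;

	n->op = op;
	n->nkids = nkids;
	va_start(ap, nkids);
	for (i = 0; i < nkids && i < MAXKIDS; i++)
		n->kids[i] = va_arg(ap, demo_node *);
	va_end(ap);
	return n;
}

int main(void)
{
	/* mirrors "$$ = sial_newop(ADD, 2, $1, $3)" in shape only */
	demo_node *lhs = demo_newop(0, 0), *rhs = demo_newop(0, 0);
	demo_node *add = demo_newop('+', 2, lhs, rhs);

	printf("op=%c kids=%d\n", add->op, add->nkids);
	return 0;
}
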
--- crash/extensions/libsial/sial_builtin.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_builtin.c	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,434 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <string.h>
+#include <termio.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include "sial.h"
+
+/* information necessary for a builtin function */
+typedef struct builtin {
+
+	var_t*v;		/* resulting variable declaration after parsing */
+        bf_t *fp;		/* pointer to actual function */
+	char *proto;		/* associated prototype_t*/
+	struct builtin *next;	/* to chain them */
+
+} builtin;
+
+#define BT_EINVAL	1	/* Something is wrong and it's not ... */
+value_t*
+sial_exit(int v)
+{
+	/* Were we compiling ? */
+	sial_parseback();
+
+	/* we were running... exit () */
+	sial_dojmp(J_EXIT, &v);
+
+	/* NOT REACHED */
+	return 0;
+}
+
+value_t*
+sial_bexit(value_t *vv)
+{
+int v=sial_getval(vv);
+
+	/* we're not going back to the caller, so free
+	   the input value_t */
+	sial_freeval(vv);
+	sial_exit(v);
+	/* NOT REACHED */
+	return 0;
+}
+
+#define MAXBYTES  4000
+#define INCREMENT 16
+value_t *
+sial_getstr(value_t *vmadr)
+{
+ull madr=sial_getval(vmadr);
+char *buf=sial_alloc(MAXBYTES+1);
+char *p=buf;
+value_t *v;
+
+	/* sial has already verified that this is a V_REF */
+	/* since this is reading from a pool of unknown size,
+	   we read it in fixed-size increments, checking each chunk
+	   for the terminating '\0', up to a maximum of MAXBYTES */
+	buf[0]=0;
+	while(1) {
+
+		int i;
+
+		if(!API_GETMEM(madr, p, INCREMENT)) break;
+
+		/* have we found the '\0' yet ? */
+		/* have we found the '\0' yet ? if so, stop reading */
+		for(i=0;i<INCREMENT; i++) if(!p[i]) break;
+		if(i<INCREMENT) break;
+
+		madr+=INCREMENT;
+		if((p-buf) >= MAXBYTES) {
+			buf[MAXBYTES]='\0';
+			break;
+		}
+
+	}
+	v=sial_setstrval(sial_newval(), buf);
+	sial_free(buf);
+	return v;
+}
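
sial_getstr() has to pull a C string out of the target image without knowing its length, so it reads INCREMENT-byte chunks and scans each chunk for the terminator, capping the copy at MAXBYTES. A self-contained sketch of that chunked scan follows; read_block() is a stand-in for API_GETMEM() and simply copies from a local buffer.

#include <stdio.h>
#include <string.h>

#define CHUNK 16
#define LIMIT 4000

/* illustrative stand-in for API_GETMEM: copy from a local buffer */
static int read_block(const char *src, size_t off, char *dst, size_t n)
{
	memcpy(dst, src + off, n);
	return 1;
}

/* copy at most LIMIT bytes of a NUL-terminated string, CHUNK bytes
   at a time, stopping as soon as a chunk contains the terminator */
static void fetch_string(const char *src, char *out)
{
	size_t off = 0;

	out[0] = '\0';
	while (off < LIMIT) {
		int i;

		if (!read_block(src, off, out + off, CHUNK))
			break;
		for (i = 0; i < CHUNK; i++)
			if (!out[off + i])
				return;		/* found the '\0' */
		off += CHUNK;
	}
	out[LIMIT] = '\0';			/* truncate if no NUL seen */
}

int main(void)
{
	static const char src[64] = "hello from the target image";
	char out[LIMIT + CHUNK + 1];

	fetch_string(src, out);
	printf("%s\n", out);
	return 0;
}
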
+
+value_t *
+sial_substr(value_t *vp, value_t *vi, value_t *vj)
+{
+char *p=sial_getptr(vp, char); 
+ul i=sial_getval(vi); 
+int l=strlen(p);
+int j=(vj?sial_getval(vj):(l-i+1));
+char *s;
+value_t *v;
+
+	if((i+j-1)>l || !i) {
+
+		sial_error("Valid positions are [1..%d]\n", l);
+
+	}
+
+	s=sial_alloc(j+1);
+	strncpy(s, p+i-1, j);
+	s[j]='\0';
+	v=sial_setstrval(sial_newval(), s);
+	sial_free(s);
+	return v;
+}
+
+value_t *
+sial_getnstr(value_t* vmadr, value_t* vl)
+{
+ull madr=sial_getval(vmadr);
+ul l=sial_getval(vl);
+char *buf=sial_alloc(l+1);
+value_t *v;
+
+	if(!API_GETMEM(madr, buf, l)) buf[0]='\0';
+	else buf[l]='\0';
+	v=sial_setstrval(sial_newval(), buf);
+	sial_free(buf);
+	return v;
+}
+
+value_t *
+sial_atoi(value_t *vs, value_t* vbase)
+{
+char *s=sial_getptr(vs, char);
+int base=vbase ? sial_getval(vbase) : 0;
+
+	return sial_defbtypesize(sial_newval(), strtoull(s, 0, base), B_ULL);
+}
+
+value_t *
+sial_itoa(value_t* vi)
+{
+ull i=sial_getval(vi);
+char p[40];
+	
+	sprintf(p, "%llu", (unsigned long long)i);
+	return sial_setstrval(sial_newval(), p);
+}
+
+value_t *
+sial_strlen(value_t *vs)
+{
+char *s=sial_getptr(vs, char);
+ull l;
+	if(!s) l=0;
+	else l=strlen(s);
+
+	return sial_defbtype(sial_newval(), l);
+}
+
+value_t *
+sial_getchar(void)
+{
+char c; 
+struct termio tio, stio;
+int in=fileno(stdin);
+
+	if(ioctl(in, TCGETA, &tio)) c=255;
+	else {
+		stio=tio;
+		tio.c_lflag &= ~(ICANON | ECHO);
+		tio.c_iflag &= ~(ICRNL  | INLCR);
+		tio.c_cc[VMIN] = 1;
+		tio.c_cc[VTIME] = 0;
+        	ioctl(in, TCSETA, &tio);
+		c=getc(stdin);
+		ioctl(in, TCSETA, &stio);
+	}
+	return sial_defbtype(sial_newval(), (ull)c);
+}
+
+value_t *
+sial_gets(void)
+{
+char p[1024];
+	
+	if(!fgets(p, sizeof(p)-1, stdin)) p[0]='\0';
+	else p[strlen(p)-1]='\0';
+	return sial_setstrval(sial_newval(), p);
+}
+
+static builtin *bfuncs=0;
+
+/*
+	Check for the existence of a builtin function
+*/
+void *
+sial_chkbuiltin(char *name)
+{
+builtin *bf;
+
+	for(bf=bfuncs; bf; bf=bf->next) {
+
+		if(!strcmp(name, bf->v->name)) {
+
+			return bf;
+		}
+	}
+	return 0;
+}
+
+/*
+	Remove a builtin.
+	This is done when we 'unload' a *.so file.
+*/
+void
+sial_rmbuiltin(var_t*v)
+{
+builtin *bf;
+builtin *last=0;
+
+	for(bf=bfuncs; bf; bf=bf->next) {
+
+		if(!strcmp(v->name, bf->v->name)) {
+
+			if(!last) bfuncs=bf->next;
+			else {
+
+				last->next=bf->next;
+			}
+			sial_free(bf->proto);
+			sial_free(bf);
+			return;
+		}
+		last=bf;
+	}
+}
+
+/* 
+	Install a new builtin function.
+*/
+var_t* 
+sial_builtin(char *proto, bf_t* fp)
+{
+var_t*v;
+
+	/* parse the prototype_t*/
+	if((v=sial_parsexpr(proto))) {
+
+		builtin *bt;
+		int nargs=0;
+
+		/* check name */
+		if(!v->name || !v->name[0]) {
+
+			sial_freevar(v);
+			sial_msg("Syntax error: no function name specified [%s]\n", proto);
+			return 0;
+		}
+
+		/* check for function with same name */
+		if(sial_chkfname(v->name, 0)) {
+
+			sial_freevar(v);
+			sial_msg("Function already defined [%s]\n", proto);
+			return 0;
+		}
+
+		if(v->dv->fargs) {
+
+			var_t*vn=v->dv->fargs->next;
+
+			while(vn!=v->dv->fargs) {
+
+				nargs++;
+				vn=vn->next;
+			}
+		}
+		/* check number of args */
+		if(nargs > BT_MAXARGS) {
+
+			sial_freevar(v);
+			sial_msg("Too many parameters to function (max=%d) [%s]\n", BT_MAXARGS, proto);
+			return 0;
+		}
+
+
+		bt=sial_alloc(sizeof(builtin));
+		bt->proto=sial_strdup(proto);
+		bt->fp=fp;
+		bt->v=v;
+		bt->next=0;
+
+		/* install it */
+		if(!bfuncs) bfuncs=bt;
+		else {
+			builtin *btp;
+
+			for(btp=bfuncs; ; btp=btp->next) if(!btp->next) break;
+			btp->next=bt;
+		}
+		return v;
+	}
+
+	sial_msg("Builtin [%s] not loaded.", proto);
+
+	return 0;
+}
+
+#define bcast(f) ((bf_t*)f)
+
+static btspec_t sialbfuncs[] = {
+	{ "unsigned long long atoi(string, ...)",bcast(sial_atoi)},
+	{ "int exists(string)",			bcast(sial_exists)},
+	{ "void exit(int)", 			bcast(sial_bexit)},
+	{ "int getchar()",			bcast(sial_getchar)},
+	{ "string gets()",			bcast(sial_gets)},
+	{ "string getstr(char *)", 		bcast(sial_getstr)},
+	{ "string getnstr(char *, int)", 	bcast(sial_getnstr)},
+	{ "string itoa(int)",			bcast(sial_itoa)},
+	{ "void printf(string, ...)",		bcast(sial_printf)},
+	{ "void showtemp()",			bcast(sial_showtemp)},
+	{ "void showaddr(char *)",		bcast(sial_showaddr)},
+	{ "void memdebugon()",			bcast(sial_memdebugon)},
+	{ "void memdebugoff()",			bcast(sial_memdebugoff)},
+	{ "int sial_load(string)",		bcast(sial_bload)},
+	{ "int sial_unload(string)",		bcast(sial_bunload)},
+	{ "int depend(string)",			bcast(sial_bdepend)},
+	{ "int strlen(string)",			bcast(sial_strlen)},
+	{ "string sprintf(string, ...)",	bcast(sial_sprintf)},
+	{ "string substr(string, int, ...)",	bcast(sial_substr)},
+	{ "void prarr(string name, int i)",     bcast(sial_prarr)},
+	{ "int member(void*, string name)",     bcast(sial_ismember)},
+	{ "string findsym(string)",		bcast(sial_findsym)},
+};
+
+
+/*
+	Install the sial builtins.
+*/
+void
+sial_setbuiltins()
+{
+int i;
+
+	for(i=0;i<sizeof(sialbfuncs)/sizeof(sialbfuncs[0]);i++) {
+
+		(void)sial_builtin(sialbfuncs[i].proto, sialbfuncs[i].fp);
+	}
+}
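
sial_builtin() parses a C-style prototype string and chains the resulting var_t onto the bfuncs list, which is exactly how sial_setbuiltins() installs the sialbfuncs[] table above. A hypothetical extension could register its own builtin the same way; my_adder, my_register_builtins and the prototype string below are invented for illustration and assume the declarations in "sial.h".

#include "sial.h"

/* builtin body: receives already-converted value_t arguments and
   returns a freshly allocated value_t, like the builtins above */
static value_t *
my_adder(value_t *va, value_t *vb)
{
	return sial_makebtype(sial_getval(va) + sial_getval(vb));
}

/* to be called once, e.g. at extension load time */
void
my_register_builtins(void)
{
	(void)sial_builtin("int my_add(int, int)", (bf_t*)my_adder);
}

After this call a sial script can invoke my_add(2, 3) and sial_exebfunc() below will marshal the two value_t arguments into the call.
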
+
+value_t* 
+sial_exebfunc(char *name, value_t **vals)
+{
+builtin *bf;
+value_t *lvals[BT_MAXARGS*2]; /* use factor 2 for api where char * is 4 bytes */
+value_t *v, *vr;
+
+
+	if((bf=sial_chkbuiltin(name))) {
+
+		int i=0, nargs=0;
+
+		if(vals) for(i=0;vals[i];i++);
+
+		memset(lvals, 0, sizeof(lvals));
+
+		if(bf->v->dv->fargs) {
+
+			var_t*vv=bf->v->dv->fargs->next;
+
+			while(vv != bf->v->dv->fargs) {
+
+				if(vv->name && !strcmp(vv->name, S_VARARG)) { 
+					while(nargs<i) {
+
+						lvals[nargs]=sial_cloneval(vals[nargs]);
+						nargs++;
+					}
+					break; 
+				}
+
+				/* verify type compatibility and convert */
+				if(vals[nargs]) {
+
+					lvals[nargs]=sial_cloneval(vv->v);
+					sial_chkandconvert(lvals[nargs], vals[nargs]);
+				}
+				nargs++;
+				vv=vv->next;
+			}
+		}
+
+		/* check parameters number */
+		if(i<nargs) {
+
+			sial_rerror(&bf->v->dv->pos, "Too few parameters to '%s'", bf->proto);
+
+		} else if(i>nargs){
+
+			sial_rerror(&bf->v->dv->pos, "Too many parameters to '%s'", bf->proto);
+
+		}
+
+		if(vals) {
+			/* the actual call */
+			v=(bf->fp) ( 
+				lvals[0],  lvals[1],
+				lvals[2],  lvals[3],
+				lvals[4],  lvals[5],
+				lvals[6],  lvals[7],
+				lvals[8],  lvals[9],
+				lvals[10], lvals[11],
+				lvals[12], lvals[13],
+				lvals[14], lvals[15],
+				lvals[16], lvals[17],
+				lvals[18], lvals[19]
+				);
+		} else {
+
+			v=(bf->fp) ((value_t*)0);
+		}
+
+		while(i) {
+
+			--i;
+			sial_freeval(vals[i]);
+			sial_freeval(lvals[i]);
+		}
+
+		/* make a copy of the return value_t info */
+		vr=sial_cloneval(bf->v->v);
+		sial_chkandconvert(vr, v);
+		sial_freeval(v);
+
+		return vr;
+	}
+
+	sial_error("Oops. sial_exebfunc()");
+	return 0;
+}
--- crash/extensions/libsial/sial_case.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_case.c	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+/*
+	Set of functions to handle the case construct.
+*/
+#include "sial.h"
+
+void
+sial_freecaseval(caseval_t*cv)
+{
+	sial_free(cv);
+}
+
+node_t*
+sial_caseval(int isdef, node_t*val)
+{
+caseval_t*cv=sial_alloc(sizeof(caseval_t));
+node_t*n=sial_newnode();
+value_t *v;
+
+	cv->isdef=isdef;
+	if(val) {
+
+		v=NODE_EXE(val);
+		cv->val=unival(v);
+		sial_freeval(v);
+		NODE_FREE(val);
+
+	} else cv->val=0;
+
+	sial_setpos(&cv->pos);
+
+	cv->next=0;
+	n->data=cv;
+	return n;
+}
+
+node_t*
+sial_addcaseval(node_t*n, node_t*n2)
+{
+caseval_t*cv=(caseval_t*)n->data;
+caseval_t*ncv=(caseval_t*)n2->data;
+
+	sial_free(n);
+	ncv->next=cv;
+	return n2;
+}
+
+void
+sial_freecase(void *vcl)
+{
+caselist_t*cl=(caselist_t*)vcl;
+
+	NODE_FREE(cl->stmt);
+	sial_free(cl);
+}
+
+node_t*
+sial_newcase(node_t*nc, node_t* n)
+{
+caseval_t*cv=(caseval_t*)nc->data;
+caselist_t*cl=sial_alloc(sizeof(caselist_t));
+node_t*nn=sial_newnode();
+
+
+	nn->data=cl;
+	nn->free=(ffct_t)sial_freecase;
+
+	cl->vals=cv;
+	sial_free(nc);
+
+	cl->stmt=n;
+	cl->next=0;
+
+	sial_setpos(&cl->pos);
+
+	return nn;
+}
+
+node_t*
+sial_addcase(node_t*n, node_t*n2)
+{
+caselist_t*lcl;
+caselist_t*ncl=(caselist_t*)n2->data;
+caselist_t*cl=(caselist_t*)n->data;
+
+	for(lcl=cl; lcl->next; lcl=lcl->next);
+
+	/* we need to add cases in the order they are listed */
+	lcl->next=ncl;
+	sial_free(n2);
+	ncl->next=0;
+
+	sial_setpos(&ncl->pos);
+
+	return n;
+}
+
+int
+sial_docase(ull val, caselist_t*cl)
+{
+caselist_t*defclp=0, *clp;
+
+
+	for(clp=cl;clp;clp=clp->next) {
+
+	caseval_t*cvp;
+
+		for(cvp=clp->vals; cvp; cvp=cvp->next) {
+
+			if(cvp->val==val) goto out;
+			else if(cvp->isdef) defclp=clp;
+		}
+	}
+out:
+	if(clp || (clp=defclp)) {
+
+		for(;clp;clp=clp->next) {
+
+			if(clp->stmt) NODE_EXE(clp->stmt);
+		}
+	}
+	return 1;
+}
--- crash/extensions/libsial/sial_member.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_member.c	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,321 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <string.h>
+#include "sial.h"
+#include "sial.tab.h"
+
+/* these functions are used to access and set members in structs */
+
+/* define a member access */
+typedef struct mem {
+	char *name;	/* member name */
+	int dir;	/* direct/indirect access */
+	node_t*expr;	/* expression node_t*/
+	stmember_t*stm;	/* associated member information */
+	char *local;	/* local memory or ... */
+	ull mem;	/* ... system memory access */
+	srcpos_t p;
+} mem;
+
+void *
+sial_adrval(value_t *v)
+{
+	switch(v->type.size) {
+
+		case 1: return &v->v.uc;
+		case 2: return &v->v.us;
+		case 4: return &v->v.ul;
+		case 8: return &v->v.ull;
+	}
+	sial_error("Oops sial_adrval");
+	return 0;
+}
+
+/* some API secondary entry points */
+void sial_member_soffset(member_t*m, int offset) { m->offset=offset; }
+void sial_member_ssize(member_t*m, int size) { m->size=size; }
+void sial_member_sfbit(member_t*m, int fbit) { m->fbit=fbit; }
+void sial_member_snbits(member_t*m, int nbits) { m->nbits=nbits; }
+void sial_member_sname(member_t*m, char *name) { m->name=sial_strdup(name); }
+
+
+void
+sial_setmem(mem *m, value_t *v)
+{
+stmember_t*stm=m->stm;
+
+	/* check type compatibility. Ctypes should point to the same stinfo...*/
+	if(stm->type.type != v->type.type
+	   /* pointers must point to the same type of object */
+	   || (v->type.type==V_REF && v->type.rtype != stm->type.rtype)
+	   /* ctypes should point to the same stinfo */
+	   || (is_ctype(v->type.type) && v->type.idx != stm->type.idx)) {
+
+		sial_error("Incompatible types for assignment");
+
+	}
+
+	if(stm->m.nbits) {
+
+		ull dvalue_t=0;
+
+		if(v->type.type!=V_BASE) {
+
+			sial_error("Invalid assignment to bit field");
+
+		}
+
+		/* do the bit gymnastics */
+		/* we need to create a ull that contains the current
+		   bits of the destination */
+		if(m->local) {
+
+			memmove(m->local+stm->m.offset, ((char*)(&dvalue_t))+8-stm->m.size, stm->m.size);
+			dvalue_t=set_bit_value_t(dvalue_t, v->v.ull, stm->m.nbits, stm->m.fbit);
+			memmove(((char*)(&dvalue_t))+8-stm->m.size, m->local+stm->m.offset, stm->m.size);
+
+		}
+
+		if(m->mem) {
+
+			API_GETMEM(m->mem+stm->m.offset, ((char*)(&dvalue_t))+8-stm->m.size, stm->m.size);
+			dvalue_t=set_bit_value_t(dvalue_t, v->v.ull, stm->m.nbits, stm->m.fbit);
+			API_PUTMEM(m->mem+stm->m.offset, ((char*)(&dvalue_t))+8-stm->m.size, stm->m.size);
+
+		}
+		
+
+	} else {
+
+		/* move the data */
+		if(is_ctype(v->type.type)) {
+
+			if(m->local) {
+
+				memmove(m->local+stm->m.offset, v->v.data, stm->m.size);
+
+			} 
+			if(m->mem) {
+
+				API_PUTMEM(m->mem+stm->m.offset, v->v.data, stm->m.size);
+			}
+
+		} else {
+
+			sial_transval(v->type.size, stm->m.size, v, sial_issigned(v->type.typattr));
+
+			if(m->local) {
+
+				memmove(m->local+stm->m.offset, sial_adrval(v), stm->m.size);
+
+			}
+
+			if(m->mem) {
+
+				API_PUTMEM(m->mem+stm->m.offset, sial_adrval(v), stm->m.size);
+			}
+		}
+	}
+}
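
The bit-field branches above perform a read-modify-write: load the word that contains the field, splice the new bits in with set_bit_value_t(), and store the word back. The sketch below shows the generic mask-and-shift splice; the field layout (fbit counted from bit 0 of a 64-bit word) is an assumption for illustration, since the real conventions live in set_bit_value_t()/get_bit_value().

#include <stdio.h>

typedef unsigned long long ull;

/* replace 'nbits' bits starting at bit position 'fbit' of 'word'
   with the low bits of 'val' (illustrative layout only) */
static ull set_bits(ull word, ull val, int nbits, int fbit)
{
	ull mask = ((nbits >= 64) ? ~0ULL : ((1ULL << nbits) - 1)) << fbit;

	return (word & ~mask) | ((val << fbit) & mask);
}

/* extract the same field back out */
static ull get_bits(ull word, int nbits, int fbit)
{
	ull mask = (nbits >= 64) ? ~0ULL : ((1ULL << nbits) - 1);

	return (word >> fbit) & mask;
}

int main(void)
{
	ull word = 0xffffffffffffffffULL;

	word = set_bits(word, 0x5, 4, 8);	/* 4-bit field at bit 8 */
	printf("%llx field=%llx\n", word, get_bits(word, 4, 8));
	return 0;
}
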
+
+#define vdata(p, t) ((t*)(p->v.data))
+
+void
+sial_exememlocal(value_t *vp, stmember_t* stm, value_t *v)
+{
+	/* expression should be a ctype_t*/
+	if(!is_ctype(vp->type.type)) {
+
+		sial_error("Invalid type for '.' expression");
+	}
+	/* get that value_t from the application memory */
+	if(is_ctype(stm->type.type) && !stm->type.idxlst) {
+
+		void *data=sial_alloc(stm->m.size);
+
+		memmove(data, vdata(vp, char)+stm->m.offset, stm->m.size);
+		if(vp->mem) v->mem=vp->mem+stm->m.offset;
+		v->v.data=data;
+
+	}
+	/* bit field gymnastics */
+	else if(stm->m.nbits) {
+
+		ull value=0;
+
+		memmove(vdata(vp, char)+stm->m.offset, ((char*)&value)+(sizeof(value)-stm->m.size), stm->m.size);
+		get_bit_value(value, stm->m.nbits, stm->m.fbit, stm->m.size, v);
+
+	} 
+	/* check if this is an array, if so then create a reference to it */
+	else if(stm->type.idxlst) {
+
+		ull mempos=vp->mem+stm->m.offset;
+		if(sial_defbsize()==8) v->v.ull=mempos;
+		else v->v.ul=mempos;
+		v->mem=mempos;
+
+	} else {
+
+		switch(TYPE_SIZE(&stm->type)) {
+			case 1:
+				memmove(&v->v.uc, vdata(vp, char)+stm->m.offset, 1);
+			break;
+			case 2:
+				memmove(&v->v.us, vdata(vp, char)+stm->m.offset, 2);
+			break;
+			case 4:
+				memmove(&v->v.ul, vdata(vp, char)+stm->m.offset, 4);
+			break;
+			case 8:
+				memmove(&v->v.ull, vdata(vp, char)+stm->m.offset, 8);
+			break;
+			default:
+				sial_error("Oops exemem2[%d]", TYPE_SIZE(&stm->type));
+			break;
+		}
+		if(vp->mem) v->mem=vp->mem+stm->m.offset;
+	}
+}
+
+value_t *
+sial_exemem(mem *m)
+{
+value_t *v=sial_newval();
+value_t *vp=NODE_EXE(m->expr);
+stmember_t*stm;
+srcpos_t p;
+
+	sial_curpos(&m->p, &p);
+
+	if(vp->type.type == V_REF) {
+
+		if(vp->type.ref > 1) {
+
+			sial_error("Too many levels of indirection for access to [%s]", m->name);
+
+		}
+	}
+
+	/* get the member information and attach it */
+	stm=m->stm=(stmember_t*)sial_member(m->name, &vp->type);
+	if(!stm) {
+
+		sial_freeval(v);
+		sial_freeval(vp);
+		sial_error("Invalid member name specified : %s", m->name);
+
+	}
+
+	/* get a copy of the type of this member and put it in v */
+	sial_duptype(&v->type, &stm->type);
+
+	/* indirect i.e. (struct*)->member *must* be relative to the
+	   system image. This is a restriction of this language */
+	if(m->dir==INDIRECT) {
+
+		ull mempos;
+
+		if(vp->type.type != V_REF  || !is_ctype(vp->type.rtype)) {
+
+			sial_error("Invalid type for '->' expression");
+		}
+
+		m->local=0;
+		m->mem=sial_defbsize()==8?vp->v.ull:vp->v.ul;
+		mempos=m->mem+stm->m.offset;
+
+		/* get that value_t from the system image */
+		if(is_ctype(v->type.type) && !stm->type.idxlst) {
+
+			v->v.data=sial_alloc(stm->m.size);
+			API_GETMEM(mempos, v->v.data, stm->m.size);
+			v->mem=mempos;
+
+		}
+		/* bit field gymnastics */
+		else if(stm->m.nbits) {
+
+			ull value=0;
+
+			API_GETMEM(m->mem+stm->m.offset, &value, stm->m.size);
+			get_bit_value(value, stm->m.nbits, stm->m.fbit, stm->m.size, v);
+			/* no mempos for bit fields ... */
+
+		} 
+		/* check if this is an array, if so then create a reference to it */
+		else if(stm->type.idxlst) {
+
+			if(sial_defbsize()==8) v->v.ull=mempos;
+			else v->v.ul=mempos;
+			v->mem=mempos;
+
+		} else {
+
+			v->mem=mempos;
+
+			switch(TYPE_SIZE(&stm->type)) {
+				case 1:
+					API_GETMEM(mempos, &v->v.uc, 1);
+				break;
+				case 2:
+					API_GETMEM(mempos, &v->v.us, 2);
+				break;
+				case 4:
+					API_GETMEM(mempos, &v->v.ul, 4);
+				break;
+				case 8:
+					API_GETMEM(mempos, &v->v.ull, 8);
+				break;
+				default:
+					sial_error("Oops exemem[%d]", TYPE_SIZE(&stm->type));
+				break;
+			}
+
+		}
+	}
+	/* direct i.e. (struct).member *must* be a reference to a local
+	   structure. */
+	else {
+
+		m->mem=vp->mem;
+		m->local=vp->v.data;
+
+		/* extract the value from a local copy */
+		sial_exememlocal(vp, stm, v);
+	}
+	sial_curpos(&p, 0);
+	sial_freeval(vp);
+	v->setfct=(setfct_t)sial_setmem;
+	v->setval=(value_t*)m;
+	v->set=1;
+	return v;
+}
+
+void
+sial_freemem(mem *m)
+{
+	NODE_FREE(m->expr);
+	sial_free(m->name);
+	sial_free(m);
+}
+
+node_t*
+sial_newmem(int dir, node_t*expr, node_t*name)
+{
+char *nstr=NODE_NAME(name);
+node_t*n=sial_newnode();
+mem *m=sial_alloc(sizeof(mem));
+
+	/* discard name node_t*/
+	NODE_FREE(name);
+	m->name=nstr;
+	m->dir=dir;
+	m->expr=expr;
+	sial_setpos(&m->p);
+	n->data=m;
+	n->exe=(xfct_t)sial_exemem;
+	n->free=(ffct_t)sial_freemem;
+	return n;
+}
--- crash/extensions/libsial/sial.l.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial.l	2007-10-30 11:13:53.000000000 -0400
@@ -0,0 +1,206 @@
+%{
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+%}
+
+%{
+#include <string.h>
+
+#ifdef linux
+#define YY_INPUT(buf,result,max_size) \
+{ \
+	int c = sial_input(); \
+	result = (c == EOF) ? YY_NULL : (buf[0] = c, 1); \
+}
+#endif
+
+#define yylval siallval
+#include	"sial.h"
+#define YY_NO_UNPUT
+#include	"sial.tab.h"
+
+#define retok(t) return(t)
+int needvar=0, instruct=0;
+node_t *lastv;
+static char *lastvar=0;
+char *sial_lastvar(void) { return lastvar; }
+extern void sial_skip_directive(void);
+extern void sial_define(void);
+extern void sial_include(void);
+extern void sial_undefine(void);
+extern char sial_newchar(void);
+extern int  sial_chkmacvar(char *);
+%}
+
+ABC		[a-zA-Z_]
+ABCN		[a-zA-Z0-9_]
+N		[0-9]
+X		[0-9a-fA-F]
+W		[ \t\n]
+P		#[ \t]*
+OP              [(]
+CP              [)]
+
+%s var1
+%s var2
+%s var3
+%s var4
+%%
+
+{W}		{ ; }
+
+"..."		{ retok(VARARGS); }
+"&&"		{ retok(BAND); }
+"||"		{ retok(BOR); }
+"<"		{ retok(LT); }
+"<="		{ retok(LE); }
+"=="		{ retok(EQ); }
+">="		{ retok(GE); }
+">"		{ retok(GT); }
+"!="		{ retok(NE); }
+
+"&="		{ retok(ANDME); }
+"|"		{ retok(OR); }
+"|="		{ retok(ORME); }
+"!"		{ retok(NOT); }
+"^"		{ retok(XOR); }
+"&"		{ retok(AND); }
+"^="		{ retok(XORME); }
+">>"		{ retok(SHR); }
+"<<="		{ retok(SHLME); }
+">>="		{ retok(SHRME); }
+"<<"		{ retok(SHL); }
+"++"		{ retok(INCR); }
+"+"		{ retok(ADD); }
+"--"		{ retok(DECR); }
+"-"		{ retok(SUB); }
+"+="		{ retok(ADDME); }
+"-="		{ retok(SUBME); }
+"*="		{ retok(MULME); }
+"/="		{ retok(DIVME); }
+"/"		{ retok(DIV); }
+"%="		{ retok(MODME); }
+"%"		{ retok(MOD); }
+"="		{ retok(ASSIGN); }
+"->"		{ retok(INDIRECT); }
+"."		{ retok(DIRECT); }
+"{"		{ needvar=0; retok('{'); }
+
+\*+		{ 
+			yylval.i=strlen(yytext); 
+			return PTR; 
+		}
+
+(("0x"+){X}+[lL]*|{N}+[lL]*)	{ yylval.n = sial_newnum(yytext); retok(NUMBER); }
+
+{P}ident		{  sial_skip_directive(); }
+{P}pragma		{  sial_skip_directive(); }
+{P}define		{
+			/* preprocessor command */
+			/* either a simple constant or a macro */
+			sial_define();
+		}
+{P}include 	{
+
+			/* file inclusion */
+			sial_include();
+		}
+{P}undef		{
+			sial_undefine();
+		}
+while		{ retok(WHILE); }
+for		{ retok(FOR); }
+do		{ retok(DO); }
+if		{ retok(IF); }
+else		{ retok(ELSE); }
+break		{ retok(BREAK); }
+continue	{ retok(CONTINUE); }
+in		{ retok(IN); }
+return		{ retok(RETURN); }
+
+__char__	{ retok(yylval.i=CHAR); }
+__short__	{ retok(yylval.i=SHORT); }
+__int__		{ retok(yylval.i=INT); }
+__float__	{ retok(yylval.i=FLOAT); }
+__double__	{ retok(yylval.i=DOUBLE); }
+__register__	{ retok(yylval.i=REGISTER); }
+__volatile__	{ retok(yylval.i=VOLATILE); }
+__void__	{ retok(yylval.i=VOID); }
+__unsigned__	{ retok(yylval.i=UNSIGNED); }
+__signed__	{ retok(yylval.i=SIGNED); }
+__long__	{ retok(yylval.i=LONG); }
+__const__	{ retok(yylval.i=CONST); }
+__static__ 	{ retok(yylval.i=STATIC); }
+__extern__	{ retok(yylval.i=EXTERN); }
+
+char		{ retok(yylval.i=CHAR); }
+short		{ retok(yylval.i=SHORT); }
+int		{ retok(yylval.i=INT); }
+float		{ retok(yylval.i=FLOAT); }
+double		{ retok(yylval.i=DOUBLE); }
+register	{ retok(yylval.i=REGISTER); }
+volatile	{ retok(yylval.i=VOLATILE); }
+void		{ retok(yylval.i=VOID); }
+unsigned	{ retok(yylval.i=UNSIGNED); }
+signed		{ retok(yylval.i=SIGNED); }
+long		{ retok(yylval.i=LONG); }
+const		{ retok(yylval.i=CONST); }
+static 		{ retok(yylval.i=STATIC); }
+extern		{ retok(yylval.i=EXTERN); }
+
+string		{ retok(yylval.i=STRTYPE); }
+__inline	{ ; }
+switch		{ retok(SWITCH); }
+case		{ retok(CASE); }
+default		{ retok(DEFAULT); }
+enum		{ retok(yylval.i=ENUM); }
+union		{ retok(yylval.i=UNION);}
+struct 		{ retok(yylval.i=STRUCT); }
+typedef		{ retok(yylval.i=TDEF); }
+sizeof 		{ retok(SIZEOF); }
+print		{ retok(PRINT); }
+printo		{ retok(PRINTO); }
+printd		{ retok(PRINTD); }
+printx		{ retok(PRINTX); }
+take_array	{ retok(TAKE_ARR); }
+
+__var__              { BEGIN(var1); }
+<var1>{W}*{OP}{W}*   { BEGIN(var2); }
+<var2>{ABC}{ABCN}*   { BEGIN(var3); goto forcevar; }
+<var3>{W}*{CP}{W}*   { BEGIN(INITIAL); }
+
+
+{ABC}{ABCN}*	{ 
+				if((!needvar) && (yylval.t=sial_getctype(V_TYPEDEF, yytext, 1)))
+				{ 
+					/* hack to remember last tdef name */
+					if(lastvar) sial_free(lastvar);
+					lastvar=sial_alloc(strlen(yytext)+1);
+					strcpy(lastvar, yytext);
+					needvar++;
+					retok(TYPEDEF);
+				}
+forcevar:
+				needvar=0;
+				if(strlen(yytext) > MAX_SYMNAMELEN) {
+
+					sial_error("Symbol name too long");
+				}
+				if(!sial_chkmacvar(yytext)) {
+					yylval.n = sial_newvnode(yytext); 
+					retok(VAR);
+				}
+		}
+
+\"		{ yylval.n = sial_newstr(); retok(STRING); }
+\'.\'		{ yylval.n = sial_makenum(B_SC, yytext[1]); retok(NUMBER); }
+\'\\.\'		{ yylval.n = sial_makenum(B_SC, sial_getseq(yytext[2])); retok(NUMBER); }
+
+.		{ retok(yylval.i = yytext[0]); }
+
+%%
+#undef input
+#undef unput
+#define input()       sial_input()
+#define unput(c)      sial_unput(c)
--- crash/extensions/libsial/Makefile.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/Makefile	2007-10-30 11:13:53.000000000 -0400
@@ -0,0 +1,94 @@
+#
+# Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+#
+#
+# Makefile for LIBSIAL
+#
+
+# bison is used to generate the sial and sialpp parsers
+YACC = bison
+
+LDIRT    = lex.sial.c lex.sialpp.c sial.tab.c sial.tab.h sialpp.tab.c \
+	sialpp.tab.h y.output mkbaseop baseops.c y.tab.c y.tab.h \
+	libsial.so* *.output
+
+LIBDIR	 = /usr/lib
+TARGETS  = libsial.a
+
+CFLAGS += -O3 -g -fPIC
+ifeq ($(TARGET), PPC64)
+	CFLAGS += -m64
+endif
+
+CFILES   = sial_util.c sial_node.c sial_var.c sial_func.c sial_str.c \
+	sial_op.c sial_num.c sial_stat.c sial_builtin.c sial_type.c \
+	sial_case.c sial_api.c sial_member.c sial_alloc.c sial_define.c \
+	sial_input.c sial_print.c
+
+OFILES   = $(CFILES:.c=.o) sialpp.tab.o sial.tab.o lex.sial.o lex.sialpp.o \
+	baseops.o
+
+HFILES   = sial.h sial_api.h
+
+LSOURCES = sial-lsed sialpp-lsed sial.l sialpp.l sial.y sialpp.y mkbaseop.c
+
+all: default
+
+showfiles:
+	@echo $(RELDIR)/$(CFILES) $(RELDIR)/$(HFILES) $(RELDIR)/$(LSOURCES)
+
+exports: all
+	install $(TARGETS) $(ROOT)$(LIBDIR)
+
+headers:
+	install -m 644 $(HFILES) $(ROOT)/usr/include
+
+install: headers exports
+	(cd scripts ; $(MAKE) install )
+
+baseops.o: mkbaseop.c
+	$(CC) $(CFLAGS) -o mkbaseop mkbaseop.c
+	./mkbaseop > baseops.c
+	$(CC) $(CFLAGS) -c baseops.c
+
+mkbaseop.c sial_member.o sial_op.o sial_stat.o sial_type.o y.tab.o : sial.tab.h
+
+lex.sial.o: lex.sial.c sial.tab.c sial.h
+	$(CC) $(CFLAGS) -c lex.sial.c
+
+lex.sial.c: sial.l
+	flex -L -Psial -t sial.l > lex.sial.c
+
+sial.tab.c: sial.tab.h
+
+sialpp.tab.o: sialpp.tab.c
+	$(CC) $(CFLAGS) -c sialpp.tab.c
+
+sial.tab.o: sial.tab.c
+	$(CC) $(CFLAGS) -c sial.tab.c
+
+sial.tab.h : sial.y
+	$(YACC) -psial -v -t -d sial.y
+
+lex.sialpp.o: lex.sialpp.c sialpp.tab.c sial.h
+	$(CC) $(CFLAGS) -c lex.sialpp.c
+
+lex.sialpp.c: sialpp.l
+	flex -Psialpp -t sialpp.l  > lex.sialpp.c
+
+sialpp.tab.c: sialpp.tab.h sial.tab.h
+
+sialpp.tab.h : sialpp.y sial.tab.h
+	$(YACC) -psialpp -v -t -d sialpp.y
+
+default: $(TARGETS)
+
+$(CFILES): $(HFILES) sial.tab.h
+
+$(TARGETS): $(OFILES)
+	$(AR) ccurl $(TARGETS) $(OFILES)
+
+clean: 
+	-/bin/rm -f *.o $(TARGETS) $(LDIRT)
+
+clobber: clean
--- crash/extensions/libsial/sialpp.y.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sialpp.y	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,88 @@
+%{
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+/*
+	This is the grammar for the preprocessor expression evaluation.
+*/
+#include "sial.h"
+#include "sial.tab.h"
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+static node_t *last_term;
+%}
+
+%union {
+	node_t	*n;
+	int	i;
+}
+
+%token	<n>	P_VAR P_NUMBER
+%token  <i>	P_DEFINED
+
+%type	<n>	term
+
+%right	<i>	'?'
+%left	<i>	P_BOR
+%left	<i>	P_BAND
+%left	<i>	P_OR
+%left	<i>	P_XOR
+%left	<i>	P_AND
+%left	<i>	P_EQ P_NE
+%left	<i>	P_GE P_GT P_LE P_LT
+%left	<i>	P_SHL P_SHR
+%left	<i>	P_ADD P_SUB
+%left	<i>	P_MUL P_DIV P_MOD
+%right	<i>	P_UMINUS P_FLIP P_NOT
+
+%%
+
+term:
+
+	  term '?' term ':' term %prec '?'
+	 				{ $$ = sial_newop(CEXPR, 3, $1, $3, $5); last_term = $$; }
+	| term P_BOR 	term		{ $$ = sial_newop(BOR, 2, $1, $3); last_term = $$; }
+	| term P_BAND	term		{ $$ = sial_newop(BAND, 2, $1, $3); last_term = $$; }
+	| P_NOT term			{ $$ = sial_newop(NOT, 1, $2); last_term = $$; }
+	| term P_EQ	term		{ $$ = sial_newop(EQ, 2, $1, $3); last_term = $$; }
+	| term P_GE	term		{ $$ = sial_newop(GE, 2, $1, $3); last_term = $$; }
+	| term P_GT	term		{ $$ = sial_newop(GT, 2, $1, $3); last_term = $$; }
+	| term P_LE	term		{ $$ = sial_newop(LE, 2, $1, $3); last_term = $$; }
+	| term P_LT	term		{ $$ = sial_newop(LT, 2, $1, $3); last_term = $$; }
+	| term P_NE	term		{ $$ = sial_newop(NE, 2, $1, $3); last_term = $$; }
+	| '(' term ')'			{ $$ = $2; last_term = $$; }
+	| term P_OR	term		{ $$ = sial_newop(OR, 2, $1, $3); last_term = $$; }
+	| term P_XOR	term		{ $$ = sial_newop(XOR, 2, $1, $3); last_term = $$; }
+	| term P_SHR	term		{ $$ = sial_newop(SHR, 2, $1, $3); last_term = $$; }
+	| term P_SHL	term		{ $$ = sial_newop(SHL, 2, $1, $3); last_term = $$; }
+	| term P_DIV	term		{ $$ = sial_newop(DIV, 2, $1, $3); last_term = $$; }
+	| term P_MOD	term		{ $$ = sial_newop(MOD, 2, $1, $3); last_term = $$; }
+	| term P_SUB	term		{ $$ = sial_newop(SUB, 2, $1, $3); last_term = $$; }
+	| term P_ADD	term		{ $$ = sial_newop(ADD, 2, $1, $3); last_term = $$; }
+	| term P_MUL	term		{ $$ = sial_newop(MUL, 2, $1, $3); last_term = $$; }
+	| term '&' term	%prec P_AND	{ $$ = sial_newop(AND, 2, $1, $3); last_term = $$; }
+	| P_SUB term %prec P_UMINUS	{ $$ = sial_newop(UMINUS, 1, $2); last_term = $$; }
+	| '~' term %prec P_FLIP		{ $$ = sial_newop(FLIP, 1, $2); last_term = $$; }
+	| '+' term %prec P_UMINUS	{ $$ = $2; last_term = $$; }
+	| P_DEFINED '(' {nomacs++;} P_VAR ')'		
+					{ nomacs=0; $$ = sial_macexists($4); last_term = $$; }
+	| P_NUMBER                      { last_term = $$; }
+	| P_VAR				{ $$ = sial_makenum(B_UL, 0); last_term = $$; }
+	;
+
+%%
+
+node_t *
+sial_getppnode()
+{
+	return last_term;
+}
+
+int
+sialpperror(char *s)
+{
+	sial_error(s);
+	return 1;
+}
+
--- crash/extensions/libsial/sial_util.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_util.c	2007-10-30 11:13:53.000000000 -0400
@@ -0,0 +1,922 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include "sial.h"
+#include <string.h>
+#include <unistd.h>
+#include <curses.h>
+#include <term.h>
+#include <termio.h>
+#include <ctype.h>
+#include <stdarg.h>
+#include <malloc.h>
+#include <limits.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <stdlib.h>
+#include <regex.h>
+#include <ctype.h>
+
+static FILE *ofile=0;
+static int cols=25;
+static char *bold_on, *bold_off;
+
+
+static void
+sial_getwinsize(void)
+{
+struct winsize w;
+
+	if (ioctl (fileno(ofile), TIOCGWINSZ, &w) == 0)
+	{
+		cols=w.ws_col;
+	}
+	else /* use ENV */
+	{
+	char *ewidth;
+		if ((ewidth = getenv ("COLUMNS")))
+		cols = atoi (ewidth);
+		/* use what's in terminfo */
+		if (cols <= 0)
+		cols = tigetnum ("co");
+	}
+	if(cols <= 10) cols=10;
+	if(cols > 80) cols=80;
+}
+
+void
+sial_setofile(void * f)
+{
+int out;
+int ret;
+char *term;
+
+	ofile=(FILE *)f;
+
+	bold_on="";
+	bold_off="";
+	cols=80;
+
+	out=fileno(ofile);
+        if(isatty(out))
+        {
+
+        	if(!(term = getenv ("TERM"))) term="dumb";
+        	if(setupterm(term, out, &ret)!=ERR)
+        	{
+                	bold_on=tigetstr("bold");
+			if(!bold_on) bold_on="";
+                	bold_off=tigetstr("sgr0");
+			if(!bold_off) bold_off="";
+        	}
+		sial_getwinsize();
+        }
+}
+
+void *
+sial_getofile(void)
+{
+	return ofile;
+}
+
+/*
+	Output a line of text to the screen with line wrapping
+	and escape-sequence handling.
+*/
+#define ESC '<'
+#define ESC2 '>'
+
+static int
+sial_tabs(int tabs, char *t, int lf)
+{
+int i;
+
+	if(lf) fprintf(ofile, "\n");
+	for(i=0;i <tabs; i++) fprintf(ofile, t);
+	return tabs*4;
+}
+
+void
+sial_format(int tabs, char *str)
+{
+char *t="    ";
+char *p;
+int n;
+int mode=0;
+
+	n=sial_tabs(tabs, t, 0);
+	sial_getwinsize();
+		
+	for(p=str; *p; p++) {
+
+
+		/* check for escape */
+		if(!mode && *p == ESC && *(p+1) && *(p+1) == ESC) {
+
+			fprintf(ofile, "%s", bold_on);
+			p++;
+			mode=1;
+
+		} else if(mode && *p == ESC2 && *(p+1) && *(p+1) == ESC2) {
+
+			fprintf(ofile, "%s", bold_off);
+			p++;
+			mode=0;
+
+		} else if(*p==' ' || *p=='\t' ) {
+
+			char *p2;
+			int wl;
+
+			for(p2=p+1; *p2 && *p2 != ' ' && *p2 != '\t'; p2++);
+
+			wl=p2-p-1;
+
+			if(wl > cols) {
+
+				char *p3=p+(cols-n-1);
+
+				char c=*p3;
+				char c2=*(p3+1);
+
+				*p3='-';
+				*(p3+1)='\0';
+
+				fprintf(ofile, "%s", p);
+				*p3=c;
+				*(p3+1)=c2;
+				n=sial_tabs(tabs, t, 0);
+
+			} else if(n + (p2-p) >= cols) {
+
+				n=sial_tabs(tabs, t, 1);
+
+			} else {
+
+				fprintf(ofile, " ");
+				n++;
+			}
+
+		} else if(*p=='\n') {
+
+			n=sial_tabs(tabs, t, 1);
+			
+		} else {
+
+			fprintf(ofile, "%c", *p);
+			n++;
+		}
+	}
+
+}
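
sial_format() word-wraps its input to the detected terminal width and converts "<<" / ">>" pairs into the terminfo bold-on/bold-off sequences fetched in sial_setofile(). A hypothetical caller, assuming sial_setofile() has already been pointed at an output FILE* and that "sial.h" declares these entry points:

#include "sial.h"

/* hedged usage sketch: prints a bold heading followed by wrapped text,
   indented by one 4-space tab level */
static void demo_banner(void)
{
	sial_format(1, "<<Task summary>> long descriptive text that will "
		       "be word-wrapped to the current terminal width\n");
}
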
+
+void
+sial_msg(char *fmt, ...)
+{
+va_list ap;
+	va_start(ap, fmt);
+	vfprintf(ofile, fmt, ap);
+	va_end(ap);
+}
+
+void
+sial_freenode(node_t *n)
+{
+	n->free(n->data);
+	sial_free(n);
+}
+
+int lineno=1, lastline=1;
+int col=1;
+static char *filename=0;
+static char *lastfile=0;
+
+void
+sial_setlastfile(char *fname, int line)
+{
+	if(!fname) return;
+	if(lastfile) sial_free(lastfile);
+	lastfile=sial_strdup(fname);
+	lastline=line;
+}
+
+void
+sial_rstpos(void)
+{
+	lineno=1;
+	col=1;
+	/* do not free filename */
+	filename=0;
+}
+
+void
+sial_setpos(srcpos_t *p)
+{
+	p->line=lineno;
+	p->col=col;
+	p->file=filename;
+}
+
+/* set the current position */
+void
+sial_curpos(srcpos_t *p, srcpos_t *s)
+{
+	if(s) {
+		s->line=lineno;
+		s->col=col;
+		s->file=filename;
+	}
+	lineno=p->line;
+	col=p->col;
+	filename=p->file;
+}
+
+int
+sial_line(int inc){ return lineno+=inc; }
+
+int
+sial_col(int inc) { return col+=inc; }
+
+char *
+sial_filename(void) { return filename; }
+
+/*
+	This function scans a printf() fmt string and translates the %p
+	to %08x or %016llx depending on the pointer size of the object image.
+	We also substitute %> with 8 pad characters when the pointer size is
+	8 bytes; this permits easy alignment of output on either 32 or 64 bit
+	images.
+
+	ex:
+
+	Proc	%> pid ppid
+	%p    %3d %3d
+
+	In this case the %> aligns the pid with its corresponding value_t
+	in the next line of output.
+
+	We also process the '?' format which will be set to match the 
+	corresponding value_t type.
+
+	Also, format versus argument type validation is performed.
+
+*/
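
Concretely, on a 64-bit image "%p" is rewritten to "%016llx" and "%?" picks a conversion that matches the argument's type before fprintf() is ever called. The small illustration below only shows the net effect; the exact rewrite is decided at run time by sial_defbsize() and the value types, so the translated format is representative rather than exact.

#include <stdio.h>

int main(void)
{
	unsigned long long addr = 0xffff8800deadbeefULL;
	int pid = 42;

	/* a sial script would write:  printf("%p %?\n", addr, pid);
	   on a 64-bit image sial_ptr() rewrites that, roughly, to the
	   plain C format used below before calling fprintf()         */
	printf("%016llx %d\n", addr, pid);
	return 0;
}
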
+
+/*
+	Printf formats have the form : 
+	%3$-*3$.*4$lld
+	%20x
+	%08x
+	%-08.8f
+*/
+/* these are the building blocks for a regex matching printf formats */
+#define F_POSP	"([0-9]+\\$)*"
+#define F_FLGS	"([-'+ #0]*)"
+#define F_WARG	"(\\*([0-9]+\\$)*){0,1}"
+#define F_WIDTH	"([0-9]*)"
+#define F_PREC	"((\\.(\\*([0-9]+\\$)*)*([0-9]*))*)"
+#define F_SIZE	"([hlL]*)"
+#define F_FMT	"([diouxXfeEgGcCsSpn?>]{1})"
+#define FMTREG F_POSP""F_FLGS""F_WARG""F_WIDTH""F_PREC""F_SIZE""F_FMT
+#define M_POSP          1
+#define M_FLAGS         2
+#define M_WIDTHARG      3
+#define M_WIDTDIGITS    4
+#define M_WIDTH         5
+#define M_PRECARG       8
+#define M_PRECDIGITS    9
+#define M_PREC          10
+#define M_SIZE          11
+#define M_FMT           12
+#define NMATCH          16
+static int addit[]={M_FLAGS,M_WIDTHARG,M_WIDTH,M_PRECARG,M_PREC,M_SIZE};
+
+#define ptrto(idx) (matches[idx].rm_so==matches[idx].rm_eo?0:(pi+matches[idx].rm_so))
+#define matchlen(idx) (matches[(idx)].rm_eo-matches[(idx)].rm_so)
+
+void sial_error(char *fmt, ...);
+
+static int
+chkforint(char *p, value_t **vals, int *curarg)
+{
+int pos=-1;
+
+	if(!p) return -1;
+
+	/* a single star ? */
+	if(isdigit(p[1])) {
+
+		if(sscanf(p+1, "%d", &pos)!=1) {
+
+			return pos;
+		}
+		pos--;
+
+	} else {
+
+		pos=*curarg;
+		*curarg=(*curarg)+1;
+
+	}
+
+	if(pos < BT_MAXARGS && vals[pos] && vals[pos]->type.type == V_BASE) return pos;
+	sial_error("Expected 'integer' type for arg%d", pos+1);
+	return -1;
+}
+
+#define pushval(val, s, sig) 	(										\
+					sig ?									\
+					(									\
+						(s==8) ? 							\
+							(val)->v.sll 						\
+						: (								\
+							(s==4)  ?						\
+								(val)->v.sl					\
+							: (							\
+								(s==2) ?					\
+									(val)->v.ss				\
+								:(						\
+									(s==1) ?				\
+										(val)->v.sc			\
+									:( 					\
+										sial_error("Oops pushval"),1	\
+									)					\
+								)						\
+							)							\
+						)								\
+					) : (									\
+						(s==8) ? 							\
+							(val)->v.ull 						\
+						: (								\
+							(s==4)  ?						\
+								(val)->v.ul					\
+							: (							\
+								(s==2) ?					\
+									(val)->v.us				\
+								:(						\
+									(s==1) ?				\
+										(val)->v.uc			\
+									:( 					\
+										sial_error("Oops pushval"),1	\
+									)					\
+								)						\
+							)							\
+						)								\
+					)									\
+			)
+						
+
+static char *
+add_fmt(int len, char *s, char *onefmt, int ppos, int wpos, int posarg, value_t **vals)
+{
+int size=(vals[posarg]->type.type == V_REF ? sial_defbsize(): vals[posarg]->type.size);
+int sign=(vals[posarg]->type.type == V_REF ? 0 : sial_issigned(vals[posarg]->type.typattr));
+
+	if(vals[posarg]->type.type == V_STRING) {
+
+		if(wpos>=0 && ppos<0) 
+			s+=snprintf(s, len, onefmt
+				, (int)sial_getval(vals[wpos])
+				, vals[posarg]->v.data);
+		else if(wpos<0 && ppos>=0) 
+			s+=snprintf(s, len, onefmt
+				, (int)sial_getval(vals[ppos])
+				, vals[posarg]->v.data);
+		else if(wpos>=0 && ppos>=0) 
+			s+=snprintf(s, len, onefmt
+				, (int)sial_getval(vals[wpos])
+				, (int)sial_getval(vals[ppos])
+				, vals[posarg]->v.data);
+		else s+=snprintf(s, len, onefmt
+				, vals[posarg]->v.data);
+
+	} else {
+#if defined(__s390x__) || defined(__s390__)
+		if(wpos>=0 && ppos<0) 
+			s+=snprintf(s, len, onefmt
+				, (int)sial_getval(vals[wpos])
+				, (unsigned long)pushval(vals[posarg], size, sign));
+		else if(wpos<0 && ppos>=0) 
+			s+=snprintf(s, len, onefmt
+				, (int)sial_getval(vals[ppos])
+				, (unsigned long)pushval(vals[posarg], size, sign));
+		else if(wpos>=0 && ppos>=0) 
+			s+=snprintf(s, len, onefmt
+				, (int)sial_getval(vals[wpos])
+				, (int)sial_getval(vals[ppos])
+				, (unsigned long) pushval(vals[posarg], size, sign));
+		else s+=snprintf(s, len, onefmt
+				, (unsigned long) pushval(vals[posarg], size, sign));
+#else
+		if(wpos>=0 && ppos<0) 
+			s+=snprintf(s, len, onefmt
+				, (int)sial_getval(vals[wpos])
+				, pushval(vals[posarg], size, sign));
+		else if(wpos<0 && ppos>=0) 
+			s+=snprintf(s, len, onefmt
+				, (int)sial_getval(vals[ppos])
+				, pushval(vals[posarg], size, sign));
+		else if(wpos>=0 && ppos>=0) 
+			s+=snprintf(s, len, onefmt
+				, (int)sial_getval(vals[wpos])
+				, (int)sial_getval(vals[ppos])
+				, pushval(vals[posarg], size, sign));
+		else s+=snprintf(s, len, onefmt
+				, pushval(vals[posarg], size, sign));
+#endif
+	}
+	return s;
+}
+
+static char *
+sial_ptr(char *fmt, value_t **vals)
+{
+    /* We need to ensure that we don't overflow our string buffer. Although it's unlikely we will overflow
+       it with just numbers, strings will easily overflow. So, let's check for strings and see how long they are.
+     */
+int len=0;
+char *nfmt=NULL,*ni=NULL;
+char *onefmt=NULL, *onei=NULL;
+char *p=fmt;
+char last=' ';
+int curarg=0;
+#define NBYTES (len-(nfmt-ni))
+
+int i = 0;
+
+	while(vals[i] != NULL) {
+	    if(vals[i]->type.type == V_STRING)
+		len+=vals[i]->type.size;
+	    i++;
+	}
+	/* We add a fudge factor of 100, which should cover all the number arguments */
+	len+=strlen(fmt) + 100;
+	nfmt=sial_alloc(len);
+	ni=nfmt;
+	onefmt=sial_alloc(len);
+	onei=onefmt;
+
+
+
+	while(*p) {
+
+		if(*p=='%') {
+
+			static regex_t preg;
+			static int done=0;
+			regmatch_t matches[NMATCH];
+
+			if(!done) {
+
+				regcomp(&preg, FMTREG, REG_EXTENDED);
+				done=1;
+			}
+
+			/* build a new format translation */
+			onefmt=onei;
+			*onefmt++=*p++;
+
+			/* regexec() returns 0 when the format specifier matches our regex */
+			if(!regexec(&preg, p, NMATCH, matches, 0)) {
+
+				int i, n=matches[0].rm_eo-1;
+				int posarg, wpos, ppos;
+				char *pi=p; /* save p for ptrto() macro */
+
+				/* check that the width and precision field args point
+				   to an int value_t, if they were used */
+				wpos=chkforint(ptrto(M_WIDTHARG), vals, &curarg);
+				ppos=chkforint(ptrto(M_PRECARG), vals, &curarg);
+
+				/* argument position was specified ? */
+				if(ptrto(M_POSP)) {
+
+					/* we work from 0-n, printf works from 1-n */
+					if(sscanf(ptrto(M_POSP), "%d", &posarg)==1) posarg--;
+
+					if(posarg >= BT_MAXARGS || !vals[posarg]) {
+						sial_error("Invalid arg position specified [%d]", posarg+1);
+					}
+
+				} else posarg=curarg++;
+
+				/* jump over the format spec in the original */
+				p+=n;
+#if 0
+for(i=0;i<NMATCH;i++) {
+	char buf[40];
+
+	if(ptrto(i)) {
+		int n=matchlen(i);
+		strncpy(buf, pi+matches[i].rm_so, n);
+		buf[n]='\0';
+		printf("match[%d]=[%s]\n", i, buf);
+	}
+}
+#endif
+
+				/* copy all format specs to destination except fmt */
+				for(i=0;i<sizeof(addit)/sizeof(addit[0]);i++) {
+
+					switch(addit[i]) {
+
+						case M_WIDTHARG:
+
+							if(wpos >=0 ){
+
+								*onefmt++='*';
+
+							} else goto def;
+
+						break;
+						case M_PRECARG:
+
+							if(ppos >=0 ){
+
+								*onefmt++='.';
+								*onefmt++='*';
+
+							} else goto def;
+
+						break;
+						case M_PREC:
+							if(ptrto(addit[i])) *onefmt++='.';
+							goto def;
+						default:
+def:
+						if(ptrto(addit[i])) {
+							strcpy(onefmt, ptrto(addit[i]));
+							onefmt+=matchlen(addit[i]);
+						}
+					}
+				}
+
+				if(*p=='p') {
+
+ref:
+					/* if the user overrides anything, leave the format alone */
+					if(ptrto(M_FLAGS)||ptrto(M_WIDTH)||ptrto(M_WIDTHARG)||ptrto(M_PREC)||ptrto(M_PRECARG)||ptrto(M_SIZE)) {
+						*onefmt++='p';
+
+					} else {
+						if(sial_defbsize()==8) {
+
+							strcpy(onefmt, "016llx");
+							onefmt+=6;
+
+						} else {
+
+							strcpy(onefmt, "08x");
+							onefmt+=3;
+						}
+					}
+					*onefmt='\0';
+					p++;
+					nfmt=add_fmt(NBYTES, nfmt, onei, ppos, wpos, posarg, vals);
+
+				} else if(*p=='>') { 
+
+					nfmt--;
+					if(sial_defbsize()==8) {
+
+						int i;
+
+						for(i=0;i<8;i++) *nfmt++=last;
+					}
+					p++;
+                                        curarg--;
+
+				} else if(*p=='?') {
+
+					/* put the proper format for the user */
+					if(!vals[posarg]) {
+
+						sial_error("Expected additional argument %d\n", posarg+1);
+
+					} else switch(vals[posarg]->type.type) {
+
+						case V_BASE: case V_ENUM:
+						{
+							if(!ptrto(M_SIZE)) {
+
+								if(vals[posarg]->type.size==8) {
+
+									*onefmt++='l';
+									*onefmt++='l';
+								}
+							}
+							if(sial_issigned(vals[posarg]->type.typattr)) {
+
+								*onefmt++='d';
+
+							}else{
+
+								*onefmt++='u';
+							}
+						}
+						break;
+						case V_REF:
+						{
+							*p='p';
+							goto ref;
+						}
+						case V_STRING:
+						{
+							*onefmt++='s';
+						}
+						break;
+					}
+					p++;
+					*onefmt='\0';
+					nfmt=add_fmt(NBYTES, nfmt, onei, ppos, wpos, posarg, vals);
+
+				} else {
+
+					/* check that format and value_t agree */
+					/* can't do a lot more then check for strings vs anything_else */
+
+					if(!vals[posarg]) {
+
+						sial_error("Expected additional argument %d\n", posarg+1);
+
+
+					} else if(*p=='s') {
+
+						if(vals[posarg]->type.type != V_STRING) {
+
+							sial_error("Expected type 'string' as arg%d", posarg+1);
+						}
+
+					} else if(vals[posarg]->type.type == V_STRING) {
+
+						sial_error("Incompatible type 'string' in arg%d", posarg+1);
+
+					}
+					*onefmt++=*p++;
+					*onefmt='\0';
+					nfmt=add_fmt(NBYTES, nfmt, onei, ppos, wpos, posarg, vals);
+				}
+
+			} else {
+
+				sial_warning("Malformed format specifier!");
+
+			}
+
+		} else {
+	
+			last=*p;
+			if(nfmt-ni > len) sial_error("format translation overflow!");
+			*nfmt++=*p++;
+
+		}
+	}
+	sial_free(onei);
+	*nfmt='\0';
+	return ni;
+}
+
+value_t* sial_printf(value_t *vfmt, ...)
+{
+char *fmt = sial_getptr(vfmt, char);
+va_list ap;
+value_t *vals[BT_MAXARGS];
+int i;
+	
+	va_start(ap, vfmt);
+	for(i=0;i<BT_MAXARGS-2;i++){	
+		vals[i]=va_arg(ap,value_t*);
+	}								
+	va_end(ap);
+	fmt=sial_ptr(fmt, vals);
+	fprintf(ofile, "%s", fmt);
+	sial_free(fmt);
+	return sial_makebtype(1);
+}
+
+#define MAX_SPRINTF 2000
+value_t* sial_sprintf(value_t *vfmt, ...)
+{
+char *fmt=sial_getptr(vfmt, char);
+int i;
+va_list ap;
+value_t *vals[BT_MAXARGS];
+value_t *v;
+
+	va_start(ap, vfmt);
+	for(i=0;i<BT_MAXARGS-1;i++){
+		vals[i]=va_arg(ap,value_t*);
+	}
+	va_end(ap);
+	fmt=sial_ptr(fmt, vals);
+	v=sial_setstrval(sial_newval(), fmt);
+	sial_free(fmt);
+	return v;
+}
+
+
+/*
+	When there is a parse error in a file.
+*/
+void
+sial_error(char *fmt, ...)
+{
+va_list ap;
+
+	sial_setlastfile(filename, sial_line(0));
+	va_start(ap, fmt);
+	fprintf(ofile, "File %s, line %d, Error: ", filename, sial_line(0));
+	vfprintf(ofile, fmt, ap);
+	fprintf(ofile, "\n");
+	va_end(ap);
+	sial_exit(1);
+}
+
+/******************************************************************
+   Debug messaging support.
+******************************************************************/
+static unsigned int dbglvl=0, clist=DBG_ALL;
+static char *dbg_name=0;
+unsigned int sial_getdbg(void)
+{
+    return(dbglvl);
+}
+
+void sial_setdbg(unsigned int lvl)
+{
+    if(lvl > 9)
+        sial_msg("Invalid debug level value.\n");
+    else
+        dbglvl=lvl;
+}
+char *sial_getname(void)
+{
+    return dbg_name;
+}
+
+void sial_setname(char *name)
+{
+    if(dbg_name) sial_free(dbg_name);
+    dbg_name=sial_strdup(name);
+}
+
+#define MAXCLASSES 10
+static struct {
+    char *name;
+    int class;
+} classes [MAXCLASSES] = {
+    { "type", DBG_TYPE },
+    { "struct", DBG_STRUCT },
+    { 0 },
+};
+
+char **sial_getclass(void)
+{
+int i,j;
+static char *ptrs[MAXCLASSES+1];
+
+    for(i=j=0;classes[i].name;i++) {
+        if(clist&classes[i].class) ptrs[j++]=classes[i].name;
+    }
+    ptrs[i]=0;
+    return ptrs;
+}
+
+void sial_setclass(char *cl)
+{
+int i,j;
+    
+    for(i=0;classes[i].name;i++) {
+        if(!strcmp(classes[i].name,cl)) {
+            clist |= classes[i].class;
+            return;
+        }
+    }
+    sial_msg("Invalid class '%s' specified.\n", cl);
+}
+
+static void
+sial_dbg_all(int class, char *name, int lvl, char *fmt, va_list ap)
+{
+    if(lvl<=dbglvl && (clist & class) && (!dbg_name || !strcmp(name, dbg_name))) {
+        fprintf(ofile, "dbg(%d) : ", lvl);
+        vfprintf(ofile, fmt, ap);
+    }
+}
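+/*
+ * Usage sketch (illustrative, not in the original source): after
+ * sial_setdbg(2), a call such as sial_dbg(DBG_TYPE, 2, "loaded\n") is
+ * printed as "dbg(2) : loaded" on ofile; messages with a level above the
+ * current one, or whose class is not enabled in clist, are silently dropped.
+ */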
+
+void
+sial_dbg(int class, int lvl, char *fmt, ...)
+{
+va_list ap;
+    va_start(ap, fmt);
+    sial_dbg_all(class, 0, lvl, fmt, ap);
+    va_end(ap);
+}
+
+void
+sial_dbg_named(int class, char *name, int lvl, char *fmt, ...)
+{
+va_list ap;
+    va_start(ap, fmt);
+    sial_dbg_all(class, name, lvl, fmt, ap);
+    va_end(ap);
+}
+/******************************************************************/
+
+void
+sial_rerror(srcpos_t *p, char *fmt, ...)
+{
+va_list ap;
+
+	sial_setlastfile(p->file, p->line);
+	va_start(ap, fmt);
+	fprintf(ofile, "%s : line %d : Error: ", p->file, p->line);
+	vfprintf(ofile, fmt, ap);
+	fprintf(ofile, "\n");
+	va_end(ap);
+	sial_exit(1);
+}
+
+void
+sial_warning(char *fmt, ...)
+{
+va_list ap;
+
+	sial_setlastfile(filename, sial_line(0));
+	va_start(ap, fmt);
+	fprintf(ofile, "%s : line %d : Warning: ", filename, lineno);
+	vfprintf(ofile, fmt, ap);
+	fprintf(ofile, "\n");
+	va_end(ap);
+}
+
+void
+sial_rwarning(srcpos_t *p, char *fmt, ...)
+{
+va_list ap;
+
+	sial_setlastfile(p->file, p->line);
+	va_start(ap, fmt);
+	fprintf(ofile, "%s : line %d : Warning: ", p->file, p->line);
+	vfprintf(ofile, fmt, ap);
+	fprintf(ofile, "\n");
+	va_end(ap);
+}
+
+void
+sial_vilast()
+{
+	if(lastfile) {
+
+		sial_exevi(lastfile, lastline);
+
+	} else {
+
+		sial_msg("No last error record available");
+	}
+}
+
+void
+sial_getcomment(void)
+{
+	while(1) {
+	
+	unsigned char c;
+
+		while((c=sial_input())!='*' && c!=255);
+
+		if(c==255) goto bad;
+
+		if((c=sial_input())=='/') return;
+		else if(c==255) {
+bad:
+			sial_error("Unterminated comment!");
+		}
+	}
+}
+
+/* on assignment this function is called to set the new value */
+void
+sial_setfct(value_t *v1, value_t *v2)
+{
+	/* duplicate type and data, safeguarding array info */
+	sial_dupval(v1, v2);
+
+	/* value_t v1 is still settable */
+	v1->set=1;
+	v1->setval=v1;
+}
+
+node_t *
+sial_sibling(node_t *n, node_t *m)
+{
+node_t *p;
+
+	if(m) {
+
+		for(p=n;p->next;p=p->next);
+		p->next=m;
+		m->next=0;
+	}
+	return n;
+}
+
--- crash/extensions/libsial/sialpp-lsed.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sialpp-lsed	2007-09-24 16:07:06.000000000 -0400
@@ -0,0 +1,32 @@
+s/yyback/sialppback/g
+s/yybgin/sialppbgin/g
+s/yycrank/sialppcrank/g
+s/yyerror/sialpperror/g
+s/yyestate/sialppestate/g
+s/yyextra/sialppextra/g
+s/yyfnd/sialppfnd/g
+s/yyin/sialppin/g
+s/yyinput/sialppinput/g
+s/yyleng/sialppleng/g
+s/yylex/sialpplex/g
+s/yylineno/sialpplineno/g
+s/yylook/sialpplook/g
+s/yylsp/sialpplsp/g
+s/yylstate/sialpplstate/g
+s/yylval/sialpplval/g
+s/yymatch/sialppmatch/g
+s/yymorfg/sialppmorfg/g
+s/yyolsp/sialppolsp/g
+s/yyout/sialppout/g
+s/yyoutput/sialppoutput/g
+s/yyprevious/sialppprevious/g
+s/yysbuf/sialppsbuf/g
+s/yysptr/sialppsptr/g
+s/yysvec/sialppsvec/g
+s/yytchar/sialpptchar/g
+s/yytext/sialpptext/g
+s/yytop/sialpptop/g
+s/yyunput/sialppunput/g
+s/yyvstop/sialppvstop/g
+s/yywrap/sialppwrap/g
+s/yydebug/sialdebug/g
--- crash/extensions/libsial/sial_api.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial_api.c	2007-10-30 11:13:53.000000000 -0400
@@ -0,0 +1,1516 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include "sial.h"
+#include "sial.tab.h"
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <setjmp.h>
+#include <pwd.h>
+#include <string.h>
+
+/* here we do some caching of the information. This can have a speedup effect
+   since it limits the number of accesses we make to the dwarf (or whatever type) db that
+   drives the type and symbol information
+ */
+
+static stinfo_t slist={"root"};
+
+/* running key to new structures */
+static ull nextidx=0, abitype=ABI_MIPS;
+#define LOCALTYPESBASE 0x8000000000000000ll
+static ull sial_nextidx(void) { return LOCALTYPESBASE+nextidx++; }
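+/* note: indexes handed out by sial_nextidx() have the LOCALTYPESBASE bit set,
+   which is how the (idx & LOCALTYPESBASE) tests below distinguish locally
+   created types from image (dwarf) type indexes */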
+
+/* this set of functions is used to clean up tdefs after their use.
+   Trailing tdefs can be cumbersome. Trailing struct/union/enum get a new idx
+   each time and are not a problem */
+static stinfo_t*tag=0;
+void
+sial_tagst(void)
+{
+	tag=slist.next;
+}
+
+void
+sial_flushtdefs(void)
+{
+stinfo_t*st=slist.next;
+stinfo_t*last=&slist;
+
+	while(st != tag) {
+
+		stinfo_t*next=st->next;
+
+		if(st->ctype.type==V_TYPEDEF && st->idx & LOCALTYPESBASE) {
+
+			sial_free(st->name);
+			sial_free(st);
+			last->next=next;
+
+		} else last=st;
+
+		st=next;
+
+	}
+	tag=0;
+}
+
+static stinfo_t*
+sial_getst(char *name, int type)
+{
+stinfo_t*tst;
+
+	for(tst=slist.next; tst; tst=tst->next) {
+
+		if(tst->ctype.type == type && tst->name && ! strcmp(tst->name, name)) {
+
+			return tst;	
+		}
+	}
+	return 0;
+}
+
+#if 0
+Not used yet.
+static void
+sial_rmst(stinfo_t*rst)
+{
+stinfo_t*st=slist.next;
+stinfo_t*last=&slist;
+
+	while(st) {
+
+		if(st==rst) {
+
+			last->next=st->next;
+			sial_free(st->name);
+			sial_free(st);
+
+			return;
+
+		} 
+
+		last=st;
+		st=st->next;
+	}
+}
+#endif
+
+stinfo_t*
+sial_getstbyindex(ull idx, int type)
+{
+stinfo_t*tst;
+
+	for(tst=slist.next; tst; tst=tst->next) {
+
+		if(tst->ctype.type == type && tst->idx == idx) {
+
+			return tst;	
+		}
+	}
+	return 0;
+}
+
+static void
+sial_addst(stinfo_t*st)
+{
+stinfo_t*tst;
+
+	tst=slist.next;
+	slist.next=st;
+	st->next=tst;
+}
+
+typedef struct neg_s {
+    struct neg_s *next;
+    char *name;
+} neg_t;
+
+static neg_t *nlist=0;
+
+void
+sial_addneg(char *name)
+{
+neg_t *neg;
+
+    neg=sial_alloc(sizeof *neg);
+    neg->name=sial_strdup(name);
+    neg->next=nlist;
+    nlist=neg;
+}
+
+int 
+sial_isneg(char *name)
+{
+neg_t *neg;
+
+    for(neg=nlist; neg; neg=neg->next) 
+        if(!strcmp(neg->name, name)) return 1;
+    return 0;
+}
+
+/*
+	This function is called by sial_vardecl() when the typedef storage class
+	has been specified, in which case we need to create new typedefs, not variables.
+*/
+void
+sial_tdef_decl(dvar_t*dv, type_t*t)
+{
+	while(dv) {
+
+		dvar_t*next;
+
+		stinfo_t*st=sial_calloc(sizeof(stinfo_t));
+
+		if(dv->nbits) sial_error("No bits fields for typedefs");
+		if(dv->idx) {
+
+			/* we change a 'typedef type var[n];' into a 'typedef type *var;' */
+			sial_freeidx(dv->idx);
+			dv->idx=0;
+			dv->ref++;
+		}
+#if 0
+At this time we do not give any error messages or warnings.
+If a type is redefined within a single file that will mean
+problems for the user, but this is not a full-blown C compiler.
+
+		{
+		type_t*t=sial_newtype();
+
+			if(API_GETCTYPE(V_TYPEDEF, dv->name, t)) {
+
+				sial_warning("Typedef %s already defined in image, redefinition ignored",
+					dv->name);
+			}
+			sial_freetype(t);
+		}
+#endif
+		t->typattr &= ~sial_istdef(t->typattr);
+		sial_duptype(&st->rtype, t);
+		sial_pushref(&st->rtype, dv->ref);
+		st->name=dv->name;
+		dv->name=0;
+		st->idx=sial_nextidx();
+		st->ctype.type=V_TYPEDEF;
+
+		sial_addst(st);
+		
+		next=dv->next;
+		dv->next=0;
+		sial_freedvar(dv);
+		dv=next;
+	}
+}
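+/* e.g. (illustrative): 'typedef struct foo foo_t[4];' is recorded by the code
+   above as if it had been written 'typedef struct foo *foo_t;' */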
+
+int
+sial_ispartial(type_t*t)
+{
+stinfo_t*st=sial_getstbyindex(t->idx, t->type);
+
+	if(!st) {
+
+		sial_error("Oops sial_ispartial");
+	}
+	return !st->all;
+}
+
+char *
+sial_gettdefname(ull idx)
+{
+stinfo_t*tst=sial_getstbyindex(idx, V_TYPEDEF);
+
+	if(tst) return tst->name;
+	else return 0;
+}
+
+static int init=0;
+static void
+sial_chkinit(void)
+{
+	if(!init) {
+
+		sial_error("Sial Package not initialized");
+
+	}
+}
+
+void
+sial_getmem(ull kp, void *p, int n)
+{
+	sial_chkinit();
+	if(!API_GETMEM(kp, p, n)) {
+
+		sial_error("Error on read from 0x%llx for %d", kp, n);
+
+	}
+}
+
+void
+sial_putmem(ull kp, char *p, int n)
+{
+	sial_chkinit();
+	if(!API_PUTMEM(kp, p,n)) {
+
+		sial_error("Error on write at 0x%llx for %d", kp, n);
+
+	}
+}
+
+void
+sial_partialctype(int type, char *name)
+{
+stinfo_t*st;
+
+	/* check first if we have a partial of that type
+	   already in progress (after a forward declaration) */
+	if((st=sial_getst(name, type))) {
+
+		/* if it's complete we need to start a new one */
+		if(!st->all) return;
+
+	}
+	st=sial_calloc(sizeof(stinfo_t));
+	st->name=sial_strdup(name);
+	st->ctype.type=type;
+	st->all=0;
+	st->ctype.idx=st->idx=sial_nextidx();
+	sial_addst(st);
+}
+
+void
+sial_startctype_named(int type, char *name)
+{
+stinfo_t*st;
+
+	/* if no partial yet start one */
+	if(!(st=sial_getst(name, type)) || st->all)
+		sial_partialctype(type, name);
+}
+
+void
+sial_startctype(int type, node_t*namen)
+{
+	sial_startctype_named(type, NODE_NAME(namen));
+}
+
+int
+sial_samectypename(int type, ull idx1, ull idx2)
+{
+stinfo_t*st1, *st2;
+
+	if((st1=sial_getstbyindex(idx1, type)) &&
+	   (st2=sial_getstbyindex(idx2, type))) {
+
+		// check names
+                if(!strcmp(st1->name, st2->name)) return 1;
+                
+                // check all members and sizes in order
+                // unnamed ctypes can end up here too...
+                if(st1->stm) {
+                    stmember_t *m1=st1->stm, *m2=st2->stm;
+                    while(m1 && m2) {
+                        if(strcmp(m1->m.name, m2->m.name)) break;
+                        if(m1->m.offset != m2->m.offset ) break;
+                        if(m1->m.size != m2->m.size ) break;
+                        m1=m1->next;
+                        m2=m2->next;
+                    }
+                    if(!m1 && !m2) return 1;
+                }
+                else if(st1->enums) {
+                
+                    enum_t *e1=st1->enums, *e2=st2->enums;
+                    while(e1 && e2) {
+                        if(strcmp(e1->name, e2->name)) break;
+                        if(e1->value != e2->value ) break;
+                        e1=e1->next;
+                        e2=e2->next;
+                    }
+                    if(!e1 && !e2) return 1;
+                }
+
+	}
+	return 0;
+}
+
+#define VOIDIDX 0xbabebabell
+type_t*
+sial_getvoidstruct(int ctype)
+{
+type_t*bt=sial_newtype();
+
+	bt->type=ctype;
+	bt->idx=VOIDIDX;
+	bt->size=0;
+	bt->ref=0;
+	return bt;
+}
+
+void sial_fillst(stinfo_t *st);
+
+/* If this is an unnamed structure member then we need
+   to add it to the slist ourselves using the index. sial_getctype() would
+   not find it.
+*/
+static void
+sial_memstinfo(stmember_t *stm, char *pname)
+{
+int type=stm->type.ref?stm->type.rtype:stm->type.type;
+
+	if(is_ctype(type)) {
+
+		if(!sial_getstbyindex(stm->type.idx, type)) {
+
+			stinfo_t*st=sial_calloc(sizeof(stinfo_t));
+
+			sial_duptype(&st->ctype, &stm->type);
+			st->ctype.type=type;
+                        // dereference level is attached (wrongly) to type...
+                        // zap it
+                        st->ctype.ref=0;
+			st->idx=st->ctype.idx;
+			st->name=sial_strdup(pname);
+			sial_addst(st);
+		}
+	}
+}
+
+void
+sial_fillst(stinfo_t *st)
+{
+char *mname=0;
+ull idx=st->ctype.idx, lidx=0;
+stmember_t *stm=sial_calloc(sizeof(stmember_t)), **last=&st->stm;
+char *pname;
+
+        sial_dbg_named(DBG_STRUCT, st->name, 2, "Fill St started [local=%d].\n", (idx & LOCALTYPESBASE) ? 1 : 0);
+	/* bail out if this is local type */
+	if(idx & LOCALTYPESBASE) return;
+
+	if(st->stm) sial_error("Oops sial_fillst!");
+
+	while((pname=API_MEMBER(mname, idx,  &stm->type, &stm->m, &lidx))) {
+
+                sial_dbg_named(DBG_STRUCT, st->name, 2, "member '%s'\n", pname);
+		sial_memstinfo(stm, pname);
+		stm->next=0;
+		*last=stm;
+		last=&stm->next;
+		mname="";
+		stm=sial_calloc(sizeof(stmember_t));
+		if(pname[0]) sial_free(pname);
+	}
+	st->all=1;
+	sial_free(stm);
+}
+
+type_t*
+sial_getctype(int ctype, char *name, int silent)
+{
+stinfo_t *st;
+type_t *t=sial_newtype();
+
+	sial_chkinit();
+        sial_dbg_named(DBG_TYPE, name, 2, "getctype [%d] [%s] [s=%d]\n", ctype, name, silent);
+	if(!(st=sial_getst(name, ctype))) {
+
+                sial_dbg_named(DBG_TYPE, name, 2, "getctype [%s] not found in cache\n", name);
+                if(silent && sial_isneg(name)) return 0;
+
+		st=sial_calloc(sizeof(stinfo_t));
+		if(!API_GETCTYPE(ctype, name,  &st->ctype)) {
+
+                        sial_dbg_named(DBG_TYPE, name, 2, "[%s] not found in image\n", name);
+			sial_free(st);
+			sial_freetype(t);
+                        // add any tdef to the neg list
+                        if(ctype == V_TYPEDEF) sial_addneg(name);
+			if(silent) return 0;
+			/* we fill a partial structure for this one
+			   assuming it will be defined later. This is to permit cross
+			   referencing of structures, self-referencing structures, and
+			   undefined structures (opaque structures) irix: see types.c :
+			   __pasid_opaque  
+			*/
+                        sial_dbg_named(DBG_TYPE, name, 2, "[%s] creating partial type\n", name);
+			sial_partialctype(ctype, name);
+			return sial_getctype(ctype, name, silent);
+		}
+                sial_dbg_named(DBG_TYPE, name, 2, "getctype [%s] found in image\n", name);
+		st->name=sial_alloc(strlen(name)+1);
+		strcpy(st->name, name);
+		st->stm=0;
+		st->idx=st->ctype.idx;
+		st->all=1;
+		sial_addst(st);
+		/*
+		  if this is a typedef then drill down to the real type
+		  and make sure it is in the cache. That's what we return
+
+		  Bug cure: this would fail:
+
+			struct sv {
+        			int i;
+			};
+			struct foo {
+        			sv_t    ms_sv;
+			};
+
+			Because the rtype index returned by API_GETRTYPE() is the die offset
+			in the image. If we have already redefined the real type locally, the
+			call to sial_getctype() will not have a matching index later when we
+			don't find the index in the type cache.
+
+			So we track the real index with ridx. This also ensures that 
+			redefining a struct locally and using a typedef from the image will actually
+			end up pointing to the local struct and not the image struct.
+		*/
+		if(ctype == V_TYPEDEF) {
+
+			char *tname;
+			int itype;
+
+			tname=API_GETRTYPE(st->idx, t);
+
+			if(t->type==V_REF) itype=t->rtype;
+			else itype=t->type;
+
+			/* if it's a named struct, enum or union then make sure we have it in the cache */
+			if(is_ctype(itype) && tname && tname[0] && 
+				(strcmp(tname,"struct ") != 0 
+				    && strcmp(tname,"union ") != 0
+				    && strcmp(tname,"enum ") != 0)) {
+
+				sial_freetype(t);
+				t=sial_getctype(itype, tname, silent);
+
+				/* in IRIX we have a typedef struct __pasid_opaque* aspasid_t;
+				   w/ no struct __pasid_opaque defined. The aspasid_t ends
+				   up being used as a "named" void *. So we force a void * here */
+				/* XXX: This should at least generate a warning */
+				if(!t) {
+				    sial_warning("voidstruct created (%s)\n", tname);
+				    t=sial_getvoidstruct(itype);
+				}
+			} else if (is_ctype(itype) || itype == V_ENUM) {
+				
+				/* for unnamed structs, unions and enums create an entry */
+	                        stinfo_t*st=sial_calloc(sizeof(stinfo_t));
+
+                       		sial_duptype(&st->ctype, t);
+                        	st->idx=t->idx;
+                        	st->name=sial_strdup("");
+				sial_fillst(st);
+                        	sial_addst(st);
+			}
+			sial_duptype(&st->rtype, t);
+                        
+		} else if(is_ctype(ctype)) {
+
+			/* get all member info now ! */
+			sial_fillst(st);
+		}
+	}
+        else sial_dbg_named(DBG_TYPE, name, 2, "getctype [%s] found in cache\n", name);
+
+	if(ctype == V_ENUM || (ctype == V_TYPEDEF && st->rtype.type == V_ENUM)) {
+	    st->enums=API_GETENUM(name);
+	    sial_pushenums(st->enums);
+	}
+	if(ctype==V_TYPEDEF) sial_duptype(t, &st->rtype);
+	 else sial_duptype(t, &st->ctype);
+
+	return t;
+}
+
+type_t*
+sial_newctype(int ctype, node_t*n)
+{
+type_t*t;
+char *name;
+
+	t=sial_getctype(ctype, name=NODE_NAME(n), 0);
+	NODE_FREE(n);
+	sial_free(name);
+	return t;
+}
+
+/*
+	We don't use the type to point back to get the typedef name.
+	The type is now the real type not the type for the typedef.
+	So we keep a running string of the last variable name
+	the parser found and use that.
+	5/23/00
+*/
+node_t*
+sial_tdeftovar(type_t*td)
+{
+char *sial_lastvar(void);
+char *name=sial_lastvar();
+
+	sial_free(td);
+	return sial_newvnode(name);
+}
+
+/*
+	Check to see if a cached member info is available
+*/
+static stmember_t*
+sial_getm(char *name, type_t*tp, stinfo_t**sti)
+{
+ull idx=tp->idx;
+stinfo_t*st;
+stmember_t*stm;
+
+	for(st=slist.next; st; st=st->next) {
+
+		if(st->idx == idx) {
+
+			*sti=st;
+
+			if(!st->stm) sial_fillst(st);
+
+			for(stm=st->stm; stm; stm=stm->next) {
+
+
+				if(!strcmp(stm->m.name, name)) {
+
+					return stm;
+
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+value_t *
+sial_ismember(value_t*vp, value_t*vm)
+{
+char *name=sial_getptr(vm, char);
+int ret=0;
+stinfo_t*st;
+
+	if(sial_getm(name, &vp->type, &st)) ret=1;
+
+	return sial_defbtype(sial_newval(), ret);
+}
+
+/* XXX all of this could very well be machine specific ... */
+static int
+sial_getalign(type_t*t)
+{
+	/* this is a custom type, deal with it */
+	if(t->type == V_BASE) {
+
+		int n;
+
+		/* Intel 386 ABI says that double values align on 4 bytes */
+		if(abitype==ABI_INTEL_X86) n=((t->size>4)?4:t->size);
+		else  n=t->size;
+		return n*8;
+	}
+	if(t->type == V_REF) {
+		/*
+		 * This is an array but if there are additional references
+		 * (>1) it is an array of pointers. In that case the pointer
+		 * alignment has to be used.
+		 */
+		if(t->idxlst && t->ref == 1) {
+			int ret;
+
+			sial_popref(t, 1);
+			ret=sial_getalign(t);
+			sial_pushref(t, 1);
+			return ret;
+		}
+		return sial_defbsize()*8;
+	}
+	/* the alignment of a struct/union is that of its largest member or the
+	   largest alignment of its sub-structures */
+	if(is_ctype(t->type)) {
+
+		stinfo_t*st;
+		stmember_t*sm;
+		int maxallign=0;
+
+		/* if this is an image type then let the api tell us */
+		if(!(t->idx & LOCALTYPESBASE)) {
+
+			return API_ALIGNMENT(t->idx)*8;
+
+		}
+	
+		if(!(st=sial_getstbyindex(t->idx, t->type))) {
+
+			sial_error("Oops sial_getalign");
+		}
+
+		for(sm=st->stm; sm; sm=sm->next) {
+
+			int a=sial_getalign(&sm->type);
+
+			if(a > maxallign) maxallign=a;
+
+		}
+
+		return maxallign;
+
+	}
+	/* other types should not be part of a ctype declaration ... */
+	sial_error("Oops sial_getalign2!");
+	return 0;
+}
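+/*
+ * Illustrative example (not in the original source): under the default
+ * MIPS-style ABI with an 8-byte default base size, a locally declared
+ * 'struct { char c; long long ll; }' gets the alignment of its largest
+ * member from the loop above, i.e. 64 bits.
+ */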
+
+static stinfo_t*
+sial_chkctype(int ctype, char *name)
+{
+stinfo_t*sti;
+
+	if(name) {
+
+		/* we should already have a partial structure on the stack */
+		sti=sial_getst(name, ctype);
+
+#if 0
+At this time I choose not to give any warning.
+Structure redefinition is a normal part of include files... 
+
+		/* We give a warning message for redefined types */
+		{
+		type_t*t=sial_newtype();
+
+			if(API_GETCTYPE(ctype, name, t)) {
+
+				sial_warning("%s %s redefinition", sial_ctypename(ctype), name);
+			}
+			sial_freetype(t);
+		}
+#endif
+
+		if(sti->all) {
+
+			sial_error("Oops sial_ctype_decl");
+		}
+
+		sial_free(name);
+
+	} else {
+
+		sti=sial_alloc(sizeof(stinfo_t));
+		sti->name=0;
+		sti->idx=sial_nextidx();
+		sial_addst(sti);
+	}
+	return sti;
+}
+
+/*
+	This function is used to create new enum types.
+	The syntax for enum is:
+	enum ident {
+		ident [= int],
+		[ident [= int] ] ...
+	};
+	So we check for an assigned value and if it exists we
+	reset the counter to it.
+	This is the way the mips compiler does it. Which might be
+	the right way or not, although I fail to see why it's done
+	that way.
+
+	So enum foo {
+		a,
+		b,
+		c=0,
+		d
+	};
+
+	Will yield the values :
+
+	a=0
+	b=1
+	c=0
+	d=1
+*/
+enum_t*
+sial_add_enum(enum_t*ep, char *name, int val)
+{
+enum_t *epi, *nep=sial_alloc(sizeof(enum_t));
+
+	nep->name=name;
+	nep->value=val;
+	nep->next=0;
+	if(!ep) return nep;
+	epi=ep;
+	while(ep->next) ep=ep->next;
+	ep->next=nep;
+	return epi;
+}
+	
+type_t*
+sial_enum_decl(int ctype, node_t*n, dvar_t*dvl)
+{
+dvar_t*dv=dvl, *next;
+int counter=0;
+stinfo_t*sti;
+enum_t *ep=0;
+char *name=n?NODE_NAME(n):0;
+type_t *t;
+
+	if(n) sial_startctype(ctype, n);
+	sti=sial_chkctype(ctype, name);
+
+	while(dv) {
+
+		int val;
+
+		/* evaluate an assignment ? */
+		if(dv->init) {
+
+			value_t *v=sial_exenode(dv->init);
+
+			if(!v) {
+
+				sial_rerror(&dv->pos, "Syntax error in enum expression");
+
+			} else if(v->type.type != V_BASE) {
+
+				sial_rerror(&dv->pos, "Integer expression needed");
+			}
+
+			val=sial_getval(v);
+			counter=val+1;
+			sial_freeval(v);
+
+		} else {
+
+			val=counter++;
+		}
+
+		ep=sial_add_enum(ep, dv->name, val);
+
+		next=dv->next;
+		dv->next=0;
+		dv->name=0;
+		sial_freedvar(dv);
+		dv=next;
+	}
+	sti->enums=ep;
+
+	/* now we push the values in the defines */
+	sial_pushenums(sti->enums);
+
+	/* we return a simple basetype_t*/
+	/* after stahing the idx in rtype */
+	t=sial_newbtype(INT);
+	t->rtype=sti->idx;
+	t->typattr |= sial_isenum(-1);
+		
+	return t;
+	
+}
+
+/*
+	The next functions are used to produce a new type
+	and make it available through the local cache.
+	This enables custom type definitions on top of the
+ 	ctypes defined in the object symbol tables.
+
+	There is one function per supported architecture.
+
+*/
+/* macro for alignment to a log2 boundary */
+#define Alignto(v, a) (((v) + (a) -1) & ~((a)-1))
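+/* e.g. (illustrative) Alignto(13, 8) == 16 and Alignto(16, 8) == 16;
+   'a' is expected to be a power of two */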
+/*
+	The algorith complies with the SysV mips ABI
+	The algorithm complies with the SysV MIPS ABI
+type_t*
+sial_ctype_decl(int ctype, node_t*n, var_t*list)
+{
+type_t*t;
+stinfo_t*sti;
+stmember_t **mpp;
+var_t*v;
+int bits_left, bit_alignment;
+int maxbytes, alignment, nextbit;
+char *name=n?NODE_NAME(n):0;
+
+	if(list->next==list) {
+
+		sial_error("Empty struct/union/enum declaration");
+	}
+
+	t=sial_newbtype(0);
+	sti=sial_chkctype(ctype, name);
+	t->type=sti->ctype.type=ctype;
+	t->idx=sti->ctype.idx=sti->idx;
+	sti->stm=0;
+	mpp=&sti->stm;
+
+#if LDEBUG
+printf("\n%s %s\n", ctype==V_STRUCT?"Structure":"Union", name ? name : "");
+#endif
+
+	/* these are the running positions in the structure/union */
+	nextbit=0;	/* next bit open for business */
+	alignment=0;	/* keeps track of the structure alignment
+			   Mips ABI says align to the biggest alignment of
+			   all members of the struct/union. Also,
+			   unnamed bit fields do not participate here. */
+	maxbytes=0;	/* tracking of the maximum member size for union */
+
+	for(v=list->next; v!=list; v=v->next) {
+
+		stmember_t*stm=sial_calloc(sizeof(stmember_t));
+		dvar_t*dv=v->dv;
+		int nbits;
+
+		stm->m.name=sial_strdup(v->name);
+		sial_duptype(&stm->type, &v->v->type);
+
+		/* if this member is a bit field simply use that */
+		if(dv->bitfield) {
+
+			nbits=dv->nbits;
+
+			/* alignment is the size of the declared base type */
+			bit_alignment=v->v->type.size*8;
+
+			if(nbits > bit_alignment) {
+
+				sial_error("Too many bits for specified type");
+			}
+
+			/* For an unnamed bit field align to the smallest entity */
+			/* except for 0-bit bit fields */
+			if(!dv->name[0] && nbits) {
+
+				bit_alignment=((nbits+7)/8)*8;
+
+			} 
+
+			/* We compute the number of bits left in this entity */
+			bits_left = bit_alignment - (nextbit%bit_alignment);
+
+			/* 0 bits means: jump to the next alignment unit anyway
+			   if not already on such a boundary */
+			if(!nbits && (bits_left != bit_alignment)) nbits=bits_left;
+
+			/* Not enough space ? */
+			if(nbits > bits_left) {
+
+				/* jump to next start of entity */
+				nextbit += bits_left;
+
+			}
+
+			/* update member information */
+			stm->m.offset=(nextbit/bit_alignment)*v->v->type.size;
+			stm->m.fbit=nextbit % bit_alignment;
+			stm->m.nbits=nbits;
+			stm->m.size=v->v->type.size;
+#if LDEBUG
+			printf("    [%s] Bit member offset=%d, fbit=%d, nbits=%d\n", stm->m.name, stm->m.offset,  stm->m.fbit, stm->m.nbits);
+#endif
+			/* an unnamed bit field does not participate in the alignment value */
+			if(!dv->name[0]) {
+	
+				bit_alignment=0;
+
+				/* reset size so that it does not have any effect in sial_getalign() */
+				stm->type.size=1;
+			}
+
+		} else {
+
+			int nidx=1;
+
+			if(dv->idx) {
+
+				int i;
+
+				/* flag it */
+				stm->type.idxlst=sial_calloc(sizeof(int)*(dv->idx->nidx+1));
+
+				/* multiply all the [n][m][o]'s */
+				for(i=0;i<dv->idx->nidx;i++) {
+
+					value_t *vidx;
+					ull idxv;
+
+					vidx=sial_exenode(dv->idx->idxs[i]);
+					if(!vidx) {
+
+						sial_error("Error while evaluating array size");
+					}
+					if(vidx->type.type != V_BASE) {
+
+						sial_freeval(vidx);
+						sial_error("Invalid index type");
+
+					}
+
+					idxv=sial_getval(vidx);
+					sial_freeval(vidx);
+
+					stm->type.idxlst[i]=idxv;
+
+					nidx *= idxv;
+				}
+			
+
+			}
+
+			/* the number of bits on which this item aligns itself */
+			bit_alignment=sial_getalign(&stm->type);
+
+			/* jump to this boundary */
+			nextbit = Alignto(nextbit,bit_alignment);
+
+
+			if(stm->type.ref - (dv->idx?1:0)) {
+
+				nbits=nidx*sial_defbsize()*8;
+
+			} else {
+
+				nbits=nidx*stm->type.size*8;
+			}
+
+			if(abitype==ABI_INTEL_X86) {
+
+				int pos=nextbit/8;
+
+				pos = (pos & 0xfffffffc) + 3 - (pos & 0x2);
+				stm->m.offset=pos;
+
+			} else {
+
+				stm->m.offset=nextbit/8;
+			}
+			stm->m.nbits=0;
+			stm->m.size=nbits/8;
+#if LDEBUG
+printf("    [%s] Mmember offset=%d, size=%d size1=%d nidx=%d\n", stm->m.name, stm->m.offset, stm->m.size, stm->type.size, nidx);
+#endif
+
+		}
+
+		if(ctype==V_STRUCT) nextbit+=nbits;
+		     /* Union members overlap */
+		else nextbit=0;
+
+		/* keep track of the maximum alignment */
+		if(bit_alignment>alignment) alignment=bit_alignment;
+
+		/* keep track of maximum size for unions */
+		if(stm->m.size > maxbytes) maxbytes=stm->m.size;
+
+		stm->next=0;
+		*mpp=stm;
+		mpp=&stm->next;
+	}
+
+	/* pad the final structure according to its most strictly aligned member */
+	if(nextbit) nextbit = Alignto(nextbit, alignment);
+	else nextbit=Alignto(maxbytes*8, alignment); /* --> this is the case for a union */
+
+	t->size=sti->ctype.size=nextbit/8;
+
+#if LDEBUG
+printf("Final size = %d\n", t->size);
+#endif
+
+	sti->all=1;
+	sial_addfunc_ctype(sti->idx);
+	return t;
+}
+
+/*
+   member access and caching.
+   If the member name is empty then the caller wants us
+   to populate the entire engregate. The apimember() should
+   to populate the entire aggregate. The apimember() should
+   support a getfirst() (member name == "") and getnext()
+   (member name != "") for this purpose.
+stmember_t*
+sial_member(char *mname, type_t*tp)
+{
+stinfo_t *sti;
+stmember_t *stm;
+
+	if(!is_ctype(tp->type) && ! (tp->type==V_REF && is_ctype(tp->rtype))) {
+
+		sial_error("Expression for member '%s' is not a struct/union", mname);
+
+	
+	}
+
+	if(tp->idx == VOIDIDX) {
+
+		sial_error("Reference to member (%s) from unknown structure type", mname);
+	}
+
+	if(!(stm=sial_getm(mname, tp, &sti))) {
+
+			sial_error("Unknown member name [%s]", mname);
+	}
+	return stm;
+}
+
+int
+sial_open()
+{
+	sial_setofile(stdout);
+	/* push an empty level for parsing allocation */
+	sial_pushjmp(0, 0, 0);
+	sial_setapiglobs();
+	init=1;
+	sial_setbuiltins();
+	return 1;
+}
+
+/* here is a set of api functions that do nothing */
+static int apigetmem(ull iaddr, void *p, int nbytes) { return 1; }
+static int apiputmem(ull iaddr, void *p, int nbytes) { return 1; }
+static char* apimember(char *mname,  ull pidx, type_t*tm, member_t *m, ull *lidx) { return 0; }
+static int apigetctype(int ctype, char *name, type_t*tout) { return 0; }
+static char * apigetrtype(ull idx, type_t*t) { return ""; }
+static int apialignment(ull idx) { return 0; }
+static int apigetval(char *name, ull *val) { return 0; }
+static enum_t* apigetenum(char *name) { return 0; }
+static def_t *apigetdefs(void) { return 0; }
+static char* apifindsym(char *p) { return 0; }
+
+static apiops nullops= {
+	apigetmem, apiputmem, apimember, apigetctype, apigetrtype, apialignment,
+	apigetval, apigetenum, apigetdefs, 0, 0, 0, 0, apifindsym
+};
+
+apiops *sial_ops=&nullops;
+
+void
+sial_apiset(apiops *o, int abi, int nbpw, int sign)
+{
+def_t *dt;
+
+	sial_ops=o?o:&nullops;
+	sial_setdefbtype(nbpw, sign);
+	/* get the pre defines and push them. */
+	dt=API_GETDEFS();
+	while(dt) {
+
+		sial_newmac(dt->name, dt->val, 0, 0, 1);
+		dt=dt->next;
+	}
+	/* add the sial define */
+	sial_newmac(sial_strdup("sial"), sial_strdup("1"), 0, 0, 1);
+}
+
+/*
+	Get and set path functions.
+	ipath is the include file search path.
+	mpath is the macro search path.
+*/
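+/* Both are colon-separated directory lists (see the strtok(p, ":") loop in
+   sial_filepath() below); a value such as "/usr/share/sial:/home/me/sial"
+   is purely illustrative. */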
+static char *mpath="";
+static char *ipath="";
+void sial_setmpath(char *p) { mpath=p; }
+void sial_setipath(char *p) { ipath=p; }
+char *sial_getmpath(void) { return mpath; }
+char *sial_getipath(void) { return ipath; }
+
+static char *curp=0;
+char *sial_curp(char *p) { char *op=curp; p?(curp=p):(op=curp); return op; }
+
+static char*
+sial_cattry(char *first, char *second)
+{
+struct stat stats;
+char *buf=sial_alloc(strlen(first)+strlen(second)+2);
+
+	strcpy(buf, first);
+	strcat(buf, "/");
+	strcat(buf, second);
+	if(!stat(buf, &stats)) return buf;
+	sial_free(buf);
+	return 0;
+}
+
+char *
+sial_filepath(char *fname, char *path)
+{
+	struct stat buf;
+	/* valid file path, return immediately */
+	if(stat(fname,&buf) == 0) {
+		/* must return a free'able name */
+		char *name=sial_strdup(fname);
+		TAG(name);
+		return name;
+
+	} else if(fname[0]=='~') {
+
+		if(strlen(fname)>1) {
+
+			char *rname, *start;
+			struct passwd *pwd;
+
+			if(fname[1]=='/') {
+
+				/* current user name */
+				pwd=getpwuid(getuid());
+
+				if(!pwd) {
+					sial_msg("Who are you : uid=%d ?\n", getuid());
+					return 0;
+				}
+
+				start=fname+1;
+
+			} else {
+
+				char *p, s;
+
+				for(p=fname+1;*p;p++) if(*p=='/') break;
+				s=*p;
+				*p='\0';
+
+				/* other user */
+				pwd=getpwnam(fname+1);
+				if(!pwd) {
+
+					sial_msg("Who is this : %s ?\n", fname+1);
+					return 0;
+				}
+				if(s) *p=s;
+				start=p;
+			}
+			rname=sial_alloc(strlen(start+1)+strlen(pwd->pw_dir)+2);
+			strcpy(rname, pwd->pw_dir);
+			strcat(rname, start);
+			return rname;
+		}
+
+	} else {
+
+		char *p=sial_strdup(path);
+		char *tok, *curp;
+
+		/* we check if the file is found relative to the current
+		   position, i.e. the position of the running script */
+		if((curp=sial_curp(0)) && (curp=sial_cattry(curp, fname))) {
+
+			sial_free(p);
+			return curp;
+		}
+
+		tok=strtok(p, ":");
+		while(tok) {
+
+			if((curp=sial_cattry(tok, fname))) {
+
+				sial_free(p);
+				return curp;
+			}
+			tok=strtok(NULL, ":");
+
+		}
+		sial_free(p);
+	}
+	return 0;
+}
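+/*
+ * Behavior sketch (file names are illustrative): the name is first tried
+ * as-is; a leading '~' expands to the current user's home ("~/x.sial") or to
+ * the named user's home ("~bob/x.sial", via getpwnam()); anything else is
+ * searched relative to the running script and then in each directory of the
+ * given colon-separated path.
+ */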
+
+char*
+sial_filempath(char *fname) 
+{
+	return sial_filepath(fname, mpath);
+}
+
+char *
+sial_fileipath(char *fname) 
+{
+	return sial_filepath(fname, ipath);
+}
+
+/* load a single file or a whole directory of files */
+int
+sial_loadunload(int load, char *name, int silent)
+{
+DIR *dirp;
+int ret=1;
+char *fname=sial_filempath(name);
+
+	if(!fname) {
+
+		if(!silent) sial_msg("File not found : %s\n", name);
+		return 0;
+	}
+
+	if((dirp=opendir(fname))) {
+
+		struct dirent *dp;
+		char *buf;
+
+		while ((dp = readdir(dirp)) != NULL) {
+
+			if (!strcmp(dp->d_name, ".") || !strcmp(dp->d_name, ".."))
+				continue;
+
+			buf=sial_alloc(strlen(fname)+dp->d_reclen+2);
+			sprintf(buf, "%s/%s", fname, dp->d_name);
+			if(load) {
+				ret &= sial_newfile(buf, silent);
+			}else{
+				sial_deletefile(buf);
+			}
+			sial_free(buf);
+		}
+		closedir(dirp);
+	}
+	else {
+
+		if(load) {
+			ret=sial_newfile(fname, silent);
+		}else{
+			sial_deletefile(fname);
+		}
+	}
+ 	sial_free(fname); 
+	return ret;
+}
+
+/*
+	Load conditionally.
+	If it's already loaded, return.
+*/
+ull
+sial_depend(char *name)
+{
+char *fname=sial_filempath(name);
+int ret=1 ;
+void *fp;
+
+	if(!fname) ret=0;
+	else if(!(fp=sial_findfile(fname,0)) || sial_isnew(fp)) {
+
+		ret=sial_loadunload(1, name, 1);
+		sial_free(fname);
+	}
+	return ret;
+}
+
+value_t *
+sial_bdepend(value_t *vname)
+{
+	return sial_makebtype(sial_depend(sial_getptr(vname, char)));
+}
+
+ull 
+sial_load(char *fname)
+{
+	return sial_loadunload(1, fname, 0);
+}
+
+value_t*
+sial_bload(value_t *vfname)
+{
+char *fname=sial_getptr(vfname, char);
+value_t *v;
+
+	v=sial_makebtype(sial_load(fname));
+	return v;
+}
+
+ull
+sial_unload(char *fname)
+{
+	return sial_loadunload(0, fname, 0);
+}
+
+value_t*
+sial_bunload(value_t *vfname)
+{
+char *fname=sial_getptr(vfname, char);
+
+	return sial_defbtype(sial_newval(), sial_unload(fname));
+}
+
+void
+sial_loadall()
+{
+char *path=sial_strdup(sial_getmpath());
+char *p, *pn;
+
+	p=pn=path;
+	while(*pn) {
+
+		if(*pn == ':') {
+
+			*pn++='\0';
+			sial_loadunload(1, p, 1);
+			p=pn;
+
+		} else pn++;
+	}
+	if(p!=pn) sial_loadunload(1, p, 1);
+	/* sial_free(path); */
+}
+
+static void
+add_flag(var_t*flags, int c)
+{
+char s[20];
+var_t *v;
+
+	sprintf(s, "%cflag", c);
+	v=sial_newvar(s);
+	sial_defbtype(v->v, (ull)0);
+	v->ini=1;
+	sial_enqueue(flags, v);
+}
+
+int
+sial_cmd(char *fname, char **argv, int argc)
+{
+value_t *idx, *val;
+
+	sial_chkinit();
+
+	if(sial_chkfname(fname, 0)) {
+
+		var_t*flags, *args, *narg;
+		char *opts, *newn=sial_alloc(strlen(fname)+sizeof("_usage")+1);
+		int c, i;
+		extern char *optarg;
+		extern int optind;
+		int dou;
+		char *f=sial_strdup("Xflag");
+
+		flags=(var_t*)sial_newvlist();
+
+		/* build a complete list of option variables */
+		for(c='a';c<='z';c++) add_flag(flags, c);
+		for(c='A';c<='Z';c++) add_flag(flags, c);
+
+		/* check if there is a getopt string associated with this command */
+		/* there needs to be a fname_opt() and a fname_usage() function */
+		sprintf(newn, "%s_opt", fname);
+
+		if(sial_chkfname(newn, 0)) opts=(char*)(unsigned long)sial_exefunc(newn, 0);
+		else opts="";
+
+		sprintf(newn, "%s_usage", fname);
+		dou=sial_chkfname(newn, 0);
+
+		/* build a set of variables from the given list of arguments */
+		/* each option generates a corresponding flag, e.g. -X sets Xflag to one,
+		   and the corresponding argument of a ":" option goes into e.g. Xarg;
+		   each additional argument is kept in the array args[] */
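+		/* illustrative example (hypothetical command and options): if
+		   fname_opt() returns "ax:", then 'cmd -a -x foo bar' yields
+		   aflag=1, xflag=1, xarg="foo", argv[0]="cmd", argv[1]="bar"
+		   and argc=2 */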
+
+		if(opts[0]) {
+
+#ifdef linux
+			optind=0;
+#else
+			getoptreset();
+#endif
+			while ((c = getopt(argc, argv, opts)) != -1) {
+
+				var_t*flag, *opt;
+				char *a=sial_strdup("Xarg");
+
+				if(c==':') {
+
+					sial_warning("Missing argument(s)");
+					if(dou) sial_exefunc(newn, 0);
+					sial_free(a);
+					goto out;
+
+				} else if(c=='?') {
+
+					if(dou) {
+
+						char *u=(char*)(unsigned long)sial_exefunc(newn, 0);
+
+						if(u) sial_msg("usage: %s %s\n", fname, u);
+					}
+					sial_free(a);
+					goto out;
+				}
+
+	
+				/* set the Xflag variable  to 1 */
+				f[0]=c;
+				flag=sial_inlist(f, flags);
+				sial_defbtype(flag->v, (ull)1);
+				flag->ini=1;
+
+				/* create the Xarg variable */
+				if(optarg && optarg[0]) {
+
+					char *p=sial_alloc(strlen(optarg)+1);
+
+					a[0]=c;
+					strcpy(p, optarg);
+					opt=(var_t*)sial_newvar(a);
+					sial_setstrval(opt->v, p);
+					opt->ini=1;
+					sial_enqueue(flags, opt);
+				}
+				sial_free(a);
+			}
+			sial_free(f);
+		}
+		else optind=1;
+
+		/* put all remaining args into the argv[] array */
+		args=(var_t*)sial_newvar("argv");
+		args->ini=1;
+
+		/* create a argv[0] with the name of the command */
+		{
+
+			val=sial_makestr(fname);
+			idx=sial_makebtype(0);
+
+			/* create the value's value */
+			sial_addarrelem(&args->v->arr, idx, val);
+			sial_freeval(idx);
+		}
+
+		for ( i=1; optind < argc; optind++, i++) {
+
+			val=sial_makestr(argv[optind]);
+			idx=sial_makebtype(i);
+
+			/* create the value's value */
+			sial_addarrelem(&args->v->arr, idx, val);
+			sial_freeval(idx);
+		}
+
+		narg=(var_t*)sial_newvar("argc");
+		sial_defbtype(narg->v, i);
+		narg->ini=1;
+
+		sial_enqueue(flags, narg);
+
+		/* add the args variable to the flags queue */
+		sial_enqueue(flags, args);
+
+		/* now execute */
+		sial_runcmd(fname, flags);
+
+out:
+		/* free all argument variables: Xflag, Xarg and argv[] */
+		sial_freesvs(flags);
+
+		sial_free(newn);
+		return 0;
+	}
+	return 1;
+}
+
--- crash/extensions/libsial/sial.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/libsial/sial.h	2007-10-30 11:13:53.000000000 -0400
@@ -0,0 +1,465 @@
+/*
+ * Copyright 2001 Silicon Graphics, Inc. All rights reserved.
+ */
+#include "sial_api.h"
+typedef unsigned long long caddr;
+
+#define SRCPOS_S	struct srcpos_s
+#define DVAR_S		struct dvar_s
+#define CASELIST_S	struct caselist_s
+#define CASEVAL_S	struct caseval_s
+#define STMEMBER_S	struct stmember_s
+#define STINFO_S	struct stinfo_s
+
+SRCPOS_S;
+DVAR_S;
+CASELIST_S;
+CASEVAL_S;
+STMEMBER_S;
+STINFO_S;
+
+
+/************* source position tracking ************/
+typedef SRCPOS_S {
+	char *file;
+	int line;
+	int col;
+} srcpos_t;
+
+/* member information */
+typedef MEMBER_S {
+
+	char *name;
+	int offset;	/* offset from top of structure */
+	int size;	/* size in bytes of the member or of the bit array */
+	int fbit;	/* first bit; -1 means not a bit field */
+	int nbits;	/* number of bits for this member */
+	int value;      /* for an enum member, the corresponding value */
+
+} member_t;
+
+/* list to hold enum constant information */
+typedef ENUM_S {
+
+	struct enum_s *next;
+	char *name;
+	int value;
+
+} enum_t;
+
+/* list of macro symbols and their corresponding values */
+typedef DEF_S {
+	struct def_s * next;
+	char *name;
+	char *val;
+
+} def_t;
+
+/* type_t information passed back and forth */
+typedef TYPE_S {
+	int type;	/* kind of type (V_BASE, V_REF, V_STRUCT, ...) */
+	ull idx;	/* index to basetype_t or ctype_t */
+	int size;	/* size of this item */
+			/* ... next fields are used internally */
+	int typattr;	/* base type qualifiers */
+	int ref;	/* level of reference */
+	int fct;        /* 1 if function pointer */
+	int *idxlst;    /* points to list of indexes if array */
+	ull rtype;	/* type_t a reference refers to */
+} type_t;
+
+/* scope/storage of variables */
+#define S_FILE		1	/* persistent file scope */
+#define S_STAT		2	/* persistent statement scope */
+#define S_AUTO		3	/* stack (default) */
+#define S_GLOB		4	/* add to the global variables */
+
+typedef union vu_s {
+	unsigned char uc;
+	signed char sc;
+	unsigned short us;
+	signed short ss;
+	unsigned int ul;
+	signed int sl;
+	unsigned long long ull;
+	signed long long sll;
+	void *data;
+} vu_t;
+
+/************* value_t **************/
+typedef VALUE_S  {
+	type_t 	 type;
+	int	 set;	/* if this is an lvalue then set is 1 */
+	VALUE_S	*setval;/* value_t to set back to */
+	void	(*setfct)(struct value_s*, struct value_s*);
+			/* the function that will set the value */
+	ARRAY_S	*arr;	/* array associated with value */
+	vu_t	 v;
+	ull	 mem;
+} value_t;
+
+/************** array linked lists *****************/
+typedef ARRAY_S {
+
+	ARRAY_S *next;	/* to support a linked list of array elements */
+	ARRAY_S *prev;
+	int ref;	/* reference count on this array */
+	VALUE_S *idx;	/* arrays can be indexed using any type of variables */
+	VALUE_S *val;	/* arrays element values */
+
+} array_t;
+
+/************* node_t *************/
+typedef NODE_S {
+	VALUE_S* (*exe)(void*);	/* execute it */
+	void   (*free)(void*);	/* free it up */
+	char* (*name)(void*);	/* get a name */
+	void *data;		/* opaque data */
+	NODE_S* next;
+	SRCPOS_S pos;
+} node_t;
+
+typedef IDX_S {
+
+	int nidx;
+	NODE_S *idxs[MAXIDX];
+
+} idx_t;
+
+/*************** variable list ****************/
+typedef VAR_S {
+
+	char *name;
+	VAR_S *next;
+	VAR_S *prev;
+	VALUE_S *v;
+	int ini;
+	DVAR_S *dv;
+
+} var_t;
+
+/* V_BASE subtype */
+#define B_SC    0       /* signed char */
+#define B_UC    1       /* unsigned char */
+#define B_SS    2       /* signed short */
+#define B_US    3       /* unsigned short */
+#define B_SL    4       /* signed long */
+#define B_UL    5       /* unsigned long */
+#define B_SLL   6       /* signed long long */
+#define B_ULL   7       /* unsigned long long */
+
+#define is_ctype(t) ((t)==V_UNION || (t)==V_STRUCT)
+#define VAL_TYPE(v)  (v->type.type)
+#define TYPE_SIZE(t) ((t)->type==V_REF?sial_defbsize():(t)->size)
+
+/* types of jumps */
+#define J_CONTINUE	1
+#define J_BREAK		2
+#define J_RETURN	3
+#define J_EXIT		4
+
+#define sial_setval(v, v2)	if((v)->set) ((v)->setfct)((v)->setval, (v2))
+
+/************* case *************/
+typedef CASEVAL_S {
+
+	int isdef;
+	ull val;
+	CASEVAL_S *next;
+	SRCPOS_S pos;
+
+} caseval_t;
+
+typedef CASELIST_S {
+
+	CASEVAL_S *vals;
+	NODE_S *stmt;
+	CASELIST_S *next;
+	SRCPOS_S pos;
+
+} caselist_t;
+
+/*************** struct member info  ****************/
+typedef STMEMBER_S {
+
+        TYPE_S type;    /* corresponding type_t */
+        MEMBER_S m;     /* member information */
+
+        STMEMBER_S *next;
+
+} stmember_t;
+
+typedef DVAR_S {
+
+	char		*name;
+	int		 refcount;
+	int		 ref;
+	int		 fct;
+	int		 bitfield;
+	int		 nbits;
+	IDX_S		*idx;
+	NODE_S		*init;
+	VAR_S		*fargs;
+	SRCPOS_S	 pos;
+	DVAR_S		*next;
+
+} dvar_t;
+
+typedef STINFO_S {
+	char		*name;	/* structure name */
+	ull		 idx;	/* key for search */
+	int		 all;	/* local : partial or complete declaration ? */
+	TYPE_S		 ctype;	/* associated type */
+	TYPE_S		 rtype;	/* real type_t when typedef */
+	STMEMBER_S	*stm;	/* linked list of members */
+	ENUM_S		*enums;	/* enums names and values */
+	STINFO_S	 *next;  /* next struct on the list */
+
+} stinfo_t;
+
+stinfo_t *sial_getstbyindex(ull idx, int type_t);
+
+typedef value_t* (*xfct_t)(void *);
+typedef char*    (*nfct_t)(void *);
+typedef void     (*ffct_t)(void *);
+typedef void     (*setfct_t)(value_t*, value_t*);
+
+#ifdef DEBUG
+#define NODE_EXE(n) (printf("(%s):[%d]\n",__FILE__, __LINE__), (n)->exe((n)->data))
+#else
+#define NODE_EXE(n) ((n)->exe((n)->data))
+#endif
+#define NODE_NAME(n) ((n)->name?((n)->name((n)->data)):0)
+#define NODE_FREE(n) (sial_freenode(n))
+
+#ifdef __GNUC__
+#define __return_address (void*)(__builtin_return_address(0))
+#else
+// must be the SGI Mips compiler.
+#endif
+#if 1
+#define TAG(p) sial_caller(p, __return_address)
+#else
+#define TAG(p) ;
+#endif
+
+node_t  *sial_sibling(node_t*, node_t*);
+node_t  *sial_newnode(void);
+node_t  *sial_newvnode(char *);
+node_t  *sial_newstr(void);
+node_t  *sial_newnum(char *);
+node_t  *sial_newop(int op, int nagrs, ...);
+node_t  *sial_newptrto(int, node_t*);
+node_t  *sial_newmult(node_t*, node_t*, int);
+node_t  *sial_newstat(int op, int nargs, ...);
+node_t  *sial_stat_decl(node_t*, var_t*);
+node_t  *sial_addstat(node_t*, node_t*);
+node_t  *sial_type_cast(type_t*, node_t*);
+node_t  *sial_newmem(int, node_t*, node_t*);
+node_t  *sial_newcall(node_t*, node_t*);
+node_t  *sial_newindex(node_t*, node_t*);
+node_t  *sial_newadrof(node_t*);
+node_t  *sial_newcase(node_t*, node_t*);
+node_t  *sial_addcase(node_t*, node_t*);
+node_t  *sial_caseval(int, node_t*);
+node_t  *sial_addcaseval(node_t*, node_t*);
+node_t  *sial_sizeof(void *p, int type_t);
+node_t  *sial_tdeftovar(type_t *td);
+node_t  *sial_getppnode(void);
+node_t  *sial_allocstr(char *buf);
+node_t  *sial_makenum(int type_t, ull val);
+node_t  *sial_macexists(node_t *var_t);
+node_t  *sial_newptype(var_t *v);
+node_t  *sial_newpval(node_t *vn, int fmt);
+node_t  *sial_strconcat(node_t *, node_t *);
+node_t	*sial_typecast(type_t*type, node_t*expr);
+
+dvar_t  *sial_newdvar(node_t *v);
+dvar_t  *sial_linkdvar(dvar_t *dvl, dvar_t *dv);
+dvar_t  *sial_dvarini(dvar_t *dv, node_t *init);
+dvar_t  *sial_dvaridx(dvar_t *dv, node_t *n);
+dvar_t  *sial_dvarfld(dvar_t *dv, node_t *n);
+dvar_t  *sial_dvarptr(int ref, dvar_t *dv);
+dvar_t  *sial_dvarfct(dvar_t *dv, var_t *fargs);
+
+void  sial_pushjmp(int type_t, void *env, void *val);
+void  sial_popjmp(int type_t);
+void *sial_getcurfile(void);
+void  sial_walkarray(node_t *varnode_t, node_t *arrnode_t, void(*cb)(void *), void *data);
+void  get_bit_value(ull val, int nbits, int boff, int size, value_t *v);
+void  sial_enqueue(var_t *vl, var_t *v);
+void  sial_freenode(node_t *n);
+void  sial_validate_vars(var_t *svs);
+void  sial_freesvs(var_t *svs);
+void *sial_setexcept(void);
+void  sial_tdef_decl(dvar_t *dv, type_t *t);
+void  sial_refarray(value_t *v, int inc);
+void *sial_curmac(void);
+void  sial_setfct(value_t *v1, value_t *v2);
+void  sial_exevi(char *fname, int line);
+void  sial_unput(char);
+void  sial_dupval(value_t *v, value_t *vs);
+void  sial_parseback(void);
+void  sial_curpos(srcpos_t *p, srcpos_t *s);
+void  sial_rmexcept(void *osa);
+void  sial_chksign(type_t*t);
+void  sial_chksize(type_t*t);
+void  sial_setpos(srcpos_t *p);
+void  sial_rerror(srcpos_t *p, char *fmt, ...);
+void  sial_rwarning(srcpos_t *p, char *fmt, ...);
+void  sial_chkandconvert(value_t *vto, value_t *vfrm);
+void  sial_warning(char *fmt, ...);
+void  sial_format(int tabs, char *str);
+void  sial_freevar(var_t*v);
+void  sial_rmbuiltin(var_t*v);
+void  sial_rm_globals(void *vg);
+void  sial_addnewsvs(var_t*avl, var_t*svl, var_t*nvl);
+void  sial_dojmp(int type, void *val);
+void  sial_pushbuf(char *buf, char *fname, void(*f)(void*), void *d, void *m);
+void  sial_rsteofoneol(void);
+void  sial_settakeproto(int v);
+void  sial_popallin(void);
+void  sial_tagst(void);
+void  sial_flushtdefs(void);
+void  sial_setsvlev(int newlev);
+void  sial_flushmacs(void *tag);
+void  sial_add_auto(var_t*nv);
+void *sial_chkbuiltin(char *name);
+void  sial_freedata(value_t *v);
+void  sial_dupdata(value_t *v, value_t *vs);
+void  sial_setarray(array_t**arpp);
+void  sial_rawinput(int on);
+void  sial_setini(node_t*n);
+void  sial_valindex(value_t *var, value_t *idx, value_t *ret);
+void  sial_free_siblings(node_t*ni);
+void  sial_mkvsigned(value_t*v);
+void  sial_transval(int s1, int s2, value_t *v, int issigned);
+void  sial_popref(type_t*t, int ref);
+void  sial_getmem(ull kp, void *p, int n);
+void  sial_baseop(int op, value_t *v1, value_t *v2, value_t *result);
+void  sial_setinsizeof(int v);
+void  sial_freeidx(idx_t *idx);
+void  sial_freedvar(dvar_t*dv);
+void  sial_pushenums(enum_t *et);
+void  sial_addfunc_ctype(int idx);
+void  sial_setapiglobs(void);
+void  sial_setbuiltins(void);
+void  sial_setdefbtype(int size, int sign);
+void  get_bit_value(ull val, int nbits, int boff, int size, value_t *v);
+void *sial_findfile(char *name, int unlink);
+void  sial_newmac(char *mname, char *buf, int np, char **p, int silent);
+void *sial_getcurfile(void);
+void  sial_startctype(int type, node_t*namen);
+void  sial_addtolist(var_t*vl, var_t*v);
+void  sial_arch_swapvals(void* vp, void *sp);
+void  sial_fillst(stinfo_t *st);
+void  sial_exememlocal(value_t *vp, stmember_t* stm, value_t *v);
+void  sial_do_deref(int n, value_t *v, value_t *ref);
+void  sial_addneg(char *name);
+
+stmember_t*sial_member(char *mname, type_t*tp);
+
+ull   set_bit_value_t(ull dvalue_t, ull value_t, int nbits, int boff);
+ull   unival(value_t *);
+ul    sial_bool(value_t *);
+
+value_t *sial_docall(node_t *, node_t *, void *);
+value_t *sial_docast(void);
+value_t *sial_newval(void);
+value_t *sial_exebfunc(char *, value_t **);
+value_t *sial_exevar(void *);
+value_t *sial_exenode(node_t *);
+value_t *sial_setstrval(value_t *, char *);
+value_t *sial_defbtype(value_t *, ull);
+value_t *sial_defbtypesize(value_t *, ull, int);
+value_t *sial_sprintf(value_t *, ...);
+value_t *sial_printf(value_t *, ...);
+value_t *sial_exists(value_t *vname);
+value_t *sial_exit(int v);
+value_t *sial_bload(value_t *name);
+value_t *sial_bdepend(value_t *name);
+value_t *sial_bunload(value_t *vfname);
+value_t *sial_showtemp(void);
+value_t *sial_showaddr(value_t *vadr);
+value_t *sial_findsym(value_t *vadr);
+value_t *sial_memdebugon(void);
+value_t *sial_memdebugoff(void);
+value_t *sial_ismember(value_t*vp, value_t*vm);
+
+value_t *sial_prarr(value_t*name, value_t*root);
+value_t *sial_getstr(value_t*vm);
+
+var_t *sial_vardecl(dvar_t *dv, type_t *t);
+var_t *sial_inlist(char *name, var_t *vl);
+var_t *sial_dupvlist(var_t *vl);
+var_t *sial_getcurgvar(void);
+var_t *sial_getvarbyname(char *name, int silent, int local);
+var_t *sial_getsgrp_avs(node_t *n);
+var_t *sial_getsgrp_svs(node_t *n);
+var_t *sial_parsexpr(char *);
+
+int   sial_file_decl(var_t *svs);
+int   sial_newfunc(var_t *fvar, node_t* body);
+int   sial_line(int inc);
+int   sial_samectypename(int type_t, ull idx1, ull idx2);
+int   sial_issigned(int attr);
+int   sial_isstatic(int atr);
+int   sial_isjuststatic(int attr);
+int   sial_isconst(int atr);
+int   sial_issigned(int atr);
+int   sial_istdef(int atr);
+int   sial_isxtern(int atr);
+int   sial_isvoid(int atr);
+int   sial_isstor(int atr);
+int   sial_ispartial(type_t*t);
+int   sial_input(void);
+int   sial_addsvs(int type, var_t*sv);
+int   sial_pushfile(char *name);
+int   sial_chkfname(char *fname, void *fd);
+int   sial_lookuparray(node_t*vnode, node_t*arrnode);
+int   sial_runcmd(char *fname, var_t*args);
+int   sial_getseq(int c);
+int   sial_newfile(char *name, int silent);
+int   sial_deletefile(char *name);
+int   sial_getsvlev(void);
+int   sial_idxtoattr(int idx);
+int   sial_docase(ull val, caselist_t*cl);
+int   siallex(void);
+int   sialpplex(void);
+int   sial_ismemdebug(void);
+int   sial_isenum(int atr);
+int   sial_funcexists(char *name);
+int   sial_isnew(void* p);
+int   sial_isneg(char *name);
+
+char  *sial_vartofunc(node_t *name);
+char  *sial_gettdefname(ull idx);
+char  *sial_ctypename(int type_t);
+char  *sial_filempath(char *fname);
+char  *sial_fileipath(char *fname);
+char  *sial_getline(void);
+char  *sial_cursorp(void);
+char  *sial_getbtypename(int typattr);
+char  *sial_filename(void);
+char  *sial_curp(char *);
+
+type_t  *sial_newcast(var_t *v);
+type_t  *sial_newctype(int ctype_t, node_t *n);
+type_t  *sial_addbtype(type_t *t, int newtok);
+type_t  *sial_ctype_decl(int ctype_t, node_t *n, var_t *list);
+type_t  *sial_enum_decl(int ctype_t, node_t *n, dvar_t *dvl);
+type_t  *sial_addstorage(type_t *t1, type_t *t2);
+type_t  *sial_getvoidstruct(int ctype);
+
+extern int lineno, needvar, instruct, nomacs;
+node_t *lastv;
+
+#define NULLNODE ((node_t*)0)
+
+/* configuration variables */
+#define S_MAXSTRLEN	1024	/* length of a STRING variable value */
+#define S_MAXDEEP	500	/* maximum stacking of calls */
+#define S_MAXFILES	200	/* maximum number of macro files  */
+
+#define S_VARARG	"__VARARG" /* name of the special var for ... */
--- crash/extensions/Makefile.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions/Makefile	2007-09-24 15:49:41.000000000 -0400
@@ -0,0 +1,47 @@
+#
+# Makefile for building crash shared object extensions
+#
+# Copyright (C) 2005, 2007 David Anderson
+# Copyright (C) 2005, 2007 Red Hat, Inc. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# To build the extension shared objects in this directory, run 
+# "make extensions" from the top-level directory.
+#
+# To add a new extension object, simply copy your module's .c file
+# to this directory, and it will be built automatically using
+# the "standard" compile line.  If that compile line does not 
+# suffice, create a .mk file with the same prefix as the .c file,
+# and that makefile will be invoked. 
+# 
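+#
+# For example (illustrative, hypothetical module name), a mymod.mk placed
+# next to mymod.c might simply carry its own rule:
+#
+#   mymod.so: mymod.c
+#           gcc -nostartfiles -shared -rdynamic -o mymod.so mymod.c -fPIC \
+#                   -D$(TARGET) $(TARGET_CFLAGS) -I/some/extra/include
+#
+# (in a real .mk the recipe line must start with a tab; TARGET and
+# TARGET_CFLAGS are assumed to be available to the sub-make)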
+
+CONTRIB_SO := $(patsubst %.c,%.so,$(wildcard *.c))
+
+all: link_defs $(CONTRIB_SO)
+	
+link_defs:
+	@if [ ! -f defs.h ]; then \
+	  ln -s ../defs.h; fi 
+
+$(CONTRIB_SO): %.so: %.c
+	@if [ -f $*.mk ]; then \
+		make -f $*.mk; \
+	else \
+		echo "gcc -nostartfiles -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS)"; \
+		gcc -nostartfiles -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS); \
+	fi
+
+clean:
+	rm -f $(CONTRIB_SO)
+	@for MAKEFILE in `grep -sl "^clean:" *.mk`; \
+	  do make --no-print-directory -f $$MAKEFILE clean; \
+	done
--- crash/lkcd_v7.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_v7.c	2005-11-10 15:25:45.000000000 -0500
@@ -89,7 +89,11 @@
 	ifd = 0;
 
 #ifdef LKCD_INDEX_FILE
-	lkcd->memory_pages = (dh->dh_memory_size * (getpagesize()/lkcd->page_size)) * 2;
+        if (dh->dh_memory_end < 0x1000000000LL) {
+            lkcd->memory_pages = dh->dh_memory_end / lkcd->page_size + 1;
+        } else {
+            lkcd->memory_pages = (dh->dh_memory_size * (getpagesize()/lkcd->page_size)) * 2;
+        }
 	dump_index_size = (lkcd->memory_pages * sizeof(off_t));	
 	lkcd->page_offsets = 0;
 	strcpy(dumpfile_index_name, dumpfile);
--- crash/filesys.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/filesys.c	2009-02-04 15:01:56.000000000 -0500
@@ -1,8 +1,8 @@
 /* filesys.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -17,8 +17,9 @@
 
 #include "defs.h"
 #include <linux/major.h>
+#include <regex.h>
 
-static void show_mounts(ulong, int);
+static void show_mounts(ulong, int, struct task_context *);
 static int find_booted_kernel(void);
 static int find_booted_system_map(void);
 static int verify_utsname(char *);
@@ -33,7 +34,7 @@
 static int open_file_reference(struct reference *);
 static void memory_source_init(void);
 static int get_pathname_component(ulong, ulong, int, char *, char *);
-static ulong *get_mount_list(int *);
+static ulong *get_mount_list(int *, struct task_context *);
 char *inode_type(char *, char *);
 static void match_proc_version(void);
 static void get_live_memory_source(void);
@@ -43,6 +44,7 @@
 static int memory_driver_init(void);
 static int create_memory_device(dev_t);
 static void *radix_tree_lookup(ulong, ulong, int);
+static int match_file_string(char *, char *, char *);
 
 #define DENTRY_CACHE (20)
 #define INODE_CACHE  (20)
@@ -75,6 +77,7 @@
 #define DUMP_FULL_NAME   1
 #define DUMP_INODE_ONLY  2
 #define DUMP_DENTRY_ONLY 4
+#define DUMP_EMPTY_FILE  8
 
 /*
  *  Open the namelist, dumpfile and output devices.
@@ -99,6 +102,10 @@
 		}
 
 		if (pc->namelist) {
+			if (XEN_HYPER_MODE() && !pc->dumpfile)
+				error(FATAL, 
+				    "Xen hypervisor mode requires a dumpfile\n");
+
 			if (!pc->dumpfile && !get_proc_version())
 	                	error(INFO, "/proc/version: %s\n", 
 					strerror(errno));
@@ -190,7 +197,15 @@
                         if (!netdump_init(pc->dumpfile, fp))
                                 error(FATAL, "%s: initialization failed\n",
                                         pc->dumpfile);
-		} else if (pc->flags & NETDUMP) {
+		} else if (pc->flags & KDUMP) {
+                        if (!kdump_init(pc->dumpfile, fp))
+                                error(FATAL, "%s: initialization failed\n",
+                                        pc->dumpfile);
+		} else if (pc->flags & XENDUMP) {
+                        if (!xendump_init(pc->dumpfile, fp))
+                                error(FATAL, "%s: initialization failed\n",
+                                        pc->dumpfile);
+		} else if (pc->flags & DISKDUMP) {
                         if (!diskdump_init(pc->dumpfile, fp))
                                 error(FATAL, "%s: initialization failed\n",
                                         pc->dumpfile);
@@ -217,10 +232,7 @@
 static void
 match_proc_version(void)
 {
-	char command[BUFSIZE];
-	char buffer[BUFSIZE];
-	FILE *pipe;
-	int found;
+	char buffer[BUFSIZE], *p1, *p2;
 
 	if (pc->flags & KERNEL_DEBUG_QUERY)
 		return;
@@ -228,32 +240,37 @@
 	if (!strlen(kt->proc_version)) 
 		return;
 
-        sprintf(command, "/usr/bin/strings %s", pc->namelist);
-        if ((pipe = popen(command, "r")) == NULL) {
-                error(INFO, "%s: %s\n", pc->namelist, strerror(errno));
-                return;
-        }
-
-	found = FALSE;
-        while (fgets(buffer, BUFSIZE-1, pipe)) {
-		if (!strstr(buffer, "Linux version 2."))
-			continue;
-
-                if (STREQ(buffer, kt->proc_version)) 
-                	found = TRUE;
-		break;
-        }
-        pclose(pipe);
-
-	if (found) {
+	if (match_file_string(pc->namelist, kt->proc_version, buffer)) {
                 if (CRASHDEBUG(1)) {
-			fprintf(fp, "/proc/version:\n%s", kt->proc_version);
+			fprintf(fp, "/proc/version:\n%s\n", kt->proc_version);
 			fprintf(fp, "%s:\n%s", pc->namelist, buffer);
 		}
 		return;
 	}
 
-	if (find_booted_system_map()) 
+	error(WARNING, "%s%sand /proc/version do not match!\n\n", 
+		pc->namelist, 
+		strlen(pc->namelist) > 39 ? "\n         " : " ");
+
+	/*
+	 *  find_booted_system_map() requires VTOP(), which used to be a 
+	 *  hardwired masking of the kernel address.  But some architectures 
+	 *  may not know what their physical base address is at this point, 
+	 *  and others may have different machdep->kvbase values, so for all
+	 *  but the 0-based kernel virtual address architectures, bail out
+	 *  here with a relevant error message.
+	 */
+	if (!machine_type("S390") && !machine_type("S390X")) {
+		p1 = &kt->proc_version[strlen("Linux version ")];
+		p2 = strstr(p1, " ");
+		*p2 = NULLCHAR;
+		error(WARNING, "/proc/version indicates kernel version: %s\n", p1);
+		error(FATAL, "please use the vmlinux file for that kernel version, or try using\n"
+			"       the System.map for that kernel version as an additional argument.\n", p1);
+		clean_exit(1);
+	}
+
+	if (find_booted_system_map())
                 pc->flags |= SYSMAP;
 }
 
@@ -303,14 +320,12 @@
                 for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) 
 			cnt++;
 
-		if ((searchdirs = (char **)malloc(cnt * sizeof(char *))) 
-		    == NULL) {
+		if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) {
 			error(INFO, "/usr/src/ directory list malloc: %s\n",
                                 strerror(errno));
 			closedir(dirp);
 			return default_searchdirs;
 		} 
-		BZERO(searchdirs, cnt * sizeof(char *));
 
 		for (i = 0; i < DEFAULT_SEARCHDIRS; i++) 
 			searchdirs[i] = default_searchdirs[i];
@@ -345,6 +360,16 @@
 		closedir(dirp);
 
 		searchdirs[cnt] = NULL;
+	} else {
+		if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) {
+			error(INFO, "search directory list malloc: %s\n",
+                                strerror(errno));
+			closedir(dirp);
+			return default_searchdirs;
+		} 
+		for (i = 0; i < DEFAULT_SEARCHDIRS; i++) 
+			searchdirs[i] = default_searchdirs[i];
+		cnt = DEFAULT_SEARCHDIRS;
 	}
 
         if (redhat_kernel_directory_v1(dirbuf)) {
@@ -483,13 +508,11 @@
 find_booted_kernel(void)
 {
 	char kernel[BUFSIZE];
-	char command[BUFSIZE];
 	char buffer[BUFSIZE];
 	char **searchdirs;
 	int i, preferred, wrapped;
         DIR *dirp;
         struct dirent *dp;
-	FILE *pipe;
 	int found;
 
 	pc->flags |= FINDKERNEL;
@@ -538,24 +561,11 @@
                             !is_elf_file(kernel))
 				continue;
 
-			sprintf(command, "/usr/bin/strings %s", kernel);
-	        	if ((pipe = popen(command, "r")) == NULL) {
-	        		error(INFO, "%s: %s\n", 
-					kernel, strerror(errno));
-				continue;
-			}
-
 			if (CRASHDEBUG(1)) 
 				fprintf(fp, "find_booted_kernel: check: %s\n", 
 					kernel);
 
-			while (fgets(buffer, BUFSIZE-1, pipe)) {
-				if (STREQ(buffer, kt->proc_version)) {
-					found = TRUE;
-					break;
-				}
-			}
-			pclose(pipe);
+			found = match_file_string(kernel, kt->proc_version, buffer);
 	
 			if (found)
 				break;
@@ -701,6 +711,8 @@
         
         fclose(version);
 
+	strip_linefeeds(kt->proc_version);
+
 	return TRUE;
 }
 
@@ -797,30 +809,14 @@
 static int
 verify_utsname(char *system_map)
 {
-	char command[BUFSIZE];
 	char buffer[BUFSIZE];
-	FILE *pipe;
-	int found;
 	ulong value;
 	struct new_utsname new_utsname;
 
-	sprintf(command, "/usr/bin/strings %s", system_map);
-       	if ((pipe = popen(command, "r")) == NULL) 
-		return FALSE;
-	
 	if (CRASHDEBUG(1)) 
 		fprintf(fp, "verify_utsname: check: %s\n", system_map);
 
-	found = FALSE;
-	while (fgets(buffer, BUFSIZE-1, pipe)) {
-		if (strstr(buffer, "D system_utsname")) {
-			found = TRUE;
-			break;
-		}
-	}
-	pclose(pipe);
-
-	if (!found)
+	if (!match_file_string(system_map, "D system_utsname", buffer))
 		return FALSE;
 	
 	if (extract_hex(buffer, &value, NULLCHAR, TRUE) &&
@@ -941,9 +937,10 @@
 {
 	char command[BUFSIZE];
 	char buf[BUFSIZE];
-	char *retbuf;
+	char *retbuf, *start, *end, *module;
 	FILE *pipe;
-	int done;
+	regex_t regex;
+	int regex_used, done;
 
 	if (!file_exists("/usr/bin/find", NULL) || 
 	    !file_exists("/bin/echo", NULL) ||
@@ -962,20 +959,34 @@
 
 	done = FALSE;
 	retbuf = NULL;
+	regex_used = ((start = strstr(file, "[")) && 
+		(end = strstr(file, "]")) && (start < end) &&
+		(regcomp(&regex, file, 0) == 0));
 
         while (fgets(buf, BUFSIZE-1, pipe) || !done) {
                 if (STREQ(buf, "search done\n")) {
                         done = TRUE;
                         break;
                 }
-                if (!retbuf &&
+                if (!retbuf && !regex_used &&
                     STREQ((char *)basename(strip_linefeeds(buf)), file)) {
                         retbuf = GETBUF(strlen(buf)+1);
                         strcpy(retbuf, buf);
                 }
+		if (!retbuf && regex_used) {
+			module = basename(strip_linefeeds(buf));
+			if (regexec(&regex, module, 0, NULL, 0) == 0) {
+				retbuf = GETBUF(strlen(buf)+1);
+				strcpy(retbuf, buf);
+			}
+		}
         }
 
+	if (regex_used)
+		regfree(&regex);
+
         pclose(pipe);
+
 	return retbuf;
 }
  
@@ -1125,6 +1136,8 @@
 {
 	int i;
 	int c, found;
+	struct task_context *tc, *namespace_context;
+	ulong value;
 	char *spec_string;
 	char buf1[BUFSIZE];
 	char buf2[BUFSIZE];
@@ -1133,7 +1146,9 @@
 	int flags = 0;
 	int save_next;
 
-        while ((c = getopt(argcnt, args, "if")) != EOF) {
+	namespace_context = pid_to_context(1);
+
+        while ((c = getopt(argcnt, args, "ifn:")) != EOF) {
                 switch(c)
 		{
 		case 'i':
@@ -1144,6 +1159,19 @@
 			flags |= MOUNT_PRINT_FILES;
 			break;
 
+		case 'n':
+			switch (str_to_context(optarg, &value, &tc)) {
+		        case STR_PID:
+                        case STR_TASK:
+				namespace_context = tc;
+                               	break;
+                        case STR_INVALID:
+                               	error(FATAL, "invalid task or pid value: %s\n",
+                                        	optarg);
+                               	break;
+			}
+			break;
+
 		default:
 			argerrs++;
 			break;
@@ -1162,7 +1190,7 @@
                         	shift_string_left(spec_string, 2);
 
 			open_tmpfile();
-			show_mounts(0, MOUNT_PRINT_ALL);
+			show_mounts(0, MOUNT_PRINT_ALL, namespace_context);
 
 			found = FALSE;
         		rewind(pc->tmpfile);
@@ -1181,16 +1209,20 @@
                         		continue;
 
 				for (i = 0; i < c; i++) {
-					if (STREQ(arglist[i], spec_string)) 
+					if (PATHEQ(arglist[i], spec_string))
 						found = TRUE;
 				}
 				if (found) {
 					fp = pc->saved_fp;
 					if (flags) {
 						sscanf(buf2,"%lx",&vfsmount);
-						show_mounts(vfsmount, flags);
+						show_mounts(vfsmount, flags, 
+							namespace_context);
 					} else {
-						fprintf(fp, mount_hdr);
+						if (!(pc->curcmd_flags & HEADER_PRINTED)) {
+							fprintf(fp, mount_hdr);
+							pc->curcmd_flags |= HEADER_PRINTED;
+						}
 						fprintf(fp, buf2);
 					}
 					found = FALSE;
@@ -1200,7 +1232,7 @@
 			close_tmpfile();
 		} while (args[++optind]);
 	} else
-		show_mounts(0, flags);
+		show_mounts(0, flags, namespace_context);
 }
 
 /*
@@ -1208,7 +1240,7 @@
  */
 
 static void
-show_mounts(ulong one_vfsmount, int flags)
+show_mounts(ulong one_vfsmount, int flags, struct task_context *namespace_context)
 {
 	ulong one_vfsmount_list;
 	long sb_s_files;
@@ -1246,7 +1278,7 @@
 		mount_cnt = 1;
 		mntlist = &one_vfsmount_list;
 	} else 
-		mntlist = get_mount_list(&mount_cnt); 
+		mntlist = get_mount_list(&mount_cnt, namespace_context); 
 
 	if (!strlen(mount_hdr)) {
 		devlen = strlen("DEVNAME");
@@ -1408,11 +1440,11 @@
  *  Allocate and fill a list of the currently-mounted vfsmount pointers.
  */
 static ulong *
-get_mount_list(int *cntptr)
+get_mount_list(int *cntptr, struct task_context *namespace_context)
 {
 	struct list_data list_data, *ld;
 	int mount_cnt;
-	ulong *mntlist, namespace, root;
+	ulong *mntlist, namespace, root, nsproxy, mnt_ns;
 	struct task_context *tc;
 	
         ld = &list_data;
@@ -1421,9 +1453,26 @@
 	if (symbol_exists("vfsmntlist")) {
         	get_symbol_data("vfsmntlist", sizeof(void *), &ld->start);
                	ld->end = symbol_value("vfsmntlist");
+	} else if (VALID_MEMBER(task_struct_nsproxy)) {
+ 		tc = namespace_context;
+
+        	readmem(tc->task + OFFSET(task_struct_nsproxy), KVADDR, 
+			&nsproxy, sizeof(void *), "task nsproxy", 
+			FAULT_ON_ERROR);
+        	if (!readmem(nsproxy + OFFSET(nsproxy_mnt_ns), KVADDR, 
+			&mnt_ns, sizeof(void *), "nsproxy mnt_ns", 
+			RETURN_ON_ERROR|QUIET))
+			error(FATAL, "cannot determine mount list location!\n");
+        	if (!readmem(mnt_ns + OFFSET(mnt_namespace_root), KVADDR, 
+			&root, sizeof(void *), "mnt_namespace root", 
+			RETURN_ON_ERROR|QUIET))
+			error(FATAL, "cannot determine mount list location!\n");
+
+        	ld->start = root + OFFSET(vfsmount_mnt_list);
+        	ld->end = mnt_ns + OFFSET(mnt_namespace_list);
+
 	} else if (VALID_MEMBER(namespace_root)) {
-		if (!(tc = pid_to_context(1)))
-	 		tc = CURRENT_CONTEXT();
+ 		tc = namespace_context;
 
         	readmem(tc->task + OFFSET(task_struct_namespace), KVADDR, 
 			&namespace, sizeof(void *), "task namespace", 
@@ -1497,7 +1546,7 @@
 		goto nopath;
 
         if (VALID_MEMBER(file_f_vfsmnt)) {
-		mntlist = get_mount_list(&mount_cnt);
+		mntlist = get_mount_list(&mount_cnt, pid_to_context(1));
         	vfsmount_buf = GETBUF(SIZE(vfsmount));
 
         	for (m = found = 0, vfsmnt = mntlist; 
@@ -1706,15 +1755,30 @@
 	MEMBER_OFFSET_INIT(fs_struct_pwd, "fs_struct", "pwd");
 	MEMBER_OFFSET_INIT(fs_struct_rootmnt, "fs_struct", "rootmnt");
 	MEMBER_OFFSET_INIT(fs_struct_pwdmnt, "fs_struct", "pwdmnt");
-	MEMBER_OFFSET_INIT(files_struct_max_fds, "files_struct", "max_fds");
-	MEMBER_OFFSET_INIT(files_struct_max_fdset, "files_struct", "max_fdset");
-	MEMBER_OFFSET_INIT(files_struct_open_fds, "files_struct", "open_fds");
 	MEMBER_OFFSET_INIT(files_struct_open_fds_init,  
 		"files_struct", "open_fds_init");
-	MEMBER_OFFSET_INIT(files_struct_fd, "files_struct", "fd");
+	MEMBER_OFFSET_INIT(files_struct_fdt, "files_struct", "fdt");
+	if (VALID_MEMBER(files_struct_fdt)) {
+		MEMBER_OFFSET_INIT(fdtable_max_fds, "fdtable", "max_fds");
+		MEMBER_OFFSET_INIT(fdtable_max_fdset, "fdtable", "max_fdset");
+		MEMBER_OFFSET_INIT(fdtable_open_fds, "fdtable", "open_fds");
+		MEMBER_OFFSET_INIT(fdtable_fd, "fdtable", "fd");
+	} else {
+		MEMBER_OFFSET_INIT(files_struct_max_fds, "files_struct", "max_fds");
+		MEMBER_OFFSET_INIT(files_struct_max_fdset, "files_struct", "max_fdset");
+		MEMBER_OFFSET_INIT(files_struct_open_fds, "files_struct", "open_fds");
+		MEMBER_OFFSET_INIT(files_struct_fd, "files_struct", "fd");
+	}
 	MEMBER_OFFSET_INIT(file_f_dentry, "file", "f_dentry");
 	MEMBER_OFFSET_INIT(file_f_vfsmnt, "file", "f_vfsmnt");
 	MEMBER_OFFSET_INIT(file_f_count, "file", "f_count");
+	MEMBER_OFFSET_INIT(path_mnt, "path", "mnt");
+	MEMBER_OFFSET_INIT(path_dentry, "path", "dentry");
+	if (INVALID_MEMBER(file_f_dentry)) {
+		MEMBER_OFFSET_INIT(file_f_path, "file", "f_path");
+		ASSIGN_OFFSET(file_f_dentry) = OFFSET(file_f_path) + OFFSET(path_dentry);
+		ASSIGN_OFFSET(file_f_vfsmnt) = OFFSET(file_f_path) + OFFSET(path_mnt);
+	}
 	MEMBER_OFFSET_INIT(dentry_d_inode, "dentry", "d_inode");
 	MEMBER_OFFSET_INIT(dentry_d_parent, "dentry", "d_parent");
 	MEMBER_OFFSET_INIT(dentry_d_covers, "dentry", "d_covers");
@@ -1736,10 +1800,15 @@
         MEMBER_OFFSET_INIT(vfsmount_mnt_mountpoint, 
 		"vfsmount", "mnt_mountpoint");
 	MEMBER_OFFSET_INIT(namespace_root, "namespace", "root");
+	MEMBER_OFFSET_INIT(task_struct_nsproxy, "task_struct", "nsproxy");
 	if (VALID_MEMBER(namespace_root)) {
 		MEMBER_OFFSET_INIT(namespace_list, "namespace", "list");
 		MEMBER_OFFSET_INIT(task_struct_namespace, 
 			"task_struct", "namespace");
+	} else if (VALID_MEMBER(task_struct_nsproxy)) {
+		MEMBER_OFFSET_INIT(nsproxy_mnt_ns, "nsproxy", "mnt_ns");
+        	MEMBER_OFFSET_INIT(mnt_namespace_root, "mnt_namespace", "root");
+        	MEMBER_OFFSET_INIT(mnt_namespace_list, "mnt_namespace", "list");
 	} else if (THIS_KERNEL_VERSION >= LINUX(2,4,20)) {
 		if (CRASHDEBUG(2))
 			fprintf(fp, "hardwiring namespace stuff\n");
@@ -1762,6 +1831,8 @@
 	STRUCT_SIZE_INIT(umode_t, "umode_t");
 	STRUCT_SIZE_INIT(dentry, "dentry");
 	STRUCT_SIZE_INIT(files_struct, "files_struct");
+	if (VALID_MEMBER(files_struct_fdt))
+		STRUCT_SIZE_INIT(fdtable, "fdtable");
 	STRUCT_SIZE_INIT(file, "file");
 	STRUCT_SIZE_INIT(inode, "inode");
 	STRUCT_SIZE_INIT(vfsmount, "vfsmount");
@@ -1777,8 +1848,12 @@
 
 	if (symbol_exists("height_to_maxindex")) {
 		int tmp;
-		ARRAY_LENGTH_INIT(tmp, height_to_maxindex,
-                        "height_to_maxindex", NULL, 0);
+		if (LKCD_KERNTYPES())
+			ARRAY_LENGTH_INIT_ALT(tmp, "height_to_maxindex",
+				"radix_tree_preload.nodes", NULL, 0);
+		else
+			ARRAY_LENGTH_INIT(tmp, height_to_maxindex,
+                        	"height_to_maxindex", NULL, 0);
 		STRUCT_SIZE_INIT(radix_tree_root, "radix_tree_root");
 		STRUCT_SIZE_INIT(radix_tree_node, "radix_tree_node");
 		MEMBER_OFFSET_INIT(radix_tree_root_height, 
@@ -1798,35 +1873,35 @@
 		goto show_hit_rates;
 
         for (i = 0; i < FILE_CACHE; i++)
-                fprintf(stderr, "   cached_file[%2d]: %lx (%ld)\n",
+                fprintf(fp, "   cached_file[%2d]: %lx (%ld)\n",
                         i, ft->cached_file[i],
                         ft->cached_file_hits[i]);
-        fprintf(stderr, "        file_cache: %lx\n", (ulong)ft->file_cache);
-        fprintf(stderr, "  file_cache_index: %d\n", ft->file_cache_index);
-        fprintf(stderr, "  file_cache_fills: %ld\n", ft->file_cache_fills);
+        fprintf(fp, "        file_cache: %lx\n", (ulong)ft->file_cache);
+        fprintf(fp, "  file_cache_index: %d\n", ft->file_cache_index);
+        fprintf(fp, "  file_cache_fills: %ld\n", ft->file_cache_fills);
 
 	for (i = 0; i < DENTRY_CACHE; i++)
-		fprintf(stderr, "  cached_dentry[%2d]: %lx (%ld)\n", 
+		fprintf(fp, "  cached_dentry[%2d]: %lx (%ld)\n", 
 			i, ft->cached_dentry[i],
 			ft->cached_dentry_hits[i]);
-	fprintf(stderr, "      dentry_cache: %lx\n", (ulong)ft->dentry_cache);
-	fprintf(stderr, "dentry_cache_index: %d\n", ft->dentry_cache_index);
-	fprintf(stderr, "dentry_cache_fills: %ld\n", ft->dentry_cache_fills);
+	fprintf(fp, "      dentry_cache: %lx\n", (ulong)ft->dentry_cache);
+	fprintf(fp, "dentry_cache_index: %d\n", ft->dentry_cache_index);
+	fprintf(fp, "dentry_cache_fills: %ld\n", ft->dentry_cache_fills);
 
         for (i = 0; i < INODE_CACHE; i++)
-                fprintf(stderr, "  cached_inode[%2d]: %lx (%ld)\n",
+                fprintf(fp, "  cached_inode[%2d]: %lx (%ld)\n",
                         i, ft->cached_inode[i],
                         ft->cached_inode_hits[i]);
-        fprintf(stderr, "       inode_cache: %lx\n", (ulong)ft->inode_cache);
-        fprintf(stderr, " inode_cache_index: %d\n", ft->inode_cache_index);
-        fprintf(stderr, " inode_cache_fills: %ld\n", ft->inode_cache_fills);
+        fprintf(fp, "       inode_cache: %lx\n", (ulong)ft->inode_cache);
+        fprintf(fp, " inode_cache_index: %d\n", ft->inode_cache_index);
+        fprintf(fp, " inode_cache_fills: %ld\n", ft->inode_cache_fills);
 
 show_hit_rates:
         if (ft->file_cache_fills) {
                 for (i = fhits = 0; i < FILE_CACHE; i++)
                         fhits += ft->cached_file_hits[i];
 
-                fprintf(stderr, "     file hit rate: %2ld%% (%ld of %ld)\n",
+                fprintf(fp, "     file hit rate: %2ld%% (%ld of %ld)\n",
                         (fhits * 100)/ft->file_cache_fills,
                         fhits, ft->file_cache_fills);
 	} 
@@ -1835,7 +1910,7 @@
                 for (i = dhits = 0; i < DENTRY_CACHE; i++)
                         dhits += ft->cached_dentry_hits[i];
 
-		fprintf(stderr, "   dentry hit rate: %2ld%% (%ld of %ld)\n",
+		fprintf(fp, "   dentry hit rate: %2ld%% (%ld of %ld)\n",
 			(dhits * 100)/ft->dentry_cache_fills,
 			dhits, ft->dentry_cache_fills);
 	}
@@ -1844,7 +1919,7 @@
                 for (i = ihits = 0; i < INODE_CACHE; i++)
                         ihits += ft->cached_inode_hits[i];
 
-		fprintf(stderr, "    inode hit rate: %2ld%% (%ld of %ld)\n",
+		fprintf(fp, "    inode hit rate: %2ld%% (%ld of %ld)\n",
                         (ihits * 100)/ft->inode_cache_fills,
                         ihits, ft->inode_cache_fills);
 	}
@@ -1998,8 +2073,9 @@
 open_files_dump(ulong task, int flags, struct reference *ref)
 {
         struct task_context *tc;
-	ulong files_struct_addr;
-	char *files_struct_buf;
+	ulong files_struct_addr; 
+	ulong fdtable_addr = 0;
+	char *files_struct_buf, *fdtable_buf = NULL;
 	ulong fs_struct_addr;
 	char *dentry_buf, *fs_struct_buf;
 	ulong root_dentry, pwd_dentry;
@@ -2012,7 +2088,7 @@
 	ulong fd;
 	ulong file;
 	ulong value;
-	int i, j;
+	int i, j, use_path;
 	int header_printed = 0;
 	char root_pathname[BUFSIZE];
 	char pwd_pathname[BUFSIZE];
@@ -2027,6 +2103,8 @@
 	BZERO(root_pathname, BUFSIZE);
 	BZERO(pwd_pathname, BUFSIZE);
 	files_struct_buf = GETBUF(SIZE(files_struct));
+	if (VALID_STRUCT(fdtable))
+		fdtable_buf = GETBUF(SIZE(fdtable));
 	fill_task_struct(task);
 
 	sprintf(files_header, " FD%s%s%s%s%s%s%sTYPE%sPATH\n",
@@ -2051,7 +2129,12 @@
                 readmem(fs_struct_addr, KVADDR, fs_struct_buf, SIZE(fs_struct), 
 			"fs_struct buffer", FAULT_ON_ERROR);
 
-		root_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_root));
+		use_path = (MEMBER_TYPE("fs_struct", "root") == TYPE_CODE_STRUCT);
+		if (use_path)
+			root_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_root) +
+				OFFSET(path_dentry));
+		else
+			root_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_root));
 
 		if (root_dentry) {
 			if (VALID_MEMBER(fs_struct_rootmnt)) {
@@ -2059,13 +2142,23 @@
                         		OFFSET(fs_struct_rootmnt));
 				get_pathname(root_dentry, root_pathname, 
 					BUFSIZE, 1, vfsmnt);
+			} else if (use_path) {
+				vfsmnt = ULONG(fs_struct_buf +
+					OFFSET(fs_struct_root) +
+					OFFSET(path_mnt));
+				get_pathname(root_dentry, root_pathname, 
+					BUFSIZE, 1, vfsmnt);
 			} else {
 				get_pathname(root_dentry, root_pathname, 
 					BUFSIZE, 1, 0);
 			}
 		}
 
-		pwd_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_pwd));
+		if (use_path)
+			pwd_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_pwd) +
+				OFFSET(path_dentry));
+		else
+			pwd_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_pwd));
 
 		if (pwd_dentry) {
 			if (VALID_MEMBER(fs_struct_pwdmnt)) {
@@ -2073,6 +2166,13 @@
                         		OFFSET(fs_struct_pwdmnt));
 				get_pathname(pwd_dentry, pwd_pathname, 
 					BUFSIZE, 1, vfsmnt);
+			} else if (use_path) {
+				vfsmnt = ULONG(fs_struct_buf +
+					OFFSET(fs_struct_pwd) +
+					OFFSET(path_mnt));
+				get_pathname(pwd_dentry, pwd_pathname, 
+					BUFSIZE, 1, vfsmnt);
+
 			} else {
 				get_pathname(pwd_dentry, pwd_pathname, 
 					BUFSIZE, 1, 0);
@@ -2107,24 +2207,45 @@
 
 	files_struct_addr = ULONG(tt->task_struct + OFFSET(task_struct_files));
 
-        if (files_struct_addr) {
-                readmem(files_struct_addr, KVADDR, files_struct_buf, 
-			SIZE(files_struct), "files_struct buffer", 
-			FAULT_ON_ERROR); 
-
-		max_fdset = INT(files_struct_buf + 
+	if (files_struct_addr) {
+		readmem(files_struct_addr, KVADDR, files_struct_buf,
+			SIZE(files_struct), "files_struct buffer",
+			FAULT_ON_ERROR);
+	
+		if (VALID_MEMBER(files_struct_max_fdset)) {
+			max_fdset = INT(files_struct_buf +
 			OFFSET(files_struct_max_fdset));
 
-		max_fds = INT(files_struct_buf + 
-                        OFFSET(files_struct_max_fds));
-        } 
+			max_fds = INT(files_struct_buf +
+			OFFSET(files_struct_max_fds));
+		}
+	}
 
-	if (!files_struct_addr || max_fdset == 0 || max_fds == 0) {
+	if (VALID_MEMBER(files_struct_fdt)) {
+		fdtable_addr = ULONG(files_struct_buf + OFFSET(files_struct_fdt));
+
+		if (fdtable_addr) {
+			readmem(fdtable_addr, KVADDR, fdtable_buf,
+	 			SIZE(fdtable), "fdtable buffer", FAULT_ON_ERROR); 
+			if (VALID_MEMBER(fdtable_max_fdset))
+				max_fdset = INT(fdtable_buf +
+					OFFSET(fdtable_max_fdset));
+			else
+				max_fdset = -1;
+			max_fds = INT(fdtable_buf +
+        	                OFFSET(fdtable_max_fds));
+		}
+	}
+
+	if ((VALID_MEMBER(files_struct_fdt) && !fdtable_addr) || 
+	    !files_struct_addr || max_fdset == 0 || max_fds == 0) {
 		if (ref) {
 			if (ref->cmdflags & FILES_REF_FOUND)
 				fprintf(fp, "\n");
 		} else
 			fprintf(fp, "No open files\n");
+		if (fdtable_buf)
+			FREEBUF(fdtable_buf);
 		FREEBUF(files_struct_buf);
 		return;
 	}
@@ -2146,8 +2267,12 @@
 		}
         }
 
-	open_fds_addr = ULONG(files_struct_buf + 
-		OFFSET(files_struct_open_fds));
+	if (VALID_MEMBER(fdtable_open_fds))
+		open_fds_addr = ULONG(fdtable_buf +
+			OFFSET(fdtable_open_fds));
+	else
+		open_fds_addr = ULONG(files_struct_buf +
+			OFFSET(files_struct_open_fds));
 
 	if (open_fds_addr) {
 		if (VALID_MEMBER(files_struct_open_fds_init) && 
@@ -2157,16 +2282,21 @@
 			        OFFSET(files_struct_open_fds_init),
 				&open_fds, sizeof(fd_set));
 		else
-			readmem(open_fds_addr, KVADDR, &open_fds, 
-				sizeof(fd_set), "files_struct open_fds", 
+			readmem(open_fds_addr, KVADDR, &open_fds,
+				sizeof(fd_set), "fdtable open_fds",
 				FAULT_ON_ERROR);
 	} 
 
-	fd = ULONG(files_struct_buf + OFFSET(files_struct_fd));
+	if (VALID_MEMBER(fdtable_fd))
+		fd = ULONG(fdtable_buf + OFFSET(fdtable_fd));
+	else
+		fd = ULONG(files_struct_buf + OFFSET(files_struct_fd));
 
 	if (!open_fds_addr || !fd) {
                 if (ref && (ref->cmdflags & FILES_REF_FOUND))
                 	fprintf(fp, "\n");
+		if (fdtable_buf)
+			FREEBUF(fdtable_buf);
 		FREEBUF(files_struct_buf);
 		return;
 	}
@@ -2175,7 +2305,8 @@
 	for (;;) {
 		unsigned long set;
 		i = j * __NFDBITS;
-		if (i >= max_fdset || i >= max_fds)
+		if (((max_fdset >= 0) && (i >= max_fdset)) || 
+		    (i >= max_fds))
 			 break;
 		set = open_fds.__fds_bits[j++];
 		while (set) {
@@ -2187,7 +2318,7 @@
 				if (ref && file) {
 					open_tmpfile();
                                         if (file_dump(file, 0, 0, i,
-                                            DUMP_FULL_NAME)) {
+                                            DUMP_FULL_NAME|DUMP_EMPTY_FILE)) {
 						BZERO(buf4, BUFSIZE);
 						rewind(pc->tmpfile);
 						fgets(buf4, BUFSIZE, 
@@ -2205,8 +2336,8 @@
 						fprintf(fp, files_header);
 						header_printed = 1;
 					}
-					file_dump(file, 0, 0, i,
-						  DUMP_FULL_NAME);
+					file_dump(file, 0, 0, i, 
+						DUMP_FULL_NAME|DUMP_EMPTY_FILE);
 				}
 			}
 			i++;
@@ -2220,6 +2351,8 @@
 	if (ref && (ref->cmdflags & FILES_REF_FOUND))
 		fprintf(fp, "\n");
 
+	if (fdtable_buf)
+		FREEBUF(fdtable_buf);
 	FREEBUF(files_struct_buf);
 }
 
@@ -2248,6 +2381,8 @@
 		}
 
         	for (i = 1; i < 4; i++) {
+			if (STREQ(arglist[i], "?"))
+				continue;
         		vaddr = htol(arglist[i], FAULT_ON_ERROR, NULL);
         		if (vaddr == ref->hexval) 
         			return TRUE;
@@ -2401,16 +2536,60 @@
 		dentry = ULONG(file_buf + OFFSET(file_f_dentry));
 	}
 
-	if (!dentry) 
+	if (!dentry) {
+		if (flags & DUMP_EMPTY_FILE) {
+			fprintf(fp, "%3d%s%s%s%s%s%s%s%s%s%s\n",
+				fd,
+				space(MINSPACE),
+				mkstring(buf1, VADDR_PRLEN, 
+				CENTER|RJUST|LONG_HEX, 
+				MKSTR(file)),
+				space(MINSPACE),
+				mkstring(buf2, VADDR_PRLEN, 
+				CENTER|LONG_HEX|ZERO_FILL, 
+				MKSTR(dentry)),
+				space(MINSPACE),
+				mkstring(buf3, VADDR_PRLEN, 
+				CENTER, 
+				"?"),
+				space(MINSPACE),
+				"?   ",
+				space(MINSPACE),
+				"?");
+			return TRUE;
+		}
 		return FALSE;
+	}
 
 	if (!inode) {
 		dentry_buf = fill_dentry_cache(dentry);
 		inode = ULONG(dentry_buf + OFFSET(dentry_d_inode));
 	}
 
-	if (!inode) 
+	if (!inode) { 
+		if (flags & DUMP_EMPTY_FILE) {
+			fprintf(fp, "%3d%s%s%s%s%s%s%s%s%s%s\n",
+				fd,
+				space(MINSPACE),
+				mkstring(buf1, VADDR_PRLEN, 
+				CENTER|RJUST|LONG_HEX, 
+				MKSTR(file)),
+				space(MINSPACE),
+				mkstring(buf2, VADDR_PRLEN, 
+				CENTER|RJUST|LONG_HEX, 
+				MKSTR(dentry)),
+				space(MINSPACE),
+				mkstring(buf3, VADDR_PRLEN, 
+				CENTER|LONG_HEX|ZERO_FILL, 
+				MKSTR(inode)),
+				space(MINSPACE),
+				"?   ",
+				space(MINSPACE),
+				"?");
+			return TRUE;
+		}
 		return FALSE;
+	}
 
 	inode_buf = fill_inode_cache(inode);
 
@@ -2494,6 +2673,20 @@
 }
 
 /*
+ *  Get the vfsmnt associated with a file.
+ */
+ulong
+file_to_vfsmnt(ulong file)
+{
+	char *file_buf;
+	ulong vfsmnt;
+
+	file_buf = fill_file_cache(file);
+	vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt));
+	return vfsmnt;
+}
+
+/*
  * get_pathname() fills in a pathname string for an ending dentry
  * See __d_path() in the kernel for help fixing problems.
  */
@@ -3069,7 +3262,7 @@
 	char modname1[BUFSIZE];
 	char modname2[BUFSIZE];
 	char *name;
-	int use_module;
+	int use_module, crashbuiltin;
 	struct stat stat1, stat2;
 
 	pc->flags |= DEVMEM;
@@ -3077,7 +3270,7 @@
 		goto live_report;
 
 	pc->live_memsrc = "/dev/mem";
-	use_module = FALSE;
+	use_module = crashbuiltin = FALSE;
 
 	if (file_exists("/dev/mem", &stat1) &&
 	    file_exists(pc->memory_device, &stat2) &&
@@ -3114,6 +3307,10 @@
 		}
 
 		pclose(pipe);
+
+		if (!use_module && file_exists("/dev/crash", &stat1) && 
+		    S_ISCHR(stat1.st_mode))
+			crashbuiltin = TRUE;
 	}
 
 	if (use_module) {
@@ -3124,6 +3321,15 @@
 		pc->live_memsrc = pc->memory_device;
 	}
 
+	if (crashbuiltin) {
+		pc->flags &= ~DEVMEM;
+		pc->flags |= CRASHBUILTIN;
+		pc->readmem = read_memory_device;
+		pc->writemem = write_memory_device;
+		pc->live_memsrc = pc->memory_device;
+		pc->memory_module = NULL;
+	}
+
 live_report:
 	if (CRASHDEBUG(1)) 
 		fprintf(fp, "get_live_memory_source: %s\n", pc->live_memsrc);
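
The crashbuiltin case added above treats the crash memory driver as built into
the running kernel when /dev/crash already exists as a character device even
though no module was reported.  That existence test reduces to a
stat()/S_ISCHR() check, sketched here stand-alone (the path is only an
example):

#include <stdio.h>
#include <sys/stat.h>

/* Nonzero if the path exists and is a character device node. */
static int is_char_device(const char *path)
{
	struct stat st;

	return stat(path, &st) == 0 && S_ISCHR(st.st_mode);
}

int main(void)
{
	printf("/dev/crash: %s\n",
	       is_char_device("/dev/crash") ? "character device" : "not present");
	return 0;
}
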
@@ -3305,10 +3511,11 @@
 /*
  *  If we're here, the memory driver module is being requested:
  *
- *   1. If the module is not already loaded, insmod it.
- *   2. Determine the misc driver minor device number that it was assigned.
- *   3. Create (or verify) the device file.
- *   4. Then just open it.
+ *   1. If /dev/crash is built into the kernel, just open it.
+ *   2. If the module is not already loaded, insmod it.
+ *   3. Determine the misc driver minor device number that it was assigned.
+ *   4. Create (or verify) the device file.
+ *   5. Then just open it.
  */ 
 
 static int 
@@ -3316,10 +3523,14 @@
 {
 	dev_t dev;
 
+	if (pc->flags & CRASHBUILTIN)
+		goto open_device;
+
 	if (!memory_driver_module_loaded(NULL)) {
 	    	if (!insmod_memory_driver_module()) 
 			return FALSE;
-	}
+	} else
+		pc->flags |= MODPRELOAD;
 
 	if (!get_memory_driver_dev(&dev)) 
 		return FALSE;
@@ -3327,6 +3538,7 @@
 	if (!create_memory_device(dev)) 
 		return FALSE;
 
+open_device:
 	if ((pc->mfd = open(pc->memory_device, O_RDONLY)) < 0) { 
 		error(INFO, "%s: open: %s\n", pc->memory_device, 
 			strerror(errno));
@@ -3378,10 +3590,14 @@
 #define RADIX_TREE_MAP_SHIFT  6
 #define RADIX_TREE_MAP_SIZE  (1UL << RADIX_TREE_MAP_SHIFT)
 #define RADIX_TREE_MAP_MASK  (RADIX_TREE_MAP_SIZE-1)
+#define RADIX_TREE_TAGS         2
+#define RADIX_TREE_TAG_LONGS    \
+	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
 struct radix_tree_node {
         unsigned int    count;
         void            *slots[RADIX_TREE_MAP_SIZE];
+	unsigned long	tags[RADIX_TREE_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
 /*
@@ -3533,16 +3749,15 @@
 radix_tree_lookup(ulong root_rnode, ulong index, int height)
 {
 	unsigned int shift;
-	struct radix_tree_node **slot;
+	void *slot;
 	struct radix_tree_node slotbuf;
-	void **kslotp, **uslotp;
 
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-	kslotp = (void **)root_rnode;
+
+	readmem(root_rnode, KVADDR, &slot, sizeof(void *),
+		"radix_tree_root rnode", FAULT_ON_ERROR);
 
 	while (height > 0) {
-		readmem((ulong)kslotp, KVADDR, &slot, sizeof(void *),
-			"radix_tree_node slot", FAULT_ON_ERROR);
 
 		if (slot == NULL)
 			return NULL;
@@ -3551,15 +3766,13 @@
 			sizeof(struct radix_tree_node),
 			"radix_tree_node struct", FAULT_ON_ERROR);
 
-		uslotp = (void **)
-		    (slotbuf.slots + ((index >> shift) & RADIX_TREE_MAP_MASK));
-		kslotp = *uslotp;
-
+		slot = slotbuf.slots[((index >> shift) & RADIX_TREE_MAP_MASK)];
+		
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
 
-	return (void *) kslotp;
+	return slot;
 }
 
 int
@@ -3575,3 +3788,29 @@
 
 	return TRUE;
 }
+
+static int
+match_file_string(char *filename, char *string, char *buffer)
+{
+	int found;
+	char command[BUFSIZE];
+	FILE *pipe;
+
+
+	sprintf(command, "/usr/bin/strings %s", filename);
+        if ((pipe = popen(command, "r")) == NULL) {
+                error(INFO, "%s: %s\n", filename, strerror(errno));
+                return FALSE;
+        }
+
+        found = FALSE;
+        while (fgets(buffer, BUFSIZE-1, pipe)) {
+                if (strstr(buffer, string)) {
+                        found = TRUE;
+                        break;
+                }
+        }
+        pclose(pipe);
+
+	return found;
+}
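
The reworked radix_tree_lookup() above walks the tree from root->rnode,
consuming RADIX_TREE_MAP_SHIFT bits of the index per level, most-significant
chunk first.  The same indexing scheme can be shown on an ordinary in-memory
tree without crash's readmem() plumbing; the node structure below is a
simplified stand-in, not the kernel's radix_tree_node:

#include <stdio.h>
#include <string.h>

#define MAP_SHIFT	6
#define MAP_SIZE	(1UL << MAP_SHIFT)
#define MAP_MASK	(MAP_SIZE - 1)

struct node {
	void *slots[MAP_SIZE];
};

/* Peel off MAP_SHIFT bits of the index per level, top chunk first. */
static void *lookup(struct node *root, unsigned long index, int height)
{
	unsigned int shift = (height - 1) * MAP_SHIFT;
	void *slot = root;

	while (height > 0) {
		struct node *node = slot;

		if (node == NULL)
			return NULL;
		slot = node->slots[(index >> shift) & MAP_MASK];
		shift -= MAP_SHIFT;
		height--;
	}
	return slot;
}

int main(void)
{
	struct node root, leaf;
	unsigned long index = 70;	/* 70 = (1 << 6) + 6 */

	memset(&root, 0, sizeof(root));
	memset(&leaf, 0, sizeof(leaf));
	root.slots[index >> MAP_SHIFT] = &leaf;		/* level 1: slot 1 */
	leaf.slots[index & MAP_MASK] = "page";		/* level 0: slot 6 */

	printf("%s\n", (char *)lookup(&root, index, 2));	/* prints "page" */
	return 0;
}
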
--- crash/cmdline.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/cmdline.c	2009-01-26 14:54:24.000000000 -0500
@@ -1,8 +1,8 @@
 /* cmdline.c - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -34,6 +34,7 @@
 static void wait_for_children(ulong);
 #define ZOMBIES_ONLY (1)
 #define ALL_CHILDREN (2)
+int shell_command(char *);
 
 #define READLINE_LIBRARY
 
@@ -70,15 +71,45 @@
 	 *       program invocation.
 	 *    4. from a terminal.
 	 *    5. from a pipe, if stdin is a pipe rather than a terminal.
+	 *
+	 *  But first, handle the interruption of an input file caused
+	 *  by a FATAL error in one of its commands.
+	 *
 	 */
-	if (pc->flags & RCHOME_IFILE)  
+	if (pc->ifile_in_progress) {
+		switch (pc->ifile_in_progress)
+		{
+		case RCHOME_IFILE:
+			pc->flags |= INIT_IFILE|RCHOME_IFILE;
+			sprintf(pc->command_line, "< %s/.%src", 
+				pc->home, pc->program_name);
+			break;
+		case RCLOCAL_IFILE:
+			sprintf(pc->command_line, "< .%src", pc->program_name);
+			pc->flags |= INIT_IFILE|RCLOCAL_IFILE;
+			break;
+		case CMDLINE_IFILE:
+			sprintf(pc->command_line, "< %s", pc->input_file);
+			pc->flags |= INIT_IFILE|CMDLINE_IFILE;
+			break;
+		case RUNTIME_IFILE:
+			sprintf(pc->command_line, "%s", pc->runtime_ifile_cmd);
+			pc->flags |= IFILE_ERROR;
+			break;
+		default:
+			error(FATAL, "invalid input file\n");
+		}
+	} else if (pc->flags & RCHOME_IFILE) {
                 sprintf(pc->command_line, "< %s/.%src", 
 			pc->home, pc->program_name);
-	else if (pc->flags & RCLOCAL_IFILE) 
+		pc->flags |= INIT_IFILE;
+	} else if (pc->flags & RCLOCAL_IFILE) { 
                 sprintf(pc->command_line, "< .%src", pc->program_name);
-	else if (pc->flags & CMDLINE_IFILE) 
+		pc->flags |= INIT_IFILE;
+	} else if (pc->flags & CMDLINE_IFILE) {
 		sprintf(pc->command_line, "< %s", pc->input_file);
-	else if (pc->flags & TTY) {
+		pc->flags |= INIT_IFILE;
+	} else if (pc->flags & TTY) {
 		if (!(pc->readline = readline(pc->prompt))) {
 			args[0] = NULL;
 			fprintf(fp, "\n");
@@ -276,6 +307,108 @@
 }
 
 /*
+ *  Pager arguments.
+ */
+
+static char *less_argv[5] = {
+	"/usr/bin/less",
+	"-E",
+	"-X",
+        "-Ps -- MORE --  forward\\: <SPACE>, <ENTER> or j  backward\\: b or k  quit\\: q",
+	NULL
+};
+
+static char *more_argv[2] = {
+	"/bin/more",
+	NULL
+};
+
+static char **CRASHPAGER_argv = NULL;
+
+int
+CRASHPAGER_valid(void)
+{
+	int i, c;
+	char *env, *CRASHPAGER_buf;
+	char *arglist[MAXARGS];
+
+	if (CRASHPAGER_argv)
+		return TRUE;
+
+	if (!(env = getenv("CRASHPAGER")))
+		return FALSE;
+
+	if (strstr(env, "|") || strstr(env, "<") || strstr(env, ">")) {	
+		error(INFO, 
+		    "CRASHPAGER ignored: contains invalid character: \"%s\"\n", 
+			env);
+		return FALSE;
+	}
+
+	if ((CRASHPAGER_buf = (char *)malloc(strlen(env)+1)) == NULL)
+		return FALSE;
+
+	strcpy(CRASHPAGER_buf, env);
+
+	if (!(c = parse_line(CRASHPAGER_buf, arglist)) ||
+	    !file_exists(arglist[0], NULL) || access(arglist[0], X_OK) || 
+	    !(CRASHPAGER_argv = (char **)malloc(sizeof(char *) * (c+1)))) {
+		free(CRASHPAGER_buf);
+		if (strlen(env))
+			error(INFO, 
+		    		"CRASHPAGER ignored: \"%s\"\n", env);
+		return FALSE;
+	}
+
+	for  (i = 0; i < c; i++)
+		CRASHPAGER_argv[i] = arglist[i];
+	CRASHPAGER_argv[i] = NULL;
+	
+	return TRUE;
+}
+
+/*
+ *  Set up a command string buffer for error/help output.
+ */
+char *
+setup_scroll_command(void)
+{
+	char *buf;
+	long i, len;
+
+	if (!(pc->flags & SCROLL))
+		return NULL;
+
+	switch (pc->scroll_command)
+	{
+	case SCROLL_LESS:
+ 		buf = GETBUF(strlen(less_argv[0])+1);
+		strcpy(buf, less_argv[0]);
+		break;
+	case SCROLL_MORE:
+ 		buf = GETBUF(strlen(more_argv[0])+1);
+		strcpy(buf, more_argv[0]);
+		break;
+	case SCROLL_CRASHPAGER:
+		for (i = len = 0; CRASHPAGER_argv[i]; i++)
+			len += strlen(CRASHPAGER_argv[i])+1;
+
+		buf = GETBUF(len);
+		
+        	for  (i = 0; CRASHPAGER_argv[i]; i++) {
+			sprintf(&buf[strlen(buf)], "%s%s", 
+				i ? " " : "",
+				CRASHPAGER_argv[i]);
+		}
+		break;
+	default:
+		return NULL;
+        }
+
+	return buf;
+}
+
+/*
  *  Parse the command line for pipe or redirect characters:  
  *
  *   1. if a "|" character is found, popen() what comes after it, and 
@@ -349,10 +482,8 @@
 			if (LASTCHAR(p) == '|')
 				error(FATAL_RESTART, "pipe to nowhere?\n");
 
-			if (pc->redirect & REDIRECT_SHELL_COMMAND) {
-				system(p);
-				return REDIRECT_SHELL_COMMAND;
-			} 
+			if (pc->redirect & REDIRECT_SHELL_COMMAND)
+				return shell_command(p);
 
                         if ((pipe = popen(p, "w")) == NULL) {
                                 error(INFO, "cannot open pipe\n");
@@ -415,6 +546,9 @@
                                 return REDIRECT_FAILURE;
                         }
 
+			if (pc->flags & IFILE_ERROR)
+				append = TRUE;
+
         		if ((ofile = 
 			    fopen(p, append ? "a+" : "w+")) == NULL) {
                 		error(INFO, "unable to open %s\n", p);
@@ -464,10 +598,13 @@
 		switch (pc->scroll_command)
 		{
 		case SCROLL_LESS:
-			strcpy(pc->pipe_command, "/usr/bin/less");
+			strcpy(pc->pipe_command, less_argv[0]);
 			break;
 		case SCROLL_MORE:
-			strcpy(pc->pipe_command, "/bin/more");
+			strcpy(pc->pipe_command, more_argv[0]);
+			break;
+		case SCROLL_CRASHPAGER:
+			strcpy(pc->pipe_command, CRASHPAGER_argv[0]);
 			break;
 		}
 
@@ -691,16 +828,13 @@
 void
 cmdline_init(void)
 {
-	int fd;
+	int fd = 0;
 
 	/*
 	 *  Stash a copy of the original termios setup. 
          *  Build a raw version for quick use for each command entry.
 	 */ 
-	if (isatty(fileno(stdin))) {
-        	if ((fd = open("/dev/tty", O_RDONLY)) < 0) 
-			error(FATAL, "/dev/tty: %s\n", strerror(errno));
-                
+        if (isatty(fileno(stdin)) && ((fd = open("/dev/tty", O_RDONLY)) >= 0)) {
 		if (tcgetattr(fd, &pc->termios_orig) == -1) 
 			error(FATAL, "tcgetattr /dev/tty: %s\n", 
 				strerror(errno));
@@ -724,8 +858,10 @@
 		readline_init();
         }
         else {
-        	fprintf(fp, pc->flags & SILENT ? 
-			"" : "    NOTE: stdin: not a tty\n");
+		if (fd < 0)
+			error(INFO, "/dev/tty: %s\n", strerror(errno));
+		if (!(pc->flags & SILENT))
+			fprintf(fp, "NOTE: stdin: not a tty\n\n");
                 fflush(fp);
 		pc->flags &= ~TTY;
         }
@@ -839,13 +975,15 @@
 restore_sanity(void)
 {
 	int fd, waitstatus;
+        struct extension_table *ext;
+	struct command_table_entry *cp;
 
         if (pc->stdpipe) {
 		close(fileno(pc->stdpipe));
                 pc->stdpipe = NULL;
 		if (pc->stdpipe_pid && PID_ALIVE(pc->stdpipe_pid)) {
 			while (!waitpid(pc->stdpipe_pid, &waitstatus, WNOHANG))
-				;
+				stall(1000);
 		}
 		pc->stdpipe_pid = 0;
         }
@@ -855,12 +993,16 @@
 		console("wait for redirect %d->%d to finish...\n",
 			pc->pipe_shell_pid, pc->pipe_pid);
 		if (pc->pipe_pid)
-			while (PID_ALIVE(pc->pipe_pid)) 
+			while (PID_ALIVE(pc->pipe_pid)) {
 				waitpid(pc->pipe_pid, &waitstatus, WNOHANG);
+				stall(1000);
+			}
                 if (pc->pipe_shell_pid)
-		        while (PID_ALIVE(pc->pipe_shell_pid)) 
+		        while (PID_ALIVE(pc->pipe_shell_pid)) {
                         	waitpid(pc->pipe_shell_pid, 
 					&waitstatus, WNOHANG);
+				stall(1000);
+			}
 		pc->pipe_pid = 0;
 	}
 	if (pc->ifile_pipe) {
@@ -872,12 +1014,16 @@
                     (FROM_INPUT_FILE|REDIRECT_TO_PIPE|REDIRECT_PID_KNOWN))) {
 			console("wait for redirect %d->%d to finish...\n",
 				pc->pipe_shell_pid, pc->pipe_pid);
-                	while (PID_ALIVE(pc->pipe_pid))
+                	while (PID_ALIVE(pc->pipe_pid)) {
 				waitpid(pc->pipe_pid, &waitstatus, WNOHANG);
+				stall(1000);
+			}
                         if (pc->pipe_shell_pid) 
-                                while (PID_ALIVE(pc->pipe_shell_pid))
+                                while (PID_ALIVE(pc->pipe_shell_pid)) {
                                         waitpid(pc->pipe_shell_pid,
                                                 &waitstatus, WNOHANG);
+					stall(1000);
+				}
 			if (pc->redirect & (REDIRECT_MULTI_PIPE))
 				wait_for_children(ALL_CHILDREN);
 		}
@@ -918,13 +1064,20 @@
 
 	wait_for_children(ZOMBIES_ONLY);
 
-	pc->flags &= ~(RUNTIME_IFILE|_SIGINT_);
+	pc->flags &= ~(INIT_IFILE|RUNTIME_IFILE|IFILE_ERROR|_SIGINT_|PLEASE_WAIT);
 	pc->sigint_cnt = 0;
 	pc->redirect = 0;
 	pc->pipe_command[0] = NULLCHAR;
 	pc->pipe_pid = 0;
 	pc->pipe_shell_pid = 0;
 	pc->sbrk = sbrk(0);
+	if ((pc->curcmd_flags & (UD2A_INSTRUCTION|BAD_INSTRUCTION)) ==
+		(UD2A_INSTRUCTION|BAD_INSTRUCTION))
+		error(WARNING, "A (bad) instruction was noted in last disassembly.\n"
+                     "         Use \"dis -b [number]\" to set/restore the number of\n"
+                     "         encoded bytes to skip after a ud2a (BUG) instruction.\n");
+	pc->curcmd_flags = 0;
+	pc->curcmd_private = 0;
 
 	restore_gdb_sanity();
 
@@ -942,6 +1095,16 @@
 	clear_vma_cache();
 	clear_active_set();
 
+	/*
+	 *  Call the cleanup() function of any extension.
+	 */
+        for (ext = extension_table; ext; ext = ext->next) {
+                for (cp = ext->command_table; cp->name; cp++) {
+                        if (cp->flags & CLEANUP)
+                                (*cp->func)();
+		}
+        }
+
 	if (CRASHDEBUG(4)) {
                 dump_filesys_table(0);
 		dump_vma_cache(0);
@@ -961,6 +1124,8 @@
 {
         int fd;
 
+	pc->flags &= ~IFILE_ERROR;
+
         if (pc->ifile_pipe) {
 		close(fileno(pc->ifile_pipe));
                 pc->ifile_pipe = NULL;
@@ -1076,7 +1241,6 @@
 	} else
 		this = 0;
 
-
         if (pc->flags & RUNTIME_IFILE) {
                 error(INFO, "embedded input files not allowed!\n");
                 return;
@@ -1111,6 +1275,30 @@
         pc->flags |= RUNTIME_IFILE;
 	incoming_fp = fp;
 
+	/*
+	 *  Handle runtime commands that use input files.
+	 */
+	if ((pc->ifile_in_progress = this) == 0) {
+		if (!pc->runtime_ifile_cmd) {
+			if (!(pc->runtime_ifile_cmd = (char *)malloc(BUFSIZE))) {
+				error(INFO, 
+				    "cannot malloc input file command line buffer\n");
+				return;
+			}
+			BZERO(pc->runtime_ifile_cmd, BUFSIZE);
+		}
+		if (!strlen(pc->runtime_ifile_cmd))
+			strcpy(pc->runtime_ifile_cmd, pc->orig_line);
+		pc->ifile_in_progress = RUNTIME_IFILE;
+	}
+
+	/*
+	 *  If there's an offset, then there was a FATAL error caused
+	 *  by the last command executed from the input file.
+	 */
+	if (pc->ifile_offset)
+		fseek(pc->ifile, (long)pc->ifile_offset, SEEK_SET);
+
         while (fgets(buf, BUFSIZE-1, pc->ifile)) {
                 /*
                  *  Restore normal environment.
@@ -1120,6 +1308,8 @@
         	BZERO(pc->command_line, BUFSIZE);
         	BZERO(pc->orig_line, BUFSIZE);
 
+		pc->ifile_offset = ftell(pc->ifile);
+
 		if (STRNEQ(buf, "#") || STREQ(buf, "\n"))
 			continue;
 
@@ -1168,6 +1358,10 @@
         fclose(pc->ifile);
         pc->ifile = NULL;
         pc->flags &= ~RUNTIME_IFILE;
+	pc->ifile_offset = 0;
+	if (pc->runtime_ifile_cmd)
+		BZERO(pc->runtime_ifile_cmd, BUFSIZE);
+	pc->ifile_in_progress = 0;
 }
 
 /*
@@ -1706,15 +1900,20 @@
 		error(FATAL, 
 		"scrolling must be turned off when repeating an input file\n");
 
+	pc->curcmd_flags |= REPEAT;
+
 	while (TRUE) {
 		optind = 0;
-console("exec_command...\n");
+
 		exec_command();
 		free_all_bufs();
 
 		if (received_SIGINT() || !output_open())
 			break;
 
+		if (!(pc->curcmd_flags & REPEAT))
+			break;
+
 		if (delay)
 			sleep(delay);
 
@@ -1829,19 +2028,6 @@
  *  Set up the standard output pipe using whichever was selected during init.
  */
 
-static char *less_argv[5] = {
-	"/usr/bin/less",
-	"-E",
-	"-X",
-        "-Ps -- MORE --  forward\\: <SPACE>, <ENTER> or j  backward\\: b or k  quit\\: q",
-	NULL
-};
-
-static char *more_argv[2] = {
-	"/bin/more",
-	NULL
-};
-
 static int
 setup_stdpipe(void)
 {
@@ -1877,6 +2063,9 @@
                 case SCROLL_MORE:
                         strcpy(pc->pipe_command, more_argv[0]);
                         break;
+		case SCROLL_CRASHPAGER:
+                        strcpy(pc->pipe_command, CRASHPAGER_argv[0]);
+                        break;
                 }
 
 		if (CRASHDEBUG(2))
@@ -1905,10 +2094,16 @@
 			path = more_argv[0];
 			execv(path, more_argv);
 			break;
+
+		case SCROLL_CRASHPAGER:
+			path = CRASHPAGER_argv[0];
+			execv(path, CRASHPAGER_argv);
+			break;
 		}
 
-		perror("child execv failed"); 
-		return(clean_exit(1));
+		perror(path); 
+		fprintf(stderr, "execv of scroll command failed\n");
+		exit(1);
 	}
 }
 
@@ -1939,5 +2134,38 @@
 			    fprintf(fp, "wait_for_children: reaped %d\n", pid);
                 	break;
         	}
+		stall(1000);
 	}
 }
+
+/*
+ *  Run an escaped shell command, redirecting the output to
+ *  the current output file.
+ */
+int
+shell_command(char *cmd)
+{
+	FILE *pipe;
+	char buf[BUFSIZE];
+
+	if ((pipe = popen(cmd, "r")) == NULL) {
+		error(INFO, "cannot open pipe: %s\n", cmd);
+		pc->redirect &= ~REDIRECT_SHELL_COMMAND;
+                pc->redirect |= REDIRECT_FAILURE;
+                return REDIRECT_FAILURE;
+        }
+
+        while (fgets(buf, BUFSIZE, pipe))
+		fprintf(fp, buf);
+        pclose(pipe);
+
+	return REDIRECT_SHELL_COMMAND;
+}
+
+int minimal_functions(char *name)
+{
+	return  STREQ("log", name) || STREQ("help",name) || \
+		STREQ("dis", name) || STREQ("q", name)   || \
+		STREQ("sym", name) || STREQ("exit", name)|| \
+		STREQ("rd", name)  || STREQ("eval", name) ; 
+}
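
CRASHPAGER_valid() above accepts an alternate pager from the environment only
after rejecting shell redirection characters, splitting the value into an argv
array, and verifying that the first token is an executable file.  A
stand-alone sketch of that validation pattern (the tokenizing is simplified,
not crash's parse_line()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Build a NULL-terminated argv from an environment variable, or return
 * NULL if the value is unset, contains redirection characters, or does
 * not start with the path of an executable file. */
static char **pager_argv_from_env(const char *var)
{
	char *env, *copy, *tok, **argv;
	int argc = 0;

	if (!(env = getenv(var)) || strpbrk(env, "|<>"))
		return NULL;

	if (!(copy = strdup(env)))
		return NULL;

	if (!(argv = calloc(strlen(env) / 2 + 2, sizeof(char *)))) {
		free(copy);
		return NULL;
	}

	for (tok = strtok(copy, " \t"); tok; tok = strtok(NULL, " \t"))
		argv[argc++] = tok;
	argv[argc] = NULL;

	if (!argc || access(argv[0], X_OK)) {
		free(copy);
		free(argv);
		return NULL;
	}
	return argv;
}

int main(void)
{
	printf("CRASHPAGER is %s\n",
	       pager_argv_from_env("CRASHPAGER") ? "usable" : "ignored");
	return 0;
}
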
--- crash/lkcd_common.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_common.c	2007-11-19 10:48:18.000000000 -0500
@@ -3,8 +3,8 @@
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
  * Copyright (C) 2002 Silicon Graphics, Inc. 
  * Copyright (C) 2002 Free Software Foundation, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2007 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2007 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -53,6 +53,8 @@
 
 struct lkcd_environment lkcd_environment = { 0 };
 struct lkcd_environment *lkcd = &lkcd_environment;
+static int uncompress_errloc;
+static int uncompress_recover(unsigned char *, ulong, unsigned char *, ulong);
 
 ulonglong 
 fix_lkcd_address(ulonglong addr)
@@ -62,7 +64,7 @@
 
     for (i = 0; i < lkcd->fix_addr_num; i++) {
 	if ( (addr >=lkcd->fix_addr[i].task) && 
-		(addr <= lkcd->fix_addr[i].task + STACKSIZE())){
+		(addr < lkcd->fix_addr[i].task + STACKSIZE())){
 
 	    offset = addr - lkcd->fix_addr[i].task;
 	    addr = lkcd->fix_addr[i].saddr + offset;
@@ -208,6 +210,7 @@
 
 	case LKCD_DUMP_V8:
 	case LKCD_DUMP_V9:
+	case LKCD_DUMP_V10:
 		lkcd->version = LKCD_DUMP_V8;
 		return TRUE;
 
@@ -623,6 +626,10 @@
 {
         static int i = 0;
 
+        if (pc->flags & SILENT) {
+                return;
+        }
+
         switch (++i%4) {
         case 0:
                 lkcd_print("|\b");
@@ -667,6 +674,8 @@
 {
 	uint64_t zone, page;
 	int ii, ret;
+	int max_zones;
+	struct physmem_zone *zones;
 
 	zone = paddr & lkcd->zone_mask;
 
@@ -693,19 +702,21 @@
 		lkcd->num_zones++;
 	}
 
+retry:
 	/* find the zone */
 	for (ii=0; ii < lkcd->num_zones; ii++) {
 		if (lkcd->zones[ii].start == zone) {
 			if (lkcd->zones[ii].pages[page].offset != 0) {
 			   if (lkcd->zones[ii].pages[page].offset != off) {
-				error(INFO, "conflicting page: zone %lld, "
+				if (CRASHDEBUG(1))
+				    error(INFO, "LKCD: conflicting page: zone %lld, "
 					"page %lld: %lld, %lld != %lld\n",
 					(unsigned long long)zone, 
 					(unsigned long long)page, 
 					(unsigned long long)paddr, 
 					(unsigned long long)off,
 					(unsigned long long)lkcd->zones[ii].pages[page].offset);
-			  	abort();
+				return -1;
 			   }
 			   ret = 0;
 			} else {
@@ -734,8 +745,20 @@
 			ret = 1;
 			lkcd->num_zones++;
 		} else {
-			lkcd_print("fixme, need to add more zones (ZONE_ALLOC)\n");
-			exit(1);
+			/* need to expand zone */
+			max_zones = lkcd->max_zones * 2;
+			zones = malloc(max_zones * sizeof(struct physmem_zone));
+			if (!zones) {
+				return -1; /* This should be fatal */
+			}
+			BZERO(zones, max_zones * sizeof(struct physmem_zone));
+			memcpy(zones, lkcd->zones,
+				lkcd->max_zones * sizeof(struct physmem_zone));
+			free(lkcd->zones);
+
+			lkcd->zones = zones;
+			lkcd->max_zones = max_zones;
+			goto retry;
 		}
 	}
 
@@ -765,11 +788,32 @@
 }
 
 
+#ifdef IA64
+
+int
+lkcd_get_kernel_start(ulong *addr)
+{
+	if (!addr)
+		return 0;
+
+	switch (lkcd->version)
+	{
+        case LKCD_DUMP_V8:
+        case LKCD_DUMP_V9:
+		return lkcd_get_kernel_start_v8(addr);
+
+	default:
+		return 0;
+	}
+}
+
+#endif
+
 
 int
 lkcd_lseek(physaddr_t paddr)
 {
-        long i;
+        long i = 0;
 	int err;
         int eof;
         void *dp;
@@ -814,7 +858,7 @@
     lseek(lkcd->fd, lkcd->page_offset_max, SEEK_SET);
     eof = FALSE;
     while (!eof) {
-	if( (i%2048) == 0) {
+	if( (i++%2048) == 0) {
 	    lkcd_speedo();
 	}
 
@@ -1164,40 +1208,103 @@
         return 1;
 }
 
+/* Returns the bit offset if it's able to correct, or negative if not */
+static int
+uncompress_recover(unsigned char *dest, ulong destlen,
+    unsigned char *source, ulong sourcelen)
+{
+        int byte, bit;
+        ulong retlen = destlen;
+        int good_decomp = 0, good_rv = -1;
+
+        /* Generate all single bit errors */
+        if (sourcelen > 16384) {
+                lkcd_print("uncompress_recover: sourcelen %ld too long\n",
+                    sourcelen);
+                return(-1);
+        }
+        for (byte = 0; byte < sourcelen; byte++) {
+                for (bit = 0; bit < 8; bit++) {
+                        source[byte] ^= (1 << bit);
+
+                        if (uncompress(dest, &retlen, source, sourcelen) == Z_OK &&
+                            retlen == destlen) {
+                                good_decomp++;
+                                lkcd_print("good for flipping byte %d bit %d\n",
+                                    byte, bit);
+                                good_rv = bit + byte * 8;
+                        }
+
+                        /* Put it back */
+                        source[byte] ^= (1 << bit);
+                }
+        }
+        if (good_decomp == 0) {
+                lkcd_print("Could not correct gzip errors.\n");
+                return -2;
+        } else if (good_decomp > 1) {
+                lkcd_print("Too many valid gzip decompressions: %d.\n", good_decomp);
+                return -3;
+        } else {
+                source[good_rv >> 8] ^= 1 << (good_rv % 8);
+                uncompress(dest, &retlen, source, sourcelen);
+                source[good_rv >> 8] ^= 1 << (good_rv % 8);
+                return good_rv;
+        }
+}
+
+
 /*
  *  Uncompress a gzip'd buffer.
+ *
+ *  Returns FALSE on error.  If set, then
+ *    a non-negative value of uncompress_errloc indicates the location of
+ *    a single-bit error, and the data may be used.
  */
 static int 
 lkcd_uncompress_gzip(unsigned char *dest, ulong destlen, 
 	unsigned char *source, ulong sourcelen)
 {
         ulong retlen = destlen;
+        int rc = FALSE;
 
 	switch (uncompress(dest, &retlen, source, sourcelen)) 
 	{
 	case Z_OK:
 		if (retlen == destlen)
-			return TRUE;
+                        rc = TRUE;
+                        break;
 
 		lkcd_print("uncompress: returned length not page size: %ld\n",
 				retlen);
-		return FALSE;
+                rc = FALSE;
+                break;
 
 	case Z_MEM_ERROR:
 		lkcd_print("uncompress: Z_MEM_ERROR (not enough memory)\n");
-		return FALSE;
+                rc = FALSE;
+                break;
 
 	case Z_BUF_ERROR:
 		lkcd_print("uncompress: "
 			"Z_BUF_ERROR (not enough room in output buffer)\n");
-		return FALSE;
+                rc = FALSE;
+                break;
 
 	case Z_DATA_ERROR:
 		lkcd_print("uncompress: Z_DATA_ERROR (input data corrupted)\n");
-		return FALSE;
+                rc = FALSE;
+                break;
+        default:
+                rc = FALSE;
+                break;
 	}
 
-	return FALSE;
+        if (rc == FALSE) {
+                uncompress_errloc =
+                    uncompress_recover(dest, destlen, source, sourcelen);
+        }
+	return rc;
 }
 
 
@@ -1252,8 +1359,9 @@
 	dp_flags = lkcd->get_dp_flags();
 	dp_address = lkcd->get_dp_address();
 
-        if (dp_flags & LKCD_DUMP_END)
+        if (dp_flags & LKCD_DUMP_END) {
                 return LKCD_DUMPFILE_END;
+        }
 
 	if ((lkcd->flags & LKCD_VALID) && (page > lkcd->total_pages)) 
 		lkcd->total_pages = page;
@@ -1315,3 +1423,15 @@
 	}
 }
 
+int
+get_lkcd_regs_for_cpu(struct bt_info *bt, ulong *eip, ulong *esp)
+{
+	switch (lkcd->version) {
+	case LKCD_DUMP_V8:
+	case LKCD_DUMP_V9:
+		return get_lkcd_regs_for_cpu_v8(bt, eip, esp);
+	default:
+		return -1;
+	}
+}
+
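
uncompress_recover() above brute-forces every single-bit flip of a corrupted
compressed page and accepts the result only if exactly one flip yields a
full-length decompression.  The same idea, reduced to a stand-alone helper
around zlib's uncompress() (link with -lz; the caller owns both buffers):

#include <zlib.h>

/* Return the absolute bit offset of the single-bit flip that lets src
 * decompress to exactly destlen bytes, or -1 if no flip or more than
 * one flip works.  src is left unmodified on return. */
long recover_single_bit_error(unsigned char *dest, uLong destlen,
			      unsigned char *src, uLong srclen)
{
	long fix = -1;
	int matches = 0;
	uLong byte;
	int bit;

	for (byte = 0; byte < srclen; byte++) {
		for (bit = 0; bit < 8; bit++) {
			uLongf outlen = destlen;

			src[byte] ^= 1 << bit;			/* flip */
			if (uncompress(dest, &outlen, src, srclen) == Z_OK &&
			    outlen == destlen) {
				matches++;
				fix = (long)(byte * 8 + bit);
			}
			src[byte] ^= 1 << bit;			/* restore */
		}
	}
	return matches == 1 ? fix : -1;
}
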
--- crash/extensions.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/extensions.c	2007-09-24 15:49:04.000000000 -0400
@@ -18,9 +18,6 @@
 #include "defs.h"
 #include <dlfcn.h>
 
-static void load_extension(char *);
-static void unload_extension(char *);
-
 #define DUMP_EXTENSIONS   (0)
 #define LOAD_EXTENSION    (1)
 #define UNLOAD_EXTENSION  (2)
@@ -110,6 +107,7 @@
 void 
 dump_extension_table(int verbose)
 {
+	int i;
 	struct extension_table *ext;
 	struct command_table_entry *cp;
 	char buf[BUFSIZE];
@@ -120,23 +118,37 @@
 
 	if (verbose) {
        		for (ext = extension_table; ext; ext = ext->next) {
-                        fprintf(fp, "     filename: %s\n", ext->filename);
-                        fprintf(fp, "       handle: %lx\n", (ulong)ext->handle);
-                        fprintf(fp, "command_table: %lx (", 
-				(ulong)ext->command_table);
-                        for (others = 0, cp = ext->command_table; cp->name;cp++)
-                                fprintf(fp, "%s%s%s", others++ ? " " : "",
-                                        cp->name, cp->help_data ? "*" : "");
-                        fprintf(fp, ")\n");
-			fprintf(fp, "        flags: %lx (", ext->flags);
+                        fprintf(fp, "        filename: %s\n", ext->filename);
+                        fprintf(fp, "          handle: %lx\n", (ulong)ext->handle);
+
+
+			fprintf(fp, "           flags: %lx (", ext->flags);
 			others = 0;
 			if (ext->flags & REGISTERED)
 				fprintf(fp, "%sREGISTERED", others++ ?
 					"|" : "");
 			fprintf(fp, ")\n");
-                        fprintf(fp, "         next: %lx\n", (ulong)ext->next);
-                        fprintf(fp, "         prev: %lx\n%s", 
-				(ulong)ext->prev, ext->next ? "\n" : "");
+                        fprintf(fp, "            next: %lx\n", (ulong)ext->next);
+                        fprintf(fp, "            prev: %lx\n", (ulong)ext->prev);
+
+                        for (i = 0, cp = ext->command_table; cp->name; cp++, i++) {
+                        	fprintf(fp, "command_table[%d]: %lx\n", i, (ulong)cp); 
+				fprintf(fp, "                  name: %s\n", cp->name);
+				fprintf(fp, "                  func: %lx\n", (ulong)cp->func);
+				fprintf(fp, "             help_data: %lx\n", (ulong)cp->help_data); 
+				fprintf(fp, "                 flags: %lx (", cp->flags);
+				others = 0;
+				if (cp->flags & CLEANUP)
+					fprintf(fp, "%sCLEANUP", others++ ? "|" : "");
+				if (cp->flags & REFRESH_TASK_TABLE)
+					fprintf(fp, "%sREFRESH_TASK_TABLE", others++ ? "|" : "");
+				if (cp->flags & HIDDEN_COMMAND)
+					fprintf(fp, "%sHIDDEN_COMMAND", others++ ? "|" : "");
+				fprintf(fp, ")\n");
+			}
+
+			if (ext->next) 
+				fprintf(fp, "\n");
 		}
 		return;
 	}
@@ -171,7 +183,7 @@
 /*
  *  Load an extension library.
  */
-static void 
+void 
 load_extension(char *lib)
 {
 	struct extension_table *ext;
@@ -208,7 +220,7 @@
         *  _init() function before dlopen() returns below.
 	*/
 	pc->curext = ext;
-	ext->handle = dlopen(ext->filename, RTLD_NOW); 
+	ext->handle = dlopen(ext->filename, RTLD_NOW|RTLD_GLOBAL); 
 
 	if (!ext->handle) {
 		strcpy(buf, dlerror());
@@ -252,7 +264,7 @@
 /*
  *  Unload all, or as specified, extension libraries.
  */
-static void 
+void 
 unload_extension(char *lib)
 {
         struct extension_table *ext;
@@ -342,4 +354,23 @@
 	pc->curext->flags |= REGISTERED;             /* Mark of approval */
 }
 
+/* 
+ *  Hooks for sial.
+ */
+unsigned long 
+get_curtask(void) 
+{ 
+	return CURRENT_TASK(); 
+}
+
+char *
+crash_global_cmd(void) 
+{ 
+	return pc->curcmd;
+}
 
+struct command_table_entry *
+crash_cmd_table(void) 
+{ 
+	return pc->cmd_table; 
+}
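
load_extension() above now passes RTLD_GLOBAL along with RTLD_NOW: unresolved
symbols are reported at dlopen() time rather than on first use, and symbols
exported by an already-loaded extension remain visible to shared objects
loaded later.  A minimal stand-alone loader showing those flags (link with
-ldl; the plug-in path comes from the command line):

#include <stdio.h>
#include <dlfcn.h>

/* dlopen a plug-in, binding all symbols now and exporting its symbols
 * to subsequently loaded objects. */
static void *load_plugin(const char *path)
{
	void *handle = dlopen(path, RTLD_NOW | RTLD_GLOBAL);

	if (!handle)
		fprintf(stderr, "dlopen: %s\n", dlerror());
	return handle;
}

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s file.so\n", argv[0]);
		return 1;
	}
	return load_plugin(argv[1]) ? 0 : 1;
}
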
--- crash/s390.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/s390.c	2009-01-15 14:07:23.000000000 -0500
@@ -1,9 +1,9 @@
 /* s390.c - core analysis suite
  *
  * Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2005 Michael Holzheu, IBM Corporation
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2005, 2006 Michael Holzheu, IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -21,17 +21,6 @@
 #define S390_WORD_SIZE    4
 #define S390_ADDR_MASK    0x7fffffff
 
-#define S390_PAGE_SHIFT   12
-#define S390_PAGE_SIZE    (1UL << S390_PAGE_SHIFT)
-#define S390_PAGE_MASK     (~(S390_PAGE_SIZE-1))
-
-#define S390_PGDIR_SHIFT  20
-#define S390_PGDIR_SIZE   (1UL << S390_PGDIR_SHIFT)
-#define S390_PGDIR_MASK   (~(S390_PGDIR_SIZE-1))
-
-#define S390_PTRS_PER_PGD       2048
-#define S390_PTRS_PER_PTE       256
-
 #define S390_PMD_BASE_MASK      (~((1UL<<6)-1))
 #define S390_PT_BASE_MASK       S390_PMD_BASE_MASK
 #define S390_PAGE_BASE_MASK     (~((1UL<<12)-1))
@@ -44,26 +33,10 @@
 #define S390_PAGE_INVALID       0x400    /* HW invalid */
 #define S390_PAGE_INVALID_MASK  0x601ULL /* for linux 2.6 */
 #define S390_PAGE_INVALID_NONE  0x401ULL /* for linux 2.6 */
-#define S390_PAGE_TABLE_LEN     0xf      /* only full page-tables */
-#define S390_PAGE_TABLE_INV     0x20     /* invalid page-table */
 
 #define S390_PTE_INVALID_MASK   0x80000900
 #define S390_PTE_INVALID(x) ((x) & S390_PTE_INVALID_MASK)
 
-#define S390_PMD_INVALID_MASK   0x80000000
-#define S390_PMD_INVALID(x) ((x) & S390_PMD_INVALID_MASK)
-
-/* pgd/pmd/pte query macros */
-#define s390_pmd_none(x) ((x) & S390_PAGE_TABLE_INV)
-#define s390_pmd_bad(x) (((x) & (~S390_PMD_BASE_MASK & \
-                                 ~S390_PAGE_TABLE_INV)) != \
-                                 S390_PAGE_TABLE_LEN)
-
-#define s390_pte_none(x) (((x) & (S390_PAGE_INVALID | S390_RO_S390 | \
-                                  S390_PAGE_PRESENT)) == \
-                                  S390_PAGE_INVALID)
-
-
 #define ASYNC_STACK_SIZE  STACKSIZE() // can be 4096 or 8192
 #define KERNEL_STACK_SIZE STACKSIZE() // can be 4096 or 8192
 
@@ -73,8 +46,6 @@
  * declarations of static functions
  */
 static void s390_print_lowcore(char*, struct bt_info*,int);
-static unsigned long s390_pgd_offset(unsigned long, unsigned long);
-static unsigned long s390_pte_offset(unsigned long, unsigned long);
 static int s390_kvtop(struct task_context *, ulong, physaddr_t *, int);
 static int s390_uvtop(struct task_context *, ulong, physaddr_t *, int);
 static int s390_vtop(unsigned long, ulong, physaddr_t*, int);
@@ -86,7 +57,6 @@
 static ulong s390_processor_speed(void);
 static int s390_eframe_search(struct bt_info *);
 static void s390_back_trace_cmd(struct bt_info *);
-static void s390_back_trace(struct gnu_request *, struct bt_info *);
 static void s390_dump_irq(int);
 static void s390_get_stack_frame(struct bt_info *, ulong *, ulong *);
 static int s390_dis_filter(ulong, char *);
@@ -158,7 +128,10 @@
 		machdep->nr_irqs = 0;  /* TBD */
 		machdep->vmalloc_start = s390_vmalloc_start;
 		machdep->dump_irq = s390_dump_irq;
-		machdep->hz = HZ;
+		if (!machdep->hz)
+			machdep->hz = HZ;
+		machdep->section_size_bits = _SECTION_SIZE_BITS;
+		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
 		break;
 
 	case POST_INIT:
@@ -178,8 +151,6 @@
 	fprintf(fp, "              flags: %lx (", machdep->flags);
 	if (machdep->flags & KSYMS_START)
 		fprintf(fp, "%sKSYMS_START", others++ ? "|" : "");
-	if (machdep->flags & SYSRQ)
-		fprintf(fp, "%sSYSRQ", others++ ? "|" : "");
 	fprintf(fp, ")\n");
 
 	fprintf(fp, "             kvbase: %lx\n", machdep->kvbase);
@@ -226,23 +197,12 @@
 	fprintf(fp, "                pmd: %lx\n", (ulong)machdep->pmd);
 	fprintf(fp, "               ptbl: %lx\n", (ulong)machdep->ptbl);
 	fprintf(fp, "       ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd);
+	fprintf(fp, "   max_physmem_bits: %ld\n", machdep->max_physmem_bits);
+	fprintf(fp, "  section_size_bits: %ld\n", machdep->section_size_bits);
 	fprintf(fp, "           machspec: %lx\n", (ulong)machdep->machspec);
 }
 
 /*
- * Check if address is in the vmalloc area
- */
-int
-s390_IS_VMALLOC_ADDR(ulong addr)
-{
-	static unsigned long high_memory = 0;
-	if(!high_memory){
-		high_memory = s390_vmalloc_start();
-	}
-	return (addr > high_memory);
-}
-
-/*
  * Check if address is in context's address space
  */
 static int 
@@ -293,7 +253,7 @@
 /*
  * Check if page is mapped
  */
-inline int 
+static inline int 
 s390_pte_present(unsigned long x)
 {
 	if(THIS_KERNEL_VERSION >= LINUX(2,6,0)) {
@@ -307,60 +267,87 @@
 /*
  * page table traversal functions
  */
-static unsigned long 
-s390_pgd_offset(unsigned long pgd_base, unsigned long vaddr)
-{
-	unsigned long pgd_off, pmd_base;
-
-	pgd_off = ((vaddr >> S390_PGDIR_SHIFT) & (S390_PTRS_PER_PGD - 1))
-		* S390_WORD_SIZE;
-	readmem(pgd_base + pgd_off, PHYSADDR, &pmd_base,sizeof(long),
-		"pgd_base",FAULT_ON_ERROR);
-	return pmd_base;
-}
 
-unsigned long s390_pte_offset(unsigned long pte_base, unsigned long vaddr)
+/* Segment table traversal function */
+static ulong _kl_sg_table_deref_s390(ulong vaddr, ulong table, int len)
 {
-	unsigned pte_off, pte_val;
+	ulong offset, entry;
+
+	offset = ((vaddr >> 20) & 0x7ffUL) * 4;
+	if (offset >= (len + 1)*64)
+		/* Offset is over the table limit. */
+		return 0;
+	readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry",
+		FAULT_ON_ERROR);
 
-	pte_off = ((vaddr >> S390_PAGE_SHIFT) & (S390_PTRS_PER_PTE - 1))
-		* S390_WORD_SIZE;
-	readmem(pte_base + pte_off, PHYSADDR, &pte_val, sizeof(long),
-		"pte_val",FAULT_ON_ERROR);
-	return pte_val;
+	/*
+	 * Check if the segment table entry could be read and doesn't have
+	 * any of the reserved bits set.
+	 */
+	if (entry & 0x80000000UL)
+		return 0;
+	/* Check if the segment table entry has the invalid bit set. */
+	if (entry & 0x40UL)
+		return 0;
+	/* Segment table entry is valid and well formed. */
+	return entry;
+}
+
+/* Page table traversal function */
+static ulong _kl_pg_table_deref_s390(ulong vaddr, ulong table, int len)
+{
+	ulong offset, entry;
+
+	offset = ((vaddr >> 12) & 0xffUL) * 4;
+	if (offset >= (len + 1)*64)
+		/* Offset is over the table limit. */
+		return 0;
+	readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry",
+		FAULT_ON_ERROR);
+	/*
+	 * Check if the page table entry could be read and doesn't have
+	 * any of the reserved bits set.
+	 */
+	if (entry & 0x80000900UL)
+		return 0;
+	/* Check if the page table entry has the invalid bit set. */
+	if (entry & 0x400UL)
+		return 0;
+	/* Page table entry is valid and well formed. */
+	return entry;
 }
 
-/*
- * Generic vtop function for user and kernel addresses
- */
+/* lookup virtual address in page tables */
 static int
-s390_vtop(unsigned long pgd_base, ulong kvaddr, physaddr_t *paddr, int verbose)
+s390_vtop(unsigned long table, ulong vaddr, physaddr_t *phys_addr, int verbose)
 {
-	unsigned pte_base, pte_val;
+	ulong entry, paddr;
+	int len;
 
-	/* get the pgd entry */
-	pte_base = s390_pgd_offset(pgd_base,kvaddr);
-	if(S390_PMD_INVALID(pte_base) ||
-	   s390_pmd_bad(pte_base) ||
-	   s390_pmd_none(pte_base)) {
-		*paddr = 0;
-		return FALSE;
-	}
-	/* get the pte */
-	pte_base = pte_base & S390_PT_BASE_MASK;
-	pte_val = s390_pte_offset(pte_base,kvaddr);
-	if(S390_PTE_INVALID(pte_val) ||
-	   s390_pte_none(pte_val)){
-		*paddr = 0;
+	/*
+	 * Get the segment table entry.
+	 * We assume that the segment table length field in the asce
+	 * is set to the maximum value of 127 (which translates to
+	 * a segment table with 2048 entries) and that the addressing
+	 * mode is 31 bit.
+	 */
+	entry = _kl_sg_table_deref_s390(vaddr, table, 127);
+	if (!entry)
 		return FALSE;
-	}
-	if(!s390_pte_present(pte_val)){
-		/* swapped out */
-		*paddr = pte_val;
+	table = entry & 0x7ffffc00UL;
+	len = entry & 0xfUL;
+
+	/* Get the page table entry */
+	entry = _kl_pg_table_deref_s390(vaddr, table, len);
+	if (!entry)
 		return FALSE;
-	}
-	*paddr = (pte_val & S390_PAGE_BASE_MASK) |
-		  (kvaddr & (~(S390_PAGE_MASK)));
+
+	/* Isolate the page origin from the page table entry. */
+	paddr = entry & 0x7ffff000UL;
+
+	/* Add the page offset and return the final value. */
+	*phys_addr = paddr + (vaddr & 0xfffUL);
+
 	return TRUE;
 }
 
@@ -422,6 +409,10 @@
 	if (STREQ(name, "Letext") || STREQ(name, "gcc2_compiled."))
 		return FALSE;
 
+	/* reject L2^B symbols */
+	if (strstr(name, "L2\002") == name)
+	    	return FALSE;
+
 	/* throw away all symbols containing a '.' */
 	for(i = 0; i < strlen(name);i++){
 		if(name[i] == '.')
@@ -483,7 +474,7 @@
 		return FALSE;
 	}
 	fprintf(fp,"PTE      PHYSICAL  FLAGS\n");
-	fprintf(fp,"%08x %08x",pte, pte & S390_PAGE_BASE_MASK);
+	fprintf(fp,"%08lx %08lx",pte, pte & S390_PAGE_BASE_MASK);
 	fprintf(fp,"  (");
 	if(pte & S390_PAGE_INVALID)
 		fprintf(fp,"INVALID ");
@@ -510,7 +501,7 @@
 /*
  * returns cpu number of task
  */ 
-int 
+static int 
 s390_cpu_of_task(unsigned long task)
 {
 	int cpu;
@@ -551,12 +542,13 @@
                         return FALSE;
         } else {
 		/* Linux 2.6 */
-		unsigned long runqueue_addr, runqueue_offset, per_cpu_offset;
+		unsigned long runqueue_addr, runqueue_offset;
 		unsigned long cpu_offset, per_cpu_offset_addr, running_task;
-		char runqueue[4096];
+		char *runqueue;
 		int cpu;
 
 		cpu = s390_cpu_of_task(task);
+		runqueue = GETBUF(SIZE(runqueue));
 
 		runqueue_offset=symbol_value("per_cpu__runqueues");
 		per_cpu_offset_addr=symbol_value("__per_cpu_offset");
@@ -564,10 +556,10 @@
 			&cpu_offset, sizeof(long),"per_cpu_offset",
 			FAULT_ON_ERROR);
 		runqueue_addr=runqueue_offset + cpu_offset;
-		readmem(runqueue_addr,KVADDR,&runqueue,sizeof(runqueue),
+		readmem(runqueue_addr,KVADDR,runqueue,SIZE(runqueue),
 			"runqueue", FAULT_ON_ERROR);
-		running_task = *((unsigned long*)&runqueue[MEMBER_OFFSET(
-				"runqueue", "curr")]);
+		running_task = ULONG(runqueue + OFFSET(runqueue_curr));
+		FREEBUF(runqueue);
 		if(running_task == task)
 			return TRUE;
 		else
@@ -700,7 +692,7 @@
 		} else if(skip_first_frame){
 			skip_first_frame=0;
 		} else {
-			fprintf(fp," #%i [%08x] ",i,backchain);
+			fprintf(fp," #%i [%08lx] ",i,backchain);
 			fprintf(fp,"%s at %x\n", closest_symbol(r14), r14);
 			if (bt->flags & BT_LINE_NUMBERS)
 				s390_dump_line_number(r14);
@@ -716,13 +708,15 @@
 				frame_size = stack_base - old_backchain 
 					     + KERNEL_STACK_SIZE;
 			} else {
-				frame_size = backchain - old_backchain;
+				frame_size = MIN((backchain - old_backchain),
+					(stack_base - old_backchain +
+					KERNEL_STACK_SIZE));
 			}
 			for(j=0; j< frame_size; j+=4){
 				if(j % 16 == 0){
-					fprintf(fp,"\n%08x: ",old_backchain+j);
+					fprintf(fp,"\n%08lx: ",old_backchain+j);
 				}
-				fprintf(fp," %08x",ULONG(&stack[old_backchain -
+				fprintf(fp," %08lx",ULONG(&stack[old_backchain -
 							 stack_base + j]));
 			}
 			fprintf(fp,"\n\n");
@@ -771,10 +765,10 @@
 		return;
 	}
 	fprintf(fp," LOWCORE INFO:\n");
-	fprintf(fp,"  -psw      : %#010x %#010x\n", tmp[0],
+	fprintf(fp,"  -psw      : %#010lx %#010lx\n", tmp[0],
 		tmp[1]);
 	if(show_symbols){
-		fprintf(fp,"  -function : %s at %x\n", 
+		fprintf(fp,"  -function : %s at %lx\n", 
 	       		closest_symbol(tmp[1] & S390_ADDR_MASK), 
 			tmp[1] & S390_ADDR_MASK);
 		if (bt->flags & BT_LINE_NUMBERS)
@@ -783,12 +777,12 @@
 	ptr = lc + MEMBER_OFFSET("_lowcore","cpu_timer_save_area");
 	tmp[0]=UINT(ptr);
 	tmp[1]=UINT(ptr + S390_WORD_SIZE);
-	fprintf(fp,"  -cpu timer: %#010x %#010x\n", tmp[0],tmp[1]);
+	fprintf(fp,"  -cpu timer: %#010lx %#010lx\n", tmp[0],tmp[1]);
 
 	ptr = lc + MEMBER_OFFSET("_lowcore","clock_comp_save_area");
 	tmp[0]=UINT(ptr);
 	tmp[1]=UINT(ptr + S390_WORD_SIZE);
-	fprintf(fp,"  -clock cmp: %#010x %#010x\n", tmp[0], tmp[1]);
+	fprintf(fp,"  -clock cmp: %#010lx %#010lx\n", tmp[0], tmp[1]);
 
 	fprintf(fp,"  -general registers:\n");
 	ptr = lc + MEMBER_OFFSET("_lowcore","gpregs_save_area");
@@ -796,25 +790,25 @@
 	tmp[1]=ULONG(ptr + S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0],tmp[1],tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0],tmp[1],tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 10* S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 11* S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0],tmp[1],tmp[2],tmp[3]);
 	tmp[0]=ULONG(ptr + 12* S390_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 13* S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 14* S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 15* S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 
 	fprintf(fp,"  -access registers:\n");
@@ -823,25 +817,25 @@
 	tmp[1]=ULONG(ptr + S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 	tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 	tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 10* S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 11* S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 	tmp[0]=ULONG(ptr + 12* S390_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 13* S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 14* S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 15* S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 
 	fprintf(fp,"  -control registers:\n");
@@ -850,26 +844,26 @@
 	tmp[1]=ULONG(ptr + S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 	tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 
 	tmp[0]=ULONG(ptr);
 	tmp[1]=ULONG(ptr + S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 	tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE);
 	tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE);
-	fprintf(fp,"     %#010x %#010x %#010x %#010x\n", 
+	fprintf(fp,"     %#010lx %#010lx %#010lx %#010lx\n", 
 		tmp[0], tmp[1], tmp[2], tmp[3]);
 
 	ptr = lc + MEMBER_OFFSET("_lowcore","floating_pt_save_area");
@@ -878,8 +872,8 @@
 	tmp[1]=ULONG(ptr + 2 * S390_WORD_SIZE);
 	tmp[2]=ULONG(ptr + 4 * S390_WORD_SIZE);
 	tmp[3]=ULONG(ptr + 6 * S390_WORD_SIZE);
-	fprintf(fp,"     %#018llx %#018llx\n", tmp[0], tmp[1]);
-	fprintf(fp,"     %#018llx %#018llx\n", tmp[2], tmp[3]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[0], tmp[1]);
+	fprintf(fp,"     %#018lx %#018lx\n", tmp[2], tmp[3]);
 }
 
 /*
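
The rewritten s390_vtop() above replaces the old pgd/pte macros with explicit 31-bit segment- and page-table arithmetic. A self-contained sketch of the same bit manipulation, using made-up values in place of readmem() calls, may help when checking the masks by hand; everything except the masks and shifts is illustrative:

#include <stdio.h>

/* Same shifts and masks as _kl_sg_table_deref_s390()/_kl_pg_table_deref_s390() */
#define SEG_INDEX(v)    (((v) >> 20) & 0x7ffUL)   /* 2048 segment table entries */
#define PTE_INDEX(v)    (((v) >> 12) & 0xffUL)    /* 256 page table entries     */
#define SEG_ORIGIN(e)   ((e) & 0x7ffffc00UL)      /* page table origin          */
#define SEG_LEN(e)      ((e) & 0xfUL)             /* page table length nibble   */
#define PAGE_ORIGIN(e)  ((e) & 0x7ffff000UL)      /* page frame address         */
#define PAGE_OFFSET(v)  ((v) & 0xfffUL)           /* byte offset within page    */

int main(void)
{
	unsigned long vaddr     = 0x0123a456UL;  /* made-up 31-bit virtual address */
	unsigned long seg_entry = 0x0100000fUL;  /* made-up segment entry, len 0xf
						    = full 256-entry page table   */
	unsigned long pte       = 0x07654000UL;  /* made-up page entry, no invalid
						    or reserved bits set          */

	printf("segment entry read at table + %#lx\n", SEG_INDEX(vaddr) * 4);
	printf("page table origin %#lx, length nibble %#lx\n",
	       SEG_ORIGIN(seg_entry), SEG_LEN(seg_entry));
	printf("page entry read at origin + %#lx\n", PTE_INDEX(vaddr) * 4);
	printf("physical address %#lx\n", PAGE_ORIGIN(pte) + PAGE_OFFSET(vaddr));
	return 0;
}

The two offsets printed are exactly what the deref helpers above add to the table origins before calling readmem(), and the final line mirrors the *phys_addr computation at the end of s390_vtop().
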
--- crash/lkcd_vmdump_v1.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_vmdump_v1.h	2008-02-20 12:12:46.000000000 -0500
@@ -1,8 +1,8 @@
 /* lkcd_vmdump_v1.h - core analysis suite
  *
  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -34,6 +34,7 @@
 #include <linux/utsname.h>              /* for utsname structure            */
 #endif
 #ifndef IA64
+typedef unsigned int u32;
 #include <asm/ptrace.h>                 /* for pt_regs                      */
 #endif
 
@@ -114,8 +115,12 @@
 
 	/* the dump registers */
 #ifndef IA64
+#ifndef S390
+#ifndef S390X
 	struct pt_regs       dh_regs;
 #endif
+#endif
+#endif
 
 	/* the address of the current task */
 	struct task_struct  *dh_current_task;
--- crash/xendump.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/xendump.h	2007-03-15 09:06:19.000000000 -0400
@@ -0,0 +1,177 @@
+/* 
+ * xendump.h
+ *
+ * Copyright (C) 2006, 2007 David Anderson
+ * Copyright (C) 2006, 2007 Red Hat, Inc. All rights reserved.
+ *
+ * This software may be freely redistributed under the terms of the
+ * GNU General Public License.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <endian.h>
+#include <elf.h>
+
+#define XC_SAVE_SIGNATURE  "LinuxGuestRecord"
+#define XC_CORE_MAGIC      0xF00FEBED
+#define XC_CORE_MAGIC_HVM  0xF00FEBEE
+
+/*
+ *  From xenctrl.h, but probably not on most host machines.
+ */
+typedef struct xc_core_header {
+    unsigned int xch_magic;
+    unsigned int xch_nr_vcpus;
+    unsigned int xch_nr_pages;
+    unsigned int xch_ctxt_offset;
+    unsigned int xch_index_offset;
+    unsigned int xch_pages_offset;
+} xc_core_header_t;
+
+struct pfn_offset_cache {
+	off_t file_offset;
+	ulong pfn;
+	ulong cnt;
+};
+#define PFN_TO_OFFSET_CACHE_ENTRIES  (5000)
+
+struct elf_index_pfn {
+	ulong index;
+	ulong pfn;
+};
+#define INDEX_PFN_COUNT (128)
+
+struct last_batch {
+	ulong index;
+	ulong start;
+	ulong end;
+	ulong accesses;
+	ulong duplicates; 
+};
+
+struct xendump_data {
+        ulong flags;       /* XENDUMP_LOCAL, plus anything else... */
+	int xfd;
+	int pc_next;
+	uint page_size;
+	FILE *ofp;
+	char *page;
+	ulong accesses;
+	ulong cache_hits;
+	ulong redundant;
+	ulong last_pfn;
+	struct pfn_offset_cache *poc;
+
+	struct xc_core_data {
+		int p2m_frames;
+		ulong *p2m_frame_index_list;
+		struct xc_core_header header;
+		int elf_class;
+		uint64_t format_version;
+		off_t elf_strtab_offset;
+		off_t shared_info_offset;
+		off_t ia64_mapped_regs_offset;
+		struct elf_index_pfn elf_index_pfn[INDEX_PFN_COUNT];
+		struct last_batch last_batch;
+		Elf32_Ehdr *elf32;
+		Elf64_Ehdr *elf64;
+	} xc_core;
+
+	struct xc_save_data {
+		ulong nr_pfns;
+		int vmconfig_size;
+		char *vmconfig_buf;
+		ulong *p2m_frame_list;
+		uint pfns_not;
+		off_t pfns_not_offset;
+		off_t vcpu_ctxt_offset;
+		off_t shared_info_page_offset;
+		off_t *batch_offsets;
+		ulong batch_count;
+		ulong *region_pfn_type;
+		ulong ia64_version;
+		ulong *ia64_page_offsets;
+	} xc_save;
+
+	ulong panic_pc;
+	ulong panic_sp;
+};
+
+#define XC_SAVE            (XENDUMP_LOCAL << 1)
+#define XC_CORE_ORIG       (XENDUMP_LOCAL << 2)
+#define XC_CORE_P2M_CREATE (XENDUMP_LOCAL << 3)
+#define XC_CORE_PFN_CREATE (XENDUMP_LOCAL << 4)
+#define XC_CORE_NO_P2M     (XENDUMP_LOCAL << 5)
+#define XC_SAVE_IA64       (XENDUMP_LOCAL << 6)
+#define XC_CORE_64BIT_HOST (XENDUMP_LOCAL << 7)
+#define XC_CORE_ELF        (XENDUMP_LOCAL << 8)
+
+#define MACHINE_BYTE_ORDER()  \
+        (machine_type("X86") || \
+         machine_type("X86_64") || \
+         machine_type("IA64") ? __LITTLE_ENDIAN : __BIG_ENDIAN)
+
+#define BYTE_SWAP_REQUIRED(endian) (endian != MACHINE_BYTE_ORDER())
+
+static inline uint32_t
+swab32(uint32_t x)
+{
+        return (((x & 0x000000ffU) << 24) |
+                ((x & 0x0000ff00U) <<  8) |
+                ((x & 0x00ff0000U) >>  8) |
+                ((x & 0xff000000U) >> 24));
+}
+
+#define MFN_NOT_FOUND (-1)
+#define PFN_NOT_FOUND (-1)
+
+#define INVALID_MFN (~0UL)
+
+/*
+ *  ia64 "xm save" format is completely different from the others.
+ */
+typedef struct xen_domctl_arch_setup {
+    uint64_t flags;      /* XEN_DOMAINSETUP_* */
+/* #ifdef __ia64__ */
+    uint64_t bp;            /* mpaddr of boot param area */
+    uint64_t maxmem;        /* Highest memory address for MDT.  */
+    uint64_t xsi_va;        /* Xen shared_info area virtual address.  */
+    uint32_t hypercall_imm; /* Break imm for Xen hypercalls.  */
+/* #endif */
+} xen_domctl_arch_setup_t;
+
+/*
+ *  xc_core ELF note, which differs from the standard Elf[32|64]_Nhdr
+ *  structure by the additional name field.
+ */
+struct elfnote {
+	uint32_t namesz; 
+	uint32_t descsz;
+	uint32_t type;
+	char name[4]; 
+};
+
+#define XEN_ELFNOTE_DUMPCORE_NONE            0x2000000
+#define XEN_ELFNOTE_DUMPCORE_HEADER          0x2000001
+#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION     0x2000002
+#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION  0x2000003
+
+struct xen_dumpcore_elfnote_header_desc {
+	uint64_t xch_magic;
+	uint64_t xch_nr_vcpus;
+	uint64_t xch_nr_pages;
+	uint64_t xch_page_size;
+}; 
+
+#define FORMAT_VERSION_0000000000000001 0x0000000000000001ULL
+
+struct xen_dumpcore_elfnote_format_version_desc {
+	uint64_t version;
+}; 
+
+struct xen_dumpcore_p2m {
+	uint64_t pfn;
+	uint64_t gmfn; 
+};
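
xendump.h above declares the xc_core header layout, its magic values, and a local swab32() helper, but no consumer appears in this hunk. A sketch along the following lines shows how the pieces could fit together to detect a dump written on a host of the opposite byte order; check_xc_core_header() and its main() driver are illustrative only, with the struct, macros, and swab32() repeated here just to keep the example self-contained:

#include <stdio.h>
#include <stdint.h>

/* Repeated from xendump.h above for self-containment. */
typedef struct xc_core_header {
	unsigned int xch_magic;
	unsigned int xch_nr_vcpus;
	unsigned int xch_nr_pages;
	unsigned int xch_ctxt_offset;
	unsigned int xch_index_offset;
	unsigned int xch_pages_offset;
} xc_core_header_t;

#define XC_CORE_MAGIC      0xF00FEBED
#define XC_CORE_MAGIC_HVM  0xF00FEBEE

static uint32_t swab32(uint32_t x)
{
	return (((x & 0x000000ffU) << 24) |
		((x & 0x0000ff00U) <<  8) |
		((x & 0x00ff0000U) >>  8) |
		((x & 0xff000000U) >> 24));
}

/* Returns 1 if the header is usable, byte-swapping it in place when the
 * dump appears to come from a host with the opposite byte order. */
static int check_xc_core_header(xc_core_header_t *h)
{
	if (h->xch_magic == XC_CORE_MAGIC || h->xch_magic == XC_CORE_MAGIC_HVM)
		return 1;
	if (swab32(h->xch_magic) == XC_CORE_MAGIC ||
	    swab32(h->xch_magic) == XC_CORE_MAGIC_HVM) {
		h->xch_magic        = swab32(h->xch_magic);
		h->xch_nr_vcpus     = swab32(h->xch_nr_vcpus);
		h->xch_nr_pages     = swab32(h->xch_nr_pages);
		h->xch_ctxt_offset  = swab32(h->xch_ctxt_offset);
		h->xch_index_offset = swab32(h->xch_index_offset);
		h->xch_pages_offset = swab32(h->xch_pages_offset);
		return 1;
	}
	return 0;	/* not an xc_core dump */
}

int main(void)
{
	xc_core_header_t h = { 0 };

	h.xch_magic = swab32(XC_CORE_MAGIC);	/* pretend foreign-endian dump */
	printf("usable: %d, magic after fixup: %#x\n",
	       check_xc_core_header(&h), h.xch_magic);
	return 0;
}
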
--- crash/xen_hyper_command.c.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/xen_hyper_command.c	2008-12-03 12:03:24.000000000 -0500
@@ -0,0 +1,1857 @@
+/*
+ *  xen_hyper_command.c
+ *
+ *  Portions Copyright (C) 2006-2007 Fujitsu Limited
+ *  Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K.
+ *
+ *  Authors: Itsuro Oda <oda@valinux.co.jp>
+ *           Fumihiko Kakuma <kakuma@valinux.co.jp>
+ *
+ *  This file is part of Xencrash.
+ *
+ *  Xencrash is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation (version 2 of the License).
+ *
+ *  Xencrash is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with Xencrash; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
+ */
+
+#include "defs.h"
+
+#ifdef XEN_HYPERVISOR_ARCH
+#include "xen_hyper_defs.h"
+
+#ifdef X86
+char *xhregt[] = {
+	"ebx", "ecx", "edx", "esi", "edi", "ebp", "eax", "ds", "es",
+	"fs", "gs", "orig_eax", "eip", "cs", "eflags", "esp", "ss",
+	NULL
+};
+#endif
+
+#ifdef X86_64
+char *xhregt[] = {
+	"r15", "r14", "r13", "r12", "rbp", "rbx", "r11", "r10", "r9", "r8",
+	"rax", "rcx", "rdx", "rsi", "rdi", "orig_rax", "rip", "cs", "eflags",
+	"rsp", "ss", "fs", "gs", "ds", "es", "fs", "gs",
+	NULL
+};
+#endif
+
+#ifdef IA64
+char *xhregt[] = {
+	"aaa", "bbb",
+	NULL
+};
+#endif
+
+static void xen_hyper_do_domain(struct xen_hyper_cmd_args *da);
+static void xen_hyper_do_doms(struct xen_hyper_cmd_args *da);
+static void xen_hyper_show_doms(struct xen_hyper_domain_context *dc);
+static void xen_hyper_do_dumpinfo(ulong flag, struct xen_hyper_cmd_args *dia);
+static void xen_hyper_show_dumpinfo(ulong flag,
+	struct xen_hyper_dumpinfo_context *dic);
+static void xen_hyper_do_pcpus(ulong flag, struct xen_hyper_cmd_args *pca);
+static void xen_hyper_show_pcpus(ulong flag, struct xen_hyper_pcpu_context *pcc);
+static void xen_hyper_do_sched(ulong flag, struct xen_hyper_cmd_args *scha);
+static void xen_hyper_show_sched(ulong flag, struct xen_hyper_sched_context *schc);
+static void xen_hyper_do_vcpu(struct xen_hyper_cmd_args *vca);
+static void xen_hyper_do_vcpus(struct xen_hyper_cmd_args *vca);
+static void xen_hyper_show_vcpus(struct xen_hyper_vcpu_context *vcc);
+static char *xen_hyper_domain_to_type(ulong domain, int *type, char *buf, int verbose);
+static char *xen_hyper_domain_context_to_type(
+	struct xen_hyper_domain_context *dc, int *type, char *buf, int verbose);
+static int xen_hyper_str_to_domain_context(char *string, ulong *value,
+	struct xen_hyper_domain_context **dcp);
+static int xen_hyper_str_to_dumpinfo_context(char *string, ulong *value, struct xen_hyper_dumpinfo_context **dicp);
+static int xen_hyper_strvcpu_to_vcpu_context(char *string, ulong *value,
+	struct xen_hyper_vcpu_context **vccp);
+static int
+xen_hyper_strid_to_vcpu_context(char *strdom, char *strvc, ulong *valdom,
+	ulong *valvc, struct xen_hyper_vcpu_context **vccp);
+static int xen_hyper_str_to_pcpu_context(char *string, ulong *value,
+	struct xen_hyper_pcpu_context **pccp);
+
+/*
+ *  Display domain struct.
+ */
+void
+xen_hyper_cmd_domain(void)
+{
+	struct xen_hyper_cmd_args da;
+	struct xen_hyper_domain_context *dc;
+	ulong val;
+        int c, cnt, type, bogus;
+
+	BZERO(&da, sizeof(struct xen_hyper_cmd_args));
+        while ((c = getopt(argcnt, args, "")) != EOF) {
+                switch(c)
+                {
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+
+	cnt = bogus = 0;
+        while (args[optind]) {
+		if (IS_A_NUMBER(args[optind])) {
+			type = xen_hyper_str_to_domain_context(args[optind], &val, &dc);
+			switch (type) {
+			case XEN_HYPER_STR_DID:
+			case XEN_HYPER_STR_DOMAIN:
+				da.value[cnt] = val;
+				da.type[cnt] = type;
+				da.addr[cnt] = dc->domain;
+				da.context[cnt] = dc;
+				cnt++;
+				break;
+			case XEN_HYPER_STR_INVALID:
+				error(INFO, "invalid domain or id value: %s\n\n",
+					args[optind]);
+				bogus++;
+			}
+		} else {
+			error(FATAL, "invalid address: %s\n",
+				args[optind]);
+		}
+		optind++;
+	}
+	da.cnt = cnt;
+	if (bogus && !cnt) {
+		return;
+	}
+	
+	xen_hyper_do_domain(&da);
+}
+
+/*
+ *  Do the work requested by xen_hyper_cmd_dom().
+ */
+static void
+xen_hyper_do_domain(struct xen_hyper_cmd_args *da)
+{
+	int i;
+
+	if (da->cnt) {
+		if (da->cnt == 1) {
+			xhdt->last = da->context[0];
+		}
+		for (i = 0; i < da->cnt; i++) {
+			dump_struct("domain", da->addr[i], 0);
+		}
+	} else {
+		dump_struct("domain", xhdt->last->domain, 0);
+	}
+}
+
+/*
+ *  Display domain status.
+ */
+void
+xen_hyper_cmd_doms(void)
+{
+	struct xen_hyper_cmd_args da;
+	struct xen_hyper_domain_context *dc;
+	ulong val;
+        int c, cnt, type, bogus;
+
+	BZERO(&da, sizeof(struct xen_hyper_cmd_args));
+        while ((c = getopt(argcnt, args, "")) != EOF) {
+                switch(c)
+                {
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+
+	cnt = bogus = 0;
+        while (args[optind]) {
+		if (IS_A_NUMBER(args[optind])) {
+			type = xen_hyper_str_to_domain_context(args[optind], &val, &dc);
+			switch (type) {
+			case XEN_HYPER_STR_DID:
+			case XEN_HYPER_STR_DOMAIN:
+				da.value[cnt] = val;
+				da.type[cnt] = type;
+				da.addr[cnt] = dc->domain;
+				da.context[cnt] = dc;
+				cnt++;
+				break;
+			case XEN_HYPER_STR_INVALID:
+				error(INFO, "invalid domain or id value: %s\n\n",
+					args[optind]);
+				bogus++;
+			}
+		} else {
+			error(FATAL, "invalid address: %s\n",
+				args[optind]);
+		}
+		optind++;
+	}
+	da.cnt = cnt;
+	if (bogus && !cnt) {
+		return;
+	}
+	
+	xen_hyper_do_doms(&da);
+}
+
+/*
+ *  Do the work requested by xen_hyper_cmd_doms().
+ */
+static void
+xen_hyper_do_doms(struct xen_hyper_cmd_args *da)
+{
+	struct xen_hyper_domain_context *dca;
+	char buf1[XEN_HYPER_CMD_BUFSIZE];
+	char buf2[XEN_HYPER_CMD_BUFSIZE];
+	int i;
+
+	sprintf(buf1, "   DID  %s ST T ",
+		mkstring(buf2, VADDR_PRLEN, CENTER|RJUST, "DOMAIN"));
+	mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|RJUST, "MAXPAGE");
+	strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1);
+	mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|RJUST, "TOTPAGE");
+	strncat(buf1, " VCPU ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1);
+	mkstring(&buf1[strlen(buf1)], VADDR_PRLEN, CENTER|RJUST, "SHARED_I");
+	strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1);
+	mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|RJUST, "P2M_MFN");
+	fprintf(fp, "%s\n", buf1);
+	if (da->cnt) {
+		for (i = 0; i < da->cnt; i++) {
+			xen_hyper_show_doms(da->context[i]);
+		}
+	} else {
+		for (i = 0, dca=xhdt->context_array; i < XEN_HYPER_NR_DOMAINS();
+			i++, dca++) {
+			xen_hyper_show_doms(dca);
+		}
+	}
+}
+
+static void
+xen_hyper_show_doms(struct xen_hyper_domain_context *dc)
+{
+	char *act, *crash;
+	uint cpuid;
+	int type, i, j;
+	struct xen_hyper_pcpu_context *pcc;
+#if defined(X86) || defined(X86_64)
+	char *shared_info;
+#elif defined(IA64)
+	char *domain_struct;
+	ulong pgd;
+#endif
+	char buf1[XEN_HYPER_CMD_BUFSIZE];
+	char buf2[XEN_HYPER_CMD_BUFSIZE];
+
+	if (!(dc->domain)) {
+		return;
+	}
+
+#if defined(X86) || defined(X86_64)
+	shared_info = GETBUF(XEN_HYPER_SIZE(shared_info));
+	if (dc->shared_info) {
+		if (!readmem(dc->shared_info, KVADDR, shared_info,
+			XEN_HYPER_SIZE(shared_info), "fill_shared_info_struct",
+			ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
+			error(WARNING, "cannot fill shared_info struct.\n");
+			BZERO(shared_info, XEN_HYPER_SIZE(shared_info));
+		}
+	}
+#elif defined(IA64)
+	if ((domain_struct = xen_hyper_read_domain(dc->domain)) == NULL) {
+		error(FATAL, "cannot read domain.\n");
+	}
+#endif
+	act = NULL;
+	for_cpu_indexes(i, cpuid)
+	{
+		pcc = xen_hyper_id_to_pcpu_context(cpuid);
+		for (j = 0; j < dc->vcpu_cnt; j++) {
+			if (pcc->current_vcpu == dc->vcpu[j]) {
+				act = ">";
+				break;
+			}
+		}
+		if (act)	break;
+	}
+	if (act == NULL)	act = " ";
+	if (xht->crashing_vcc && dc->domain == xht->crashing_vcc->domain) {
+		crash = "*";
+	} else {
+		crash = " ";
+	}
+	sprintf(buf1, "%s%s%5d ", act, crash, dc->domain_id);
+	mkstring(&buf1[strlen(buf1)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, (char *)(dc->domain));
+	strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1);
+	sprintf(&buf1[strlen(buf1)], "%s ",
+		xen_hyper_domain_state_string(dc, buf2, !VERBOSE));
+	sprintf(&buf1[strlen(buf1)], "%s ",
+		xen_hyper_domain_context_to_type(dc, &type, buf2, !VERBOSE));
+	mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|INT_HEX|RJUST,
+		MKSTR((long)(dc->max_pages)));
+	strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1);
+	mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|INT_HEX|RJUST,
+		MKSTR((long)(dc->tot_pages)));
+	sprintf(&buf1[strlen(buf1)], " %3d  ", dc->vcpu_cnt);
+	mkstring(&buf1[strlen(buf1)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(dc->shared_info));
+	strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1);
+#if defined(X86) || defined(X86_64)
+	if (dc->shared_info) {
+		mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|LONG_HEX|RJUST,
+			MKSTR(ULONG(shared_info +
+				XEN_HYPER_OFFSET(shared_info_arch) +
+				XEN_HYPER_OFFSET(arch_shared_info_pfn_to_mfn_frame_list_list)))
+		);
+	} else {
+		mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|RJUST, "----");
+	}
+	FREEBUF(shared_info);
+#elif defined(IA64)
+	pgd = ULONG(domain_struct + XEN_HYPER_OFFSET(domain_arch) +
+		XEN_HYPER_OFFSET(arch_domain_mm) +
+		XEN_HYPER_OFFSET(mm_struct_pgd));
+	if (pgd) {
+		mkstring(&buf1[strlen(buf1)], LONG_PRLEN,
+			CENTER|LONG_HEX|RJUST,
+			MKSTR((pgd - DIRECTMAP_VIRT_START) >> machdep->pageshift));
+	} else {
+		mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|RJUST, "----");
+	}
+#endif
+
+	fprintf(fp, "%s\n", buf1);
+}
+
+/*
+ * Display ELF Notes information.
+ */
+void
+xen_hyper_cmd_dumpinfo(void)
+{
+	struct xen_hyper_cmd_args dia;
+	ulong flag;
+	ulong val;
+	struct xen_hyper_dumpinfo_context *dic;
+	int c, cnt, type, bogus;
+
+	BZERO(&dia, sizeof(struct xen_hyper_cmd_args));
+	flag= 0;
+        while ((c = getopt(argcnt, args, "rt")) != EOF) {
+                switch(c)
+                {
+		case 't':
+			flag |= XEN_HYPER_DUMPINFO_TIME;
+                        break;
+		case 'r':
+			flag |= XEN_HYPER_DUMPINFO_REGS;
+                        break;
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+
+	cnt = bogus = 0;
+        while (args[optind]) {
+		if (IS_A_NUMBER(args[optind])) {
+			type = xen_hyper_str_to_dumpinfo_context(args[optind], &val, &dic);
+			switch (type)
+			{
+			case XEN_HYPER_STR_PCID:
+			case XEN_HYPER_STR_ADDR:
+				dia.value[cnt] = val;
+				dia.type[cnt] = type;
+				dia.context[cnt] = dic;
+				cnt++;
+				break;
+
+			case XEN_HYPER_STR_INVALID:
+				error(INFO, "invalid note address or id "
+					"value: %s\n\n", args[optind]);
+				bogus++;
+				break;
+			}
+		} else {
+			error(INFO, "invalid note address or id "
+				"value: %s\n\n", args[optind]);
+		}
+		optind++;
+	}
+	dia.cnt = cnt;
+	if (!cnt && bogus) {
+		return;
+	}
+	
+	xen_hyper_do_dumpinfo(flag, &dia);
+}
+
+/*
+ * Do the work requested by xen_hyper_cmd_dumpinfo().
+ */
+static void
+xen_hyper_do_dumpinfo(ulong flag, struct xen_hyper_cmd_args *dia)
+{
+	struct xen_hyper_dumpinfo_context *dic;
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	int i, cnt;
+
+	if (dia->cnt) {
+		cnt = dia->cnt;
+	} else {
+		cnt = XEN_HYPER_NR_PCPUS();
+	}
+	for (i = 0; i < cnt; i++) {
+		if (i == 0 || flag & XEN_HYPER_DUMPINFO_REGS ||
+			flag & XEN_HYPER_DUMPINFO_TIME) {
+			if (i) {
+				fprintf(fp, "\n");
+			}
+			sprintf(buf, " PCID ");
+			mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "ENOTE");
+//			sprintf(&buf[strlen(buf)], "  PID   PPID  PGRP  SID");
+			strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+			mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "CORE");
+			if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V2) {
+				strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+				mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "XEN_CORE");
+			}
+			if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V3) {
+				strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+				mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "XEN_INFO");
+			}
+			fprintf(fp, "%s\n", buf);
+		}
+		if (dia->cnt) {
+			dic = dia->context[i];
+		} else {
+			dic = xen_hyper_id_to_dumpinfo_context(xht->cpu_idxs[i]);
+		}
+		xen_hyper_show_dumpinfo(flag, dic);
+	}
+}
+
+static void
+xen_hyper_show_dumpinfo(ulong flag, struct xen_hyper_dumpinfo_context *dic)
+{
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	char *note_buf;
+	ulong addr;
+	ulong *regs;
+	long tv_sec, tv_usec;
+	int i, regcnt;
+
+	if (!dic || !dic->note) {
+		return;
+	}
+
+	note_buf = dic->ELF_Prstatus_ptr;
+	sprintf(buf, "%5d ", dic->pcpu_id);
+	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(dic->note));
+
+#if 0
+	pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_pid));
+	sprintf(&buf[strlen(buf)], " %5d ", pid);
+	pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_ppid));
+	sprintf(&buf[strlen(buf)], "%5d ", pid);
+	pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_pgrp));
+	sprintf(&buf[strlen(buf)], "%5d ", pid);
+	pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_sid));
+	sprintf(&buf[strlen(buf)], "%5d", pid);
+#endif
+	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(dic->note));
+	if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V2) {
+		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(dic->note + xhdit->core_size));
+	}
+	if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V3) {
+		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+		if (xhdit->xen_info_cpu == dic->pcpu_id)
+			mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+			MKSTR(dic->note + xhdit->core_size + xhdit->xen_core_size));
+		else
+			mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "--");
+
+	}
+
+	fprintf(fp, "%s\n", buf);
+
+	if (flag & XEN_HYPER_DUMPINFO_TIME) {
+		sprintf(buf, "             ");
+		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "tv_sec");
+		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "tv_usec");
+		fprintf(fp, "%s\n", buf);
+
+		addr = (ulong)note_buf +
+			XEN_HYPER_OFFSET(ELF_Prstatus_pr_utime);
+		for (i = 0; i < 4; i++, addr += XEN_HYPER_SIZE(ELF_Timeval)) {
+			switch (i)
+			{
+			case 0: 
+				sprintf(buf, "  pr_utime   ");
+				break;
+			case 1: 
+				sprintf(buf, "  pr_stime   ");
+				break;
+			case 2: 
+				sprintf(buf, "  pr_cutime  ");
+				break;
+			case 3: 
+				sprintf(buf, "  pr_cstime  ");
+				break;
+			}
+			tv_sec = LONG(addr +
+				XEN_HYPER_OFFSET(ELF_Timeval_tv_sec));
+			tv_usec = LONG(addr +
+				XEN_HYPER_OFFSET(ELF_Timeval_tv_sec) +
+				XEN_HYPER_OFFSET(ELF_Timeval_tv_usec));
+			mkstring(&buf[strlen(buf)], LONG_PRLEN, CENTER|LONG_HEX|RJUST,
+				MKSTR(tv_sec));
+			strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+			mkstring(&buf[strlen(buf)], LONG_PRLEN, CENTER|LONG_HEX|RJUST,
+				MKSTR(tv_usec));
+			fprintf(fp, "%s\n", buf);
+		}
+	}
+
+	if (flag & XEN_HYPER_DUMPINFO_REGS) {
+		regcnt = XEN_HYPER_SIZE(ELF_Gregset) / sizeof(long);
+		addr = (ulong)note_buf +
+			XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg);
+		regs = (ulong *)addr;
+		fprintf(fp, "Register information(%lx):\n",
+			dic->note + xhdit->core_offset + XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg));
+		for (i = 0; i < regcnt; i++, regs++) {
+			if (xhregt[i] == NULL) {
+				break;
+			}
+			fprintf(fp, "  %s = ", xhregt[i]);
+			fprintf(fp, "0x%s\n",
+				mkstring(buf, LONG_PRLEN, LONG_HEX|LJUST, MKSTR(*regs)));
+		}
+	}
+}
+
+/*
+ * Dump the Xen conring in chronological order.
+ */
+void
+xen_hyper_cmd_log(void)
+{
+	int c;
+
+        while ((c = getopt(argcnt, args, "")) != EOF) {
+                switch(c)
+                {
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+	
+	xen_hyper_dump_log();
+}
+
+void
+xen_hyper_dump_log(void)
+{
+	uint conringc, conringp;
+	uint warp, start, len, idx, i;
+	ulong conring;
+	char *buf;
+	char last = 0;
+
+	conring = symbol_value("conring");
+	get_symbol_data("conringc", sizeof(uint), &conringc);
+	get_symbol_data("conringp", sizeof(uint), &conringp);
+	warp = FALSE;
+	if (conringp >= XEN_HYPER_CONRING_SIZE) {
+		if ((start = conringp & (XEN_HYPER_CONRING_SIZE - 1))) {
+			warp = TRUE;
+		}
+	} else {
+		start = 0;
+	}
+
+	buf = GETBUF(XEN_HYPER_CONRING_SIZE);
+	readmem(conring, KVADDR, buf, XEN_HYPER_CONRING_SIZE,
+		"conring contents", FAULT_ON_ERROR);
+	idx = start;
+	len = XEN_HYPER_CONRING_SIZE;
+
+wrap_around:
+	for (i = idx; i < len; i++) {
+		if (buf[i]) {
+			fputc(ascii(buf[i]) ? buf[i] : '.', fp);
+			last = buf[i];
+		}
+	}
+	if (warp) {
+		len = idx;
+		idx = 0;
+		warp = FALSE;
+		goto wrap_around;
+	}
+	if (last != '\n') {
+		fprintf(fp, "\n");
+	}
+	FREEBUF(buf);
+}
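
xen_hyper_dump_log() above prints the hypervisor console ring in chronological order: once conringp has passed XEN_HYPER_CONRING_SIZE, the oldest byte sits at conringp masked by the (power-of-two) ring size, so the dump starts there and wraps around. A toy, self-contained version of the same indexing, with made-up names and a tiny ring, is sketched below:

#include <stdio.h>
#include <string.h>

/* Toy ring standing in for conring/conringp; the size must be a power
 * of two, as XEN_HYPER_CONRING_SIZE is. */
#define RING_SIZE 16

static void dump_ring(const char *ring, unsigned int prod)
{
	unsigned int start, i;

	/* Once the producer index has passed the ring size, the oldest
	 * byte lives at prod & (RING_SIZE - 1); before that, at offset 0. */
	start = (prod >= RING_SIZE) ? (prod & (RING_SIZE - 1)) : 0;

	for (i = 0; i < RING_SIZE; i++) {
		char c = ring[(start + i) & (RING_SIZE - 1)];
		if (c)
			putchar(c);
	}
	putchar('\n');
}

int main(void)
{
	char ring[RING_SIZE];
	unsigned int prod = 0;
	const char *msg = "one two three four ";	/* 19 bytes: wraps once */
	size_t k;

	memset(ring, 0, sizeof(ring));
	for (k = 0; msg[k]; k++)
		ring[prod++ & (RING_SIZE - 1)] = msg[k];

	dump_ring(ring, prod);	/* prints the last 16 bytes: " two three four " */
	return 0;
}

The single modular loop here is equivalent to the two-pass "goto wrap_around" structure used above; both simply start at the oldest surviving byte and walk the ring once.
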
+
+/*
+ *  Display physical cpu information.
+ */
+void
+xen_hyper_cmd_pcpus(void)
+{
+	struct xen_hyper_cmd_args pca;
+	struct xen_hyper_pcpu_context *pcc;
+	ulong flag;
+	ulong val;
+        int c, cnt, type, bogus;
+
+	BZERO(&pca, sizeof(struct xen_hyper_cmd_args));
+	flag= 0;
+        while ((c = getopt(argcnt, args, "rt")) != EOF) {
+                switch(c)
+                {
+		case 'r':
+			flag |= XEN_HYPER_PCPUS_REGS;
+			break;
+		case 't':
+			flag |= XEN_HYPER_PCPUS_TSS;
+			break;
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+
+	cnt = bogus = 0;
+        while (args[optind]) {
+		if (IS_A_NUMBER(args[optind])) {
+			type = xen_hyper_str_to_pcpu_context(args[optind], &val, &pcc);
+			switch (type) {
+			case XEN_HYPER_STR_PCID:
+			case XEN_HYPER_STR_PCPU:
+				pca.value[cnt] = val;
+				pca.type[cnt] = type;
+				pca.addr[cnt] = pcc->pcpu;
+				pca.context[cnt] = pcc;
+				cnt++;
+				break;
+			case XEN_HYPER_STR_INVALID:
+				error(INFO, "invalid pcpu or id value: %s\n\n",
+					args[optind]);
+				bogus++;
+			}
+		} else {
+			error(FATAL, "invalid address: %s\n",
+				args[optind]);
+		}
+		optind++;
+	}
+	pca.cnt = cnt;
+	if (bogus && !cnt) {
+		return;
+	}
+	
+	xen_hyper_do_pcpus(flag, &pca);
+}
+
+/*
+ *  Do the work requested by xen_hyper_cmd_pcpu().
+ */
+static void
+xen_hyper_do_pcpus(ulong flag, struct xen_hyper_cmd_args *pca)
+{
+	struct xen_hyper_pcpu_context *pcc;
+	uint cpuid;
+	int i;
+
+	if (pca->cnt) {
+		for (i = 0; i < pca->cnt; i++) {
+			xen_hyper_show_pcpus(flag, pca->context[i]);
+			flag |= XEN_HYPER_PCPUS_1STCALL;
+		}
+	} else {
+		for_cpu_indexes(i, cpuid)
+		{
+			pcc = xen_hyper_id_to_pcpu_context(cpuid);
+			xen_hyper_show_pcpus(flag, pcc);
+			flag |= XEN_HYPER_PCPUS_1STCALL;
+		}
+	}
+}
+
+static void
+xen_hyper_show_pcpus(ulong flag, struct xen_hyper_pcpu_context *pcc)
+{
+	char *act = "  ";
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+
+	if (!(pcc->pcpu)) {
+		return;
+	}
+	if (XEN_HYPER_CRASHING_CPU() == pcc->processor_id) {
+		act = " *";
+	}
+	if ((flag & XEN_HYPER_PCPUS_REGS) || (flag & XEN_HYPER_PCPUS_TSS) ||
+	!(flag & XEN_HYPER_PCPUS_1STCALL)) {
+		if (((flag & XEN_HYPER_PCPUS_REGS) || (flag & XEN_HYPER_PCPUS_TSS)) &&
+		(flag & XEN_HYPER_PCPUS_1STCALL)) {
+			fprintf(fp, "\n");
+		}
+		sprintf(buf, "   PCID ");
+		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "PCPU");
+		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "CUR-VCPU");
+		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "TSS");
+		fprintf(fp, "%s\n", buf);
+	}
+
+	sprintf(buf, "%s%5d ", act, pcc->processor_id);
+	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(pcc->pcpu));
+	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(pcc->current_vcpu));
+	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(pcc->init_tss));
+	fprintf(fp, "%s\n", buf);
+	if (flag & XEN_HYPER_PCPUS_REGS) {
+		fprintf(fp, "Register information:\n");
+		dump_struct("cpu_user_regs", pcc->guest_cpu_user_regs, 0);
+	}
+	if (flag & XEN_HYPER_PCPUS_TSS) {
+		fprintf(fp, "init_tss information:\n");
+		dump_struct("tss_struct", pcc->init_tss, 0);
+	}
+}
+
+/*
+ *  Display schedule info.
+ */
+void
+xen_hyper_cmd_sched(void)
+{
+	struct xen_hyper_cmd_args scha;
+	struct xen_hyper_pcpu_context *pcc;
+	ulong flag;
+	ulong val;
+        int c, cnt, type, bogus;
+
+	BZERO(&scha, sizeof(struct xen_hyper_cmd_args));
+	flag = 0;
+        while ((c = getopt(argcnt, args, "v")) != EOF) {
+                switch(c)
+                {
+		case 'v':
+			flag |= XEN_HYPER_SCHED_VERBOSE;
+			break;
+
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+
+	cnt = bogus = 0;
+        while (args[optind]) {
+		if (IS_A_NUMBER(args[optind])) {
+			type = xen_hyper_str_to_pcpu_context(args[optind], &val, &pcc);
+			switch (type) {
+			case XEN_HYPER_STR_PCID:
+				scha.value[cnt] = val;
+				scha.type[cnt] = type;
+				scha.context[cnt] = &xhscht->sched_context_array[val];
+				cnt++;
+				break;
+			case XEN_HYPER_STR_PCPU:
+			case XEN_HYPER_STR_INVALID:
+				error(INFO, "invalid pcpu id value: %s\n\n",
+					args[optind]);
+				bogus++;
+			}
+		} else {
+			error(FATAL, "invalid address: %s\n",
+				args[optind]);
+		}
+		optind++;
+	}
+	scha.cnt = cnt;
+	if (bogus && !cnt) {
+		return;
+	}
+	
+	xen_hyper_do_sched(flag, &scha);
+}
+
+/*
+ *  Do the work requested by xen_hyper_cmd_pcpu().
+ */
+static void
+xen_hyper_do_sched(ulong flag, struct xen_hyper_cmd_args *scha)
+{
+	struct xen_hyper_sched_context *schc;
+	uint cpuid;
+	int i;
+
+	fprintf(fp, "Scheduler name : %s\n\n", xhscht->name);
+
+	if (scha->cnt) {
+		for (i = 0; i < scha->cnt; i++) {
+			xen_hyper_show_sched(flag, scha->context[i]);
+			flag |= XEN_HYPER_SCHED_1STCALL;
+		}
+	} else {
+		for_cpu_indexes(i, cpuid)
+		{
+			schc = &xhscht->sched_context_array[cpuid];
+			xen_hyper_show_sched(flag, schc);
+			flag |= XEN_HYPER_SCHED_1STCALL;
+		}
+	}
+}
+
+static void
+xen_hyper_show_sched(ulong flag, struct xen_hyper_sched_context *schc)
+{
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+
+	if (!(schc->schedule_data)) {
+		return;
+	}
+	if ((flag & XEN_HYPER_SCHED_VERBOSE) ||
+	!(flag & XEN_HYPER_SCHED_1STCALL)) {
+		if ((flag & XEN_HYPER_SCHED_1STCALL) &&
+		(flag & XEN_HYPER_SCHED_VERBOSE)) {
+			fprintf(fp, "\n");
+		}
+		sprintf(buf, "  CPU  ");
+		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "SCH-DATA");
+		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "SCH-PRIV");
+		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "CUR-VCPU");
+		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "IDL-VCPU");
+		if (XEN_HYPER_VALID_MEMBER(schedule_data_tick)) {
+			strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+			mkstring(&buf[strlen(buf)], LONG_PRLEN, CENTER|RJUST, "TICK");
+		}
+		fprintf(fp, "%s\n", buf);
+	}
+
+	sprintf(buf, "%5d  ", schc->cpu_id);
+	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(schc->schedule_data));
+	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(schc->sched_priv));
+	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(schc->curr));
+	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(schc->idle));
+	if (XEN_HYPER_VALID_MEMBER(schedule_data_tick)) {
+		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+		mkstring(&buf[strlen(buf)], LONG_PRLEN, CENTER|LONG_HEX|RJUST,
+			MKSTR(schc->tick));
+	}
+	fprintf(fp, "%s\n", buf);
+	if (flag & XEN_HYPER_SCHED_VERBOSE) {
+		;
+	}
+}
+
+/*
+ *  Display general system info.
+ */
+void
+xen_hyper_cmd_sys(void)
+{
+        int c;
+	ulong sflag;
+
+	sflag = FALSE;
+
+        while ((c = getopt(argcnt, args, "c")) != EOF) {
+                switch(c)
+                {
+		case 'c':
+			sflag = TRUE;
+			break;
+
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+
+        if (!args[optind]) {
+		if (sflag)
+			fprintf(fp, "No support argument\n");
+			/* display config info here. */
+		else
+			xen_hyper_display_sys_stats();
+		return;
+	}
+}
+
+/*
+ *  Display system stats at init-time or for the sys command.
+ */
+void
+xen_hyper_display_sys_stats(void)
+{
+        struct new_utsname *uts;
+        char buf1[XEN_HYPER_CMD_BUFSIZE];
+        char buf2[XEN_HYPER_CMD_BUFSIZE];
+	ulong mhz;
+	int len, flag;
+
+	uts = &xht->utsname;
+	len = 11;
+	flag = XEN_HYPER_PRI_R;
+
+        /*
+         *  It's now safe to unlink the remote namelist.
+         */
+        if (pc->flags & UNLINK_NAMELIST) {
+                unlink(pc->namelist);
+                pc->flags &= ~UNLINK_NAMELIST;
+                pc->flags |= NAMELIST_UNLINKED;
+        }
+
+	if (REMOTE()) {
+		switch (pc->flags & 
+			(NAMELIST_LOCAL|NAMELIST_UNLINKED|NAMELIST_SAVED))
+		{
+		case NAMELIST_UNLINKED:
+			XEN_HYPER_PRI(fp, len, "KERNEL: ", buf1, flag,
+				(buf1, "%s  (temporary)\n", pc->namelist));
+			break;
+
+		case (NAMELIST_UNLINKED|NAMELIST_SAVED):
+		case NAMELIST_LOCAL:
+			XEN_HYPER_PRI(fp, len, "KERNEL: ", buf1, flag,
+				(buf1, "%s\n", pc->namelist));
+			break;
+
+		}
+	} else {
+        	if (pc->system_map) {
+			XEN_HYPER_PRI(fp, len, "SYSTEM MAP: ", buf1, flag,
+				(buf1, "%s\n", pc->system_map));
+			XEN_HYPER_PRI(fp, len, "DEBUG KERNEL: ", buf1, flag,
+				(buf1, "%s\n", pc->namelist));
+		} else {
+			XEN_HYPER_PRI(fp, len, "KERNEL: ", buf1, flag,
+				(buf1, "%s\n", pc->namelist));
+		}
+	}
+
+	if (pc->debuginfo_file) {
+		XEN_HYPER_PRI(fp, len, "DEBUGINFO: ", buf1, flag,
+			(buf1, "%s\n", pc->debuginfo_file));
+	} else if (pc->namelist_debug) {
+		XEN_HYPER_PRI(fp, len, "DEBUG KERNEL: ", buf1, flag,
+			(buf1, "%s\n", pc->namelist_debug));
+	}
+
+	XEN_HYPER_PRI_CONST(fp, len, "DUMPFILE: ", flag);
+        if (ACTIVE()) {
+		if (REMOTE_ACTIVE()) 
+			fprintf(fp, "%s@%s  (remote live system)\n",
+			    	pc->server_memsrc, pc->server);
+		else
+                	fprintf(fp, "%s\n", pc->live_memsrc);
+	} else {
+		if (REMOTE_DUMPFILE())
+                	fprintf(fp, "%s@%s  (remote dumpfile)", 
+				pc->server_memsrc, pc->server);
+		else
+                	fprintf(fp, "%s", pc->dumpfile);
+
+		fprintf(fp, "\n");
+	}
+
+	XEN_HYPER_PRI(fp, len, "CPUS: ", buf1, flag,
+		(buf1, "%d\n", XEN_HYPER_NR_PCPUS()));
+	XEN_HYPER_PRI(fp, len, "DOMAINS: ", buf1, flag,
+		(buf1, "%d\n", XEN_HYPER_NR_DOMAINS()));
+	/* !!!Display a date here if it can be found. */
+	XEN_HYPER_PRI(fp, len, "UPTIME: ", buf1, flag,
+		(buf1, "%s\n", (xen_hyper_get_uptime_hyper() ? 
+		 convert_time(xen_hyper_get_uptime_hyper(), buf2) : "--:--:--")));
+	/* !!!Display a version here if it can be found. */
+	XEN_HYPER_PRI_CONST(fp, len, "MACHINE: ", flag);
+	if (strlen(uts->machine)) {
+		fprintf(fp, "%s  ", uts->machine);
+	} else {
+		fprintf(fp, "unknown  ");
+	}
+	if ((mhz = machdep->processor_speed()))
+		fprintf(fp, "(%ld Mhz)\n", mhz);
+	else
+		fprintf(fp, "(unknown Mhz)\n");
+	XEN_HYPER_PRI(fp, len, "MEMORY: ", buf1, flag,
+		(buf1, "%s\n", get_memory_size(buf2)));
+	if (XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND))
+		return;
+}
+
+/*
+ *  Display vcpu struct.
+ */
+void
+xen_hyper_cmd_vcpu(void)
+{
+	struct xen_hyper_cmd_args vca;
+	struct xen_hyper_vcpu_context *vcc;
+	ulong flag;
+	ulong valvc, valdom;
+        int c, cnt, type, bogus;
+
+	BZERO(&vca, sizeof(struct xen_hyper_cmd_args));
+	flag = 0;
+        while ((c = getopt(argcnt, args, "i")) != EOF) {
+                switch(c)
+                {
+		case 'i':
+			flag |= XEN_HYPER_VCPUS_ID;
+                        break;
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+
+	cnt = bogus = 0;
+        while (args[optind]) {
+		if (IS_A_NUMBER(args[optind])) {
+			if (flag & XEN_HYPER_VCPUS_ID) {
+				type = xen_hyper_strid_to_vcpu_context(
+					args[optind], args[optind+1],
+					&valdom, &valvc, &vcc);
+			} else {
+				type = xen_hyper_strvcpu_to_vcpu_context(
+					args[optind], &valvc, &vcc);
+			}
+			switch (type) {
+			case XEN_HYPER_STR_VCID:
+			case XEN_HYPER_STR_VCPU:
+				vca.value[cnt] = valvc;
+				vca.type[cnt] = type;
+				vca.addr[cnt] = vcc->vcpu;
+				vca.context[cnt] = vcc;
+				cnt++;
+				break;
+			case XEN_HYPER_STR_INVALID:
+				error(INFO, "invalid vcpu or id value: %s\n\n",
+					args[optind]);
+				bogus++;
+			}
+		} else {
+			error(FATAL, "invalid address: %s\n",
+				args[optind]);
+		}
+		optind++;
+		if (flag & XEN_HYPER_VCPUS_ID) optind++;
+	}
+	vca.cnt = cnt;
+	if (bogus && !cnt) {
+		return;
+	}
+	
+	xen_hyper_do_vcpu(&vca);
+}
+
+/*
+ *  Do the work requested by xen_hyper_cmd_vcpu().
+ */
+static void
+xen_hyper_do_vcpu(struct xen_hyper_cmd_args *vca)
+{
+	int i;
+
+	if (vca->cnt) {
+		if (vca->cnt == 1) {
+			xhvct->last = vca->context[0];
+		}
+		for (i = 0; i < vca->cnt; i++) {
+			dump_struct("vcpu", vca->addr[i], 0);
+		}
+	} else {
+		dump_struct("vcpu", xhvct->last->vcpu, 0);
+	}
+}
+
+/*
+ *  Display vcpu status.
+ */
+void
+xen_hyper_cmd_vcpus(void)
+{
+	struct xen_hyper_cmd_args vca;
+	struct xen_hyper_vcpu_context *vcc;
+	ulong flag;
+	ulong valvc, valdom;
+        int c, cnt, type, bogus;
+
+	BZERO(&vca, sizeof(struct xen_hyper_cmd_args));
+	flag = 0;
+        while ((c = getopt(argcnt, args, "i")) != EOF) {
+                switch(c)
+                {
+		case 'i':
+			flag |= XEN_HYPER_VCPUS_ID;
+                        break;
+                default:
+                        argerrs++;
+                        break;
+                }
+        }
+
+        if (argerrs)
+                cmd_usage(pc->curcmd, SYNOPSIS);
+
+	cnt = bogus = 0;
+        while (args[optind]) {
+		if (IS_A_NUMBER(args[optind])) {
+			if (flag & XEN_HYPER_VCPUS_ID) {
+				type = xen_hyper_strid_to_vcpu_context(
+					args[optind], args[optind+1],
+					&valdom, &valvc, &vcc);
+			} else {
+				type = xen_hyper_strvcpu_to_vcpu_context(
+					args[optind], &valvc, &vcc);
+			}
+			switch (type) {
+			case XEN_HYPER_STR_VCID:
+			case XEN_HYPER_STR_VCPU:
+				vca.value[cnt] = valvc;
+				vca.type[cnt] = type;
+				vca.addr[cnt] = vcc->vcpu;
+				vca.context[cnt] = vcc;
+				cnt++;
+				break;
+			case XEN_HYPER_STR_INVALID:
+				error(INFO, "invalid vcpu or id value: %s\n\n",
+					args[optind]);
+				bogus++;
+			}
+		} else {
+			error(FATAL, "invalid address: %s\n",
+				args[optind]);
+		}
+		optind++;
+	}
+	vca.cnt = cnt;
+	if (bogus && !cnt) {
+		return;
+	}
+	
+	xen_hyper_do_vcpus(&vca);
+}
+
+/*
+ *  Do the work requested by xen_hyper_cmd_vcpus().
+ */
+static void
+xen_hyper_do_vcpus(struct xen_hyper_cmd_args *vca)
+{
+	struct xen_hyper_vcpu_context_array *vcca;
+	struct xen_hyper_vcpu_context *vcc;
+	char buf1[XEN_HYPER_CMD_BUFSIZE];
+	char buf2[XEN_HYPER_CMD_BUFSIZE];
+	int i, j;
+
+	fprintf(fp, "   VCID  PCID %s ST T DOMID %s\n",
+		mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, "VCPU"),
+		mkstring(buf2, VADDR_PRLEN, CENTER|RJUST, "DOMAIN"));
+	if (vca->cnt) {
+		for (i = 0; i < vca->cnt; i++) {
+			xen_hyper_show_vcpus(vca->context[i]);
+		}
+	} else {
+		for (i = 0, vcca = xhvct->vcpu_context_arrays;
+			i < XEN_HYPER_NR_DOMAINS(); i++, vcca++) {
+			for (j = 0, vcc = vcca->context_array;
+				j < vcca->context_array_valid; j++, vcc++) {
+				xen_hyper_show_vcpus(vcc);
+			}
+		}
+	}
+}
+
+static void
+xen_hyper_show_vcpus(struct xen_hyper_vcpu_context *vcc)
+{
+	int type;
+	char *act, *crash;
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	struct xen_hyper_pcpu_context *pcc;
+	domid_t domid;
+
+	if (!(vcc->vcpu)) {
+		return;
+	}
+	if((pcc = xen_hyper_id_to_pcpu_context(vcc->processor))) {
+		if (pcc->current_vcpu == vcc->vcpu) {
+			act = ">";
+		} else {
+			act = " ";
+		}
+	} else {
+		act = " ";
+	}
+	if (xht->crashing_vcc && vcc->vcpu == xht->crashing_vcc->vcpu) {
+		crash = "*";
+	} else {
+		crash = " ";
+	}
+	sprintf(buf, "%s%s%5d %5d ", act, crash, vcc->vcpu_id, vcc->processor);
+	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(vcc->vcpu));
+	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+	xen_hyper_vcpu_state_string(vcc, &buf[strlen(buf)], !VERBOSE);
+	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
+	xen_hyper_domain_to_type(vcc->domain, &type, &buf[strlen(buf)], !VERBOSE);
+	if ((domid = xen_hyper_domain_to_id(vcc->domain)) == XEN_HYPER_DOMAIN_ID_INVALID) {
+		sprintf(&buf[strlen(buf)], " ????? ");
+	} else {
+		sprintf(&buf[strlen(buf)], " %5d ", domid);
+	}
+	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
+		MKSTR(vcc->domain));
+	fprintf(fp, "%s\n", buf);
+}
+
+
+
+/*
+ *  Get string for domain status.
+ *  - This may need some data in domain struct.
+ */
+char *
+xen_hyper_domain_state_string(struct xen_hyper_domain_context *dc,
+	char *buf, int verbose)
+{
+	ulong stat;
+
+	stat = xen_hyper_domain_state(dc);
+
+	if (stat == XEN_HYPER_DOMF_ERROR) {
+		sprintf(buf, verbose ? "(unknown)" : "??");
+	} else if (XEN_HYPER_VALID_MEMBER(domain_domain_flags)) {
+		if (stat & XEN_HYPER_DOMF_shutdown) {
+			sprintf(buf, verbose ? "DOMAIN_SHUTDOWN" : "SF");
+		} else if (stat & XEN_HYPER_DOMF_dying) {
+			sprintf(buf, verbose ? "DOMAIN_DYING" : "DY");
+		} else if (stat & XEN_HYPER_DOMF_ctrl_pause) {
+			sprintf(buf, verbose ? "DOMAIN_CTRL_PAUSE" : "CP");
+		} else if (stat & XEN_HYPER_DOMF_polling) {
+			sprintf(buf, verbose ? "DOMAIN_POLLING" : "PO");
+		} else if (stat & XEN_HYPER_DOMF_paused) {
+			sprintf(buf, verbose ? "DOMAIN_PAUSED" : "PA");
+		} else {
+			sprintf(buf, verbose ? "DOMAIN_RUNNING" : "RU");
+		}
+	} else {
+		if (stat & XEN_HYPER_DOMS_shutdown) {
+			sprintf(buf, verbose ? "DOMAIN_SHUTDOWN" : "SF");
+		} else if (stat & XEN_HYPER_DOMS_shuttingdown) {
+			sprintf(buf, verbose ? "DOMAIN_SHUTTINGDOWN" : "SH");
+		} else if (stat & XEN_HYPER_DOMS_dying) {
+			sprintf(buf, verbose ? "DOMAIN_DYING" : "DY");
+		} else if (stat & XEN_HYPER_DOMS_ctrl_pause) {
+			sprintf(buf, verbose ? "DOMAIN_CTRL_PAUSE" : "CP");
+		} else if (stat & XEN_HYPER_DOMS_polling) {
+			sprintf(buf, verbose ? "DOMAIN_POLLING" : "PO");
+		} else {
+			sprintf(buf, verbose ? "DOMAIN_RUNNING" : "RU");
+		}
+	}
+
+	return buf;
+}
+
+/*
+ *  Build a string describing the vcpu status.
+ *  - Note: this may need additional data from the vcpu struct.
+ */
+char *
+xen_hyper_vcpu_state_string(struct xen_hyper_vcpu_context *vcc,
+	char *buf, int verbose)
+{
+	int stat;
+
+	stat = xen_hyper_vcpu_state(vcc);
+
+	if (stat == XEN_HYPER_RUNSTATE_ERROR) {
+		sprintf(buf, verbose ? "(unknown)" : "??");
+	} else if (stat == XEN_HYPER_RUNSTATE_running ||
+		stat == XEN_HYPER_RUNSTATE_runnable) {
+		sprintf(buf, verbose ? "VCPU_RUNNING" : "RU");
+	} else if (stat == XEN_HYPER_RUNSTATE_blocked) {
+		sprintf(buf, verbose ? "VCPU_BLOCKED" : "BL");
+	} else if (stat == XEN_HYPER_RUNSTATE_offline) {
+		sprintf(buf, verbose ? "VCPU_OFFLINE" : "OF");
+	} else {
+		sprintf(buf, verbose ? "(unknown)" : "??");
+	}
+
+	return buf;
+}
+
+/*
+ *  Get domain type from domain address.
+ */
+static char *
+xen_hyper_domain_to_type(ulong domain, int *type, char *buf, int verbose)
+{
+	struct xen_hyper_domain_context *dc;
+
+	if ((dc = xen_hyper_domain_to_domain_context(domain)) == NULL) {
+		error(WARNING, "cannot get context from domain address.\n");
+		return NULL;
+	}
+	return xen_hyper_domain_context_to_type(dc, type, buf, verbose);
+}
+
+/*
+ *  Get domain type from domain context.
+ */
+static char *
+xen_hyper_domain_context_to_type(struct xen_hyper_domain_context *dc, int *type,
+	char *buf, int verbose)
+{
+	if (!dc) {
+		*type = XEN_HYPER_DOMAIN_TYPE_INVALID;
+		return NULL;
+	} else if (dc->domain_id == XEN_HYPER_DOMID_IO) {
+		*type = XEN_HYPER_DOMAIN_TYPE_IO;
+		sprintf(buf, verbose ? "dom_io" : "O");
+	} else if (dc->domain_id == XEN_HYPER_DOMID_XEN) {
+		*type = XEN_HYPER_DOMAIN_TYPE_XEN;
+		sprintf(buf, verbose ? "dom_xen" : "X");
+	} else if (dc->domain_id == XEN_HYPER_DOMID_IDLE) {
+		*type = XEN_HYPER_DOMAIN_TYPE_IDLE;
+		sprintf(buf, verbose ? "idle domain" : "I");
+	} else if (dc == xhdt->dom0) {
+		*type = XEN_HYPER_DOMAIN_TYPE_DOM0;
+		sprintf(buf, verbose ? "domain 0" : "0");
+	} else {
+		*type = XEN_HYPER_DOMAIN_TYPE_GUEST;
+		sprintf(buf, verbose ? "domain U" : "U");
+	}
+	return buf;
+}
+
+/*
+ * Determine whether the string holds a domain id or a domain address,
+ * and return the matching domain context.
+ */
+static int
+xen_hyper_str_to_domain_context(char *string, ulong *value,
+	struct xen_hyper_domain_context **dcp)
+{
+	ulong dvalue, hvalue;
+	int found, type;
+	char *s;
+	struct xen_hyper_domain_context *dc_did, *dc_ddc, *dc_hid, *dc_hdc;
+
+	if (string == NULL) {
+		error(INFO, "received NULL string\n");
+		return XEN_HYPER_STR_INVALID;
+	}
+
+	s = string;
+	dvalue = hvalue = BADADDR;
+
+	if (decimal(s, 0))
+		dvalue = dtol(s, RETURN_ON_ERROR, NULL);
+
+	if (hexadecimal(s, 0)) {
+		if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
+			s += 2;
+		if (strlen(s) <= MAX_HEXADDR_STRLEN)
+			hvalue = htol(s, RETURN_ON_ERROR, NULL);
+	}
+
+	found = 0;
+	dc_did = dc_ddc = dc_hid = dc_hdc = NULL;
+	type = XEN_HYPER_STR_INVALID;
+
+	if (dvalue != BADADDR) {
+		if ((dc_did = xen_hyper_id_to_domain_context(dvalue)))
+			found++;
+		if ((dc_ddc = xen_hyper_domain_to_domain_context(dvalue)))
+			found++;
+	}
+
+	if ((hvalue != BADADDR) && (dvalue != hvalue)) {
+		if ((dc_hid = xen_hyper_id_to_domain_context(hvalue)))
+			found++;
+		if ((dc_hdc = xen_hyper_domain_to_domain_context(hvalue)))
+			found++;
+	}
+
+	switch (found)
+	{
+	case 2:
+		if (dc_did && dc_hid) {
+			*dcp = dc_did;
+			*value = dvalue;
+			type = XEN_HYPER_STR_DID;
+		}
+		break;
+
+	case 1: 
+		if (dc_did) {
+			*dcp = dc_did;
+			*value = dvalue;
+			type = XEN_HYPER_STR_DID;
+		}
+
+		if (dc_ddc) {
+			*dcp = dc_ddc;
+			*value = dvalue;
+			type = XEN_HYPER_STR_DOMAIN;
+		}
+
+		if (dc_hid) {
+			*dcp = dc_hid;
+			*value = hvalue;
+			type = XEN_HYPER_STR_DID;
+		}
+
+		if (dc_hdc) {
+			*dcp = dc_hdc;
+			*value = hvalue;
+			type = XEN_HYPER_STR_DOMAIN;
+		}
+		break;
+	}
+
+	return type;
+}
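+
+/*
+ *  The xen_hyper_str_to_*_context() helpers in this file all share one
+ *  parsing pattern: an argument string is read both as a decimal and as
+ *  a hexadecimal number, and whichever interpretation resolves against
+ *  the known contexts decides the returned type.  The sketch below shows
+ *  that pattern in isolation; the helper is hypothetical (note the guard
+ *  macro, which is never defined) and is not referenced anywhere else.
+ */
+#ifdef XEN_HYPER_PARSE_SKETCH
+static int
+xen_hyper_parse_value_sketch(char *s, ulong *dvaluep, ulong *hvaluep)
+{
+	char *end;
+	ulong v;
+	int readings = 0;
+
+	*dvaluep = *hvaluep = BADADDR;
+
+	v = strtoul(s, &end, 10);	/* decimal reading, e.g. "10" -> 10 */
+	if ((end != s) && (*end == '\0')) {
+		*dvaluep = v;
+		readings++;
+	}
+	v = strtoul(s, &end, 16);	/* hex reading, e.g. "10" -> 0x10 */
+	if ((end != s) && (*end == '\0')) {
+		*hvaluep = v;
+		readings++;
+	}
+	return readings;	/* 0: unparsable, 1: unambiguous, 2: try both */
+}
+#endif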
+
+
+
+/*
+ *  Display a vcpu context.
+ */
+void
+xen_hyper_show_vcpu_context(struct xen_hyper_vcpu_context *vcc)
+{
+	char buf[XEN_HYPER_CMD_BUFSIZE];
+	struct xen_hyper_pcpu_context *pcc;
+	struct xen_hyper_domain_context *dc;
+	int len, flag;
+
+	len = 6;
+	len += pc->flags & RUNTIME ? 0 : 5;
+	flag = XEN_HYPER_PRI_R;
+
+	if (!(pcc = xen_hyper_id_to_pcpu_context(vcc->processor))) {
+		error(WARNING, "cannot get the pcpu context the vcpu belongs to.\n");
+		return;
+	}
+	if (!(dc = xen_hyper_domain_to_domain_context(vcc->domain))) {
+		error(WARNING, "cannot get the domain context the vcpu belongs to.\n");
+		return;
+	}
+	XEN_HYPER_PRI(fp, len, "PCPU-ID: ", buf, flag,
+		(buf, "%d\n", vcc->processor));
+	XEN_HYPER_PRI(fp, len, "PCPU: ", buf, flag,
+		(buf, "%lx\n", pcc->pcpu));
+	XEN_HYPER_PRI(fp, len, "VCPU-ID: ", buf, flag,
+		(buf, "%d\n", vcc->vcpu_id));
+	XEN_HYPER_PRI(fp, len, "VCPU: ", buf, flag,
+		(buf, "%lx  ", vcc->vcpu));
+	fprintf(fp, "(%s)\n", xen_hyper_vcpu_state_string(vcc, buf, VERBOSE));
+	XEN_HYPER_PRI(fp, len, "DOMAIN-ID: ", buf, flag,
+		(buf, "%d\n", dc->domain_id));
+	XEN_HYPER_PRI(fp, len, "DOMAIN: ", buf, flag,
+		(buf, "%lx  ", vcc->domain));
+	fprintf(fp, "(%s)\n", xen_hyper_domain_state_string(dc, buf, VERBOSE));
+	XEN_HYPER_PRI_CONST(fp, len, "STATE: ", flag);
+	if (machdep->flags & HWRESET) {
+		fprintf(fp, "HARDWARE RESET");
+	} else if (machdep->flags & INIT) {
+		fprintf(fp, "INIT");
+	} else if (xen_hyper_is_vcpu_crash(vcc)) {
+		fprintf(fp, "CRASH");
+	} else {
+		fprintf(fp, "ACTIVE");
+	}
+
+	fprintf(fp, "\n");
+}
+
+/*
+ * Determine whether the string holds a pcpu id or a note address,
+ * and return the matching dump information context.
+ */
+static int
+xen_hyper_str_to_dumpinfo_context(char *string, ulong *value,
+	struct xen_hyper_dumpinfo_context **dicp)
+{
+	ulong dvalue, hvalue;
+	struct xen_hyper_dumpinfo_context *note_did, *note_hid;
+	struct xen_hyper_dumpinfo_context *note_dad, *note_had;
+	int found, type;
+	char *s;
+
+	if (string == NULL) {
+		error(INFO, "received NULL string\n");
+		return XEN_HYPER_STR_INVALID;
+	}
+
+	s = string;
+	dvalue = hvalue = BADADDR;
+
+	if (decimal(s, 0))
+		dvalue = dtol(s, RETURN_ON_ERROR, NULL);
+	if (hexadecimal(s, 0)) {
+		if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
+			s += 2;
+		if (strlen(s) <= MAX_HEXADDR_STRLEN)
+			hvalue = htol(s, RETURN_ON_ERROR, NULL);
+	}
+
+	found = 0;
+	note_did = note_hid = note_dad = note_had = NULL;
+	type = XEN_HYPER_STR_INVALID;
+
+	if (dvalue != BADADDR) {
+		if (dvalue > XEN_HYPER_MAX_CPUS()) {
+			note_dad = xen_hyper_note_to_dumpinfo_context(dvalue);
+		} else {
+			note_did = xen_hyper_id_to_dumpinfo_context(dvalue);
+		}
+		found++;
+	}
+	if ((hvalue != BADADDR)) {
+		if (hvalue > XEN_HYPER_MAX_CPUS()) {
+			note_had = xen_hyper_note_to_dumpinfo_context(hvalue);
+		} else {
+			note_hid = xen_hyper_id_to_dumpinfo_context(hvalue);
+		}
+		found++;
+	}
+
+	switch (found)
+	{
+	case 2:
+		if (note_did && note_hid) {
+			*value = dvalue;
+			*dicp = note_did;
+			type = XEN_HYPER_STR_PCID;
+		}
+		break;
+	case 1:
+		if (note_did) {
+			*value = dvalue;
+			*dicp = note_did;
+			type = XEN_HYPER_STR_PCID;
+		}
+
+		if (note_hid) {
+			*value = hvalue;
+			*dicp = note_hid;
+			type = XEN_HYPER_STR_PCID;
+		}
+
+		if (note_dad) {
+			*value = dvalue;
+			*dicp = note_dad;
+			type = XEN_HYPER_STR_ADDR;
+		}
+
+		if (note_had) {
+			*value = hvalue;
+			*dicp = note_had;
+			type = XEN_HYPER_STR_ADDR;
+		}
+		break;
+	}
+
+	return type;
+}
+
+/*
+ * Determine whether the string holds a vcpu address,
+ * and return the matching vcpu context.
+ */
+static int
+xen_hyper_strvcpu_to_vcpu_context(char *string, ulong *value,
+	struct xen_hyper_vcpu_context **vccp)
+{
+	ulong dvalue, hvalue;
+	int found, type;
+	char *s;
+	struct xen_hyper_vcpu_context *vcc_dvc, *vcc_hvc;
+
+	if (string == NULL) {
+		error(INFO, "received NULL string\n");
+		return XEN_HYPER_STR_INVALID;
+	}
+
+	s = string;
+	dvalue = hvalue = BADADDR;
+
+	if (decimal(s, 0))
+		dvalue = dtol(s, RETURN_ON_ERROR, NULL);
+
+	if (hexadecimal(s, 0)) {
+		if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
+			s += 2;
+		if (strlen(s) <= MAX_HEXADDR_STRLEN)
+			hvalue = htol(s, RETURN_ON_ERROR, NULL);
+	}
+
+	found = 0;
+	vcc_dvc = vcc_hvc = NULL;
+	type = XEN_HYPER_STR_INVALID;
+
+	if (dvalue != BADADDR) {
+		if ((vcc_dvc = xen_hyper_vcpu_to_vcpu_context(dvalue)))
+			found++;
+	}
+
+	if ((hvalue != BADADDR) && (dvalue != hvalue)) {
+		if ((vcc_hvc = xen_hyper_vcpu_to_vcpu_context(hvalue)))
+			found++;
+	}
+
+	switch (found) 
+	{
+	case 1: 
+		if (vcc_dvc) {
+			*vccp = vcc_dvc;
+			*value = dvalue;
+			type = XEN_HYPER_STR_VCPU;
+		}
+
+		if (vcc_hvc) {
+			*vccp = vcc_hvc;
+			*value = hvalue;
+			type = XEN_HYPER_STR_VCPU;
+		}
+		break;
+	}
+
+	return type;
+}
+
+/*
+ * Determine whether the strings hold a valid domain id and vcpu id,
+ * and return the matching vcpu context.
+ */
+static int
+xen_hyper_strid_to_vcpu_context(char *strdom, char *strvc, ulong *valdom,
+	ulong *valvc, struct xen_hyper_vcpu_context **vccp)
+{
+	ulong dvalue, hvalue;
+	int found, type;
+	char *s;
+	struct xen_hyper_vcpu_context *vcc_did, *vcc_hid;
+	struct xen_hyper_domain_context *dc;
+
+	if (strdom == NULL || strvc == NULL) {
+		error(INFO, "received NULL string\n");
+		return XEN_HYPER_STR_INVALID;
+	}
+
+	if (xen_hyper_str_to_domain_context(strdom, valdom, &dc) ==
+	    XEN_HYPER_STR_INVALID) {
+		error(INFO, "invalid domain id string.\n");
+		return XEN_HYPER_STR_INVALID;
+	}
+
+	s = strvc;
+	dvalue = hvalue = BADADDR;
+	if (decimal(s, 0))
+		dvalue = dtol(s, RETURN_ON_ERROR, NULL);
+
+	if (hexadecimal(s, 0)) {
+		if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
+			s += 2;
+		if (strlen(s) <= MAX_HEXADDR_STRLEN)
+			hvalue = htol(s, RETURN_ON_ERROR, NULL);
+	}
+
+	found = 0;
+	vcc_did = vcc_hid = NULL;
+	type = XEN_HYPER_STR_INVALID;
+
+	if (dvalue != BADADDR) {
+		if ((vcc_did = xen_hyper_id_to_vcpu_context(dc->domain,
+		    XEN_HYPER_DOMAIN_ID_INVALID, dvalue)))
+			found++;
+	}
+
+	if ((hvalue != BADADDR) && (dvalue != hvalue)) {
+		if ((vcc_hid = xen_hyper_id_to_vcpu_context(dc->domain,
+		    XEN_HYPER_DOMAIN_ID_INVALID, hvalue)))
+			found++;
+	}
+
+	switch (found) 
+	{
+	case 2:
+		if (vcc_did && vcc_hid) {
+			*vccp = vcc_did;
+			*valvc = dvalue;
+			type = XEN_HYPER_STR_VCID;
+		}
+		break;
+	case 1: 
+		if (vcc_did) {
+			*vccp = vcc_did;
+			*valvc = dvalue;
+			type = XEN_HYPER_STR_VCID;
+		}
+
+		if (vcc_hid) {
+			*vccp = vcc_hid;
+			*valvc = hvalue;
+			type = XEN_HYPER_STR_VCID;
+		}
+		break;
+	}
+
+	return type;
+}
+
+/*
+ * Determine whether the string holds a pcpu id or a pcpu address,
+ * and return the matching pcpu context.
+ */
+static int
+xen_hyper_str_to_pcpu_context(char *string, ulong *value,
+	struct xen_hyper_pcpu_context **pccp)
+{
+	ulong dvalue, hvalue;
+	int found, type;
+	char *s;
+	struct xen_hyper_pcpu_context *pcc_did, *pcc_dpc, *pcc_hid, *pcc_hpc;
+
+	if (string == NULL) {
+		error(INFO, "received NULL string\n");
+		return XEN_HYPER_STR_INVALID;
+	}
+
+	s = string;
+	dvalue = hvalue = BADADDR;
+
+	if (decimal(s, 0))
+		dvalue = dtol(s, RETURN_ON_ERROR, NULL);
+
+	if (hexadecimal(s, 0)) {
+		if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
+			s += 2;
+		if (strlen(s) <= MAX_HEXADDR_STRLEN)
+			hvalue = htol(s, RETURN_ON_ERROR, NULL);
+	}
+
+	found = 0;
+	pcc_did = pcc_dpc = pcc_hid = pcc_hpc = NULL;
+	type = XEN_HYPER_STR_INVALID;
+
+	if (dvalue != BADADDR) {
+		if ((pcc_did = xen_hyper_id_to_pcpu_context(dvalue)))
+			found++;
+		if ((pcc_dpc = xen_hyper_pcpu_to_pcpu_context(dvalue)))
+			found++;
+	}
+
+	if ((hvalue != BADADDR) && (dvalue != hvalue)) {
+		if ((pcc_hid = xen_hyper_id_to_pcpu_context(hvalue)))
+			found++;
+		if ((pcc_hpc = xen_hyper_pcpu_to_pcpu_context(hvalue)))
+			found++;
+	}
+
+	switch (found)
+	{
+	case 2:
+		if (pcc_did && pcc_hid) {
+			*pccp = pcc_did;
+			*value = dvalue;
+			type = XEN_HYPER_STR_PCID;
+		}
+		break;
+
+	case 1: 
+		if (pcc_did) {
+			*pccp = pcc_did;
+			*value = dvalue;
+			type = XEN_HYPER_STR_PCID;
+		}
+
+		if (pcc_dpc) {
+			*pccp = pcc_dpc;
+			*value = dvalue;
+			type = XEN_HYPER_STR_PCPU;
+		}
+
+		if (pcc_hid) {
+			*pccp = pcc_hid;
+			*value = hvalue;
+			type = XEN_HYPER_STR_PCID;
+		}
+
+		if (pcc_hpc) {
+			*pccp = pcc_hpc;
+			*value = hvalue;
+			type = XEN_HYPER_STR_PCPU;
+		}
+		break;
+	}
+
+	return type;
+}
+
+#endif
--- crash/netdump.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/netdump.h	2009-01-27 11:42:12.000000000 -0500
@@ -1,7 +1,7 @@
 /* netdump.h
  *
- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson
+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved.
  *
  * This software may be freely redistributed under the terms of the
  * GNU General Public License.
@@ -24,3 +24,100 @@
 
 #define NT_TASKSTRUCT 4
 #define NT_DISKDUMP   0x70000001
+
+#ifdef NOTDEF
+/*
+ *  Note: based upon the original (since abandoned) proposal for
+ *  its contents -- kept around for potential future use.
+ */
+#ifndef NT_KDUMPINFO
+#define NT_KDUMPINFO 7
+#endif
+
+#endif  /* NOTDEF */
+
+struct pt_load_segment {
+	off_t file_offset;
+	physaddr_t phys_start;
+	physaddr_t phys_end;
+	physaddr_t zero_fill;
+};
+
+struct vmcore_data {
+	ulong flags;
+	int ndfd;
+	FILE *ofp;
+	uint header_size;
+	char *elf_header;
+	uint num_pt_load_segments;
+	struct pt_load_segment *pt_load_segments;
+	Elf32_Ehdr *elf32;
+	Elf32_Phdr *notes32;
+	Elf32_Phdr *load32;
+	Elf64_Ehdr *elf64;
+	Elf64_Phdr *notes64;
+	Elf64_Phdr *load64;
+	void *nt_prstatus;
+	void *nt_prpsinfo;
+	void *nt_taskstruct;
+	ulong task_struct;
+	uint page_size;
+	ulong switch_stack;
+	uint num_prstatus_notes;
+	void *nt_prstatus_percpu[NR_CPUS];
+	struct xen_kdump_data *xen_kdump_data;
+	void *vmcoreinfo;
+	uint size_vmcoreinfo;
+};
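+
+/*
+ *  A minimal sketch of how the pt_load_segments[] data is intended to be
+ *  used: a physical address that falls inside a segment's range maps to
+ *  file_offset + (paddr - phys_start) in the dumpfile.  The helper name
+ *  below is hypothetical and ignores the zero_fill handling; it only
+ *  illustrates the data structure, it is not used by the dumpfile code.
+ */
+static inline off_t
+vmcore_paddr_to_offset_sketch(struct vmcore_data *vd, physaddr_t paddr)
+{
+	uint i;
+	struct pt_load_segment *pls;
+
+	for (i = 0; i < vd->num_pt_load_segments; i++) {
+		pls = &vd->pt_load_segments[i];
+		if ((paddr >= pls->phys_start) && (paddr < pls->phys_end))
+			return pls->file_offset + (paddr - pls->phys_start);
+	}
+	return (off_t)-1;	/* not backed by any PT_LOAD segment */
+}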
+
+/*
+ *  ELF note types for Xen dom0/hypervisor kdumps.
+ *  The comments below are from xen/include/public/elfnote.h.
+ */
+
+/*
+ * System information exported through crash notes.
+ *
+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
+ * note in case of a system crash. This note will contain various
+ * information about the system, see xen/include/xen/elfcore.h.
+ */
+#define XEN_ELFNOTE_CRASH_INFO 0x1000001
+
+/*
+ * System registers exported through crash notes.
+ *
+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
+ * note per cpu in case of a system crash. This note is architecture
+ * specific and will contain registers not saved in the "CORE" note.
+ * See xen/include/xen/elfcore.h for more information.
+ */
+#define XEN_ELFNOTE_CRASH_REGS 0x1000002
+
+
+/* 
+ * For (temporary) backwards compatibility.
+ */
+#define NT_XEN_KDUMP_CR3 0x10000001
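+
+/*
+ *  A minimal sketch of how the note types above are located: a PT_NOTE
+ *  segment is a sequence of Elf64_Nhdr headers, each followed by its
+ *  4-byte-padded name and descriptor.  The function name is hypothetical
+ *  and only illustrates the layout the note types refer to; the actual
+ *  note parsing is done by the dumpfile code, not here.
+ */
+static inline void *
+xen_kdump_find_note_sketch(void *notes, size_t size, uint type)
+{
+	char *ptr = (char *)notes;
+	char *end = ptr + size;
+
+	while ((ptr + sizeof(Elf64_Nhdr)) <= end) {
+		Elf64_Nhdr *note = (Elf64_Nhdr *)ptr;
+		char *desc = ptr + sizeof(Elf64_Nhdr) +
+			((note->n_namesz + 3) & ~3);
+
+		if (note->n_type == type)
+			return desc;	/* start of the descriptor data */
+		ptr = desc + ((note->n_descsz + 3) & ~3);
+	}
+	return NULL;
+}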
+
+struct xen_kdump_data {
+	ulong flags;
+	ulong cr3;
+	ulong p2m_mfn;
+	char *page;
+	ulong last_mfn_read;
+	ulong last_pmd_read;
+	ulong cache_hits;
+	ulong accesses;
+	int p2m_frames;
+	ulong *p2m_mfn_frame_list;
+	ulong xen_phys_start;
+	int xen_major_version;
+	int xen_minor_version;
+};
+
+#define KDUMP_P2M_INIT  (0x1)
+#define KDUMP_CR3       (0x2)
+#define KDUMP_MFN_LIST  (0x4)
+
+#define P2M_FAILURE ((physaddr_t)(0xffffffffffffffffLL))
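+
+/*
+ *  A minimal sketch of the translation that the KDUMP_MFN_LIST fields
+ *  support: p2m_mfn_frame_list[] holds the machine frames that make up
+ *  the p2m table, and a pseudo-physical frame number indexes into that
+ *  table page by page.  The read_machine_page() callback and the helper
+ *  name are hypothetical; the real translation (the cr3-based variant,
+ *  caching via page/last_mfn_read, etc.) is done by the dumpfile code.
+ */
+typedef int (*read_machine_page_t)(ulong mfn, void *buf);  /* hypothetical */
+
+static inline physaddr_t
+xen_kdump_pfn_to_mfn_sketch(struct xen_kdump_data *xkd, ulong pfn,
+	uint page_size, read_machine_page_t read_machine_page, ulong *pagebuf)
+{
+	ulong entries_per_page = page_size / sizeof(ulong);
+
+	if (!(xkd->flags & KDUMP_MFN_LIST) ||
+	    ((pfn / entries_per_page) >= (ulong)xkd->p2m_frames))
+		return P2M_FAILURE;
+
+	/* read the p2m page that covers this pfn into the caller's buffer */
+	if (read_machine_page(xkd->p2m_mfn_frame_list[pfn / entries_per_page],
+	    pagebuf))
+		return P2M_FAILURE;
+
+	return (physaddr_t)pagebuf[pfn % entries_per_page];
+}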
--- crash/lkcd_fix_mem.h.orig	2009-02-12 09:31:03.000000000 -0500
+++ crash/lkcd_fix_mem.h	2007-10-30 10:51:55.000000000 -0400
@@ -1,3 +1,5 @@
+/* OBSOLETE */
+
 #ifdef IA64
 
 #define UTSNAME_ENTRY_SZ 65