"Initial check in for Widevine Third Party source"

git-svn-id: https://widevine-mirrored-source.googlecode.com/svn/trunk@8959 8780d7d1-120b-16fd-4e4d-63eabaf331ad
diff --git a/xnu-792.18.15/osfmk/kdp/ml/i386/kdp_machdep.c b/xnu-792.18.15/osfmk/kdp/ml/i386/kdp_machdep.c
new file mode 100644
index 0000000..8d91941
--- /dev/null
+++ b/xnu-792.18.15/osfmk/kdp/ml/i386/kdp_machdep.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+ 
+#include <mach_kdp.h>
+#include <mach/mach_types.h>
+#include <mach/machine.h>
+#include <mach/exception_types.h>
+#include <kern/cpu_data.h>
+#include <i386/trap.h>
+#include <i386/mp.h>
+#include <kdp/kdp_internal.h>
+#include <mach-o/loader.h>
+#include <mach-o/nlist.h>
+#include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
+#include <kern/machine.h> /* for halt_all_cpus */
+
+#include <kern/thread.h>
+#include <i386/thread.h>
+#include <vm/vm_map.h>
+#include <i386/pmap.h>
+
+#define KDP_TEST_HARNESS 0
+#if KDP_TEST_HARNESS
+#define dprintf(x) printf x
+#else
+#define dprintf(x)
+#endif
+
+extern cpu_type_t cpuid_cputype(void);
+extern cpu_subtype_t cpuid_cpusubtype(void);
+
+void		print_saved_state(void *);
+void		kdp_call(void);
+int		kdp_getc(void);
+boolean_t	kdp_call_kdb(void);
+void		kdp_getstate(i386_thread_state_t *);
+void		kdp_setstate(i386_thread_state_t *);
+void		kdp_print_phys(int);
+
+int
+machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p);
+
+int
+machine_trace_thread64(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p);
+
+extern unsigned kdp_vm_read(caddr_t src, caddr_t dst, unsigned len);
+
+/*
+ * Build a KDP_EXCEPTION request in 'pkt' describing a single exception
+ * (reported as cpu 0) with the given exception/code/subcode triple,
+ * and flag that an acknowledgement from the remote debugger is now
+ * outstanding.  On return *len is the packet length and *remote_port
+ * is the debugger's exception port.
+ */
+void
+kdp_exception(
+    unsigned char	*pkt,
+    int	*len,
+    unsigned short	*remote_port,
+    unsigned int	exception,
+    unsigned int	code,
+    unsigned int	subcode
+)
+{
+    kdp_exception_t	*rq = (kdp_exception_t *)pkt;
+
+    rq->hdr.request = KDP_EXCEPTION;
+    rq->hdr.is_reply = 0;
+    rq->hdr.seq = kdp.exception_seq;
+    rq->hdr.key = 0;
+    rq->hdr.len = sizeof (*rq);
+    
+    rq->n_exc_info = 1;
+    rq->exc_info[0].cpu = 0;
+    rq->exc_info[0].exception = exception;
+    rq->exc_info[0].code = code;
+    rq->exc_info[0].subcode = subcode;
+    
+    rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);
+    
+    /* NOTE(review): rq aliases pkt, so this copies the packet onto
+     * itself and appears redundant - confirm before removing. */
+    bcopy((char *)rq, (char *)pkt, rq->hdr.len);
+
+    kdp.exception_ack_needed = TRUE;
+    
+    *remote_port = kdp.exception_port;
+    *len = rq->hdr.len;
+}
+
+/*
+ * Validate an exception acknowledgement from the remote debugger.
+ * Returns TRUE iff the packet is a well-formed KDP_EXCEPTION reply of
+ * sufficient length; when its sequence number matches the outstanding
+ * one, the pending-ack flag is cleared and the sequence advanced.
+ */
+boolean_t
+kdp_exception_ack(
+    unsigned char	*pkt,
+    int			len
+)
+{
+    kdp_exception_ack_t	*rq = (kdp_exception_ack_t *)pkt;
+
+    if (((unsigned int) len) < sizeof (*rq))
+	return(FALSE);
+	
+    if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
+    	return(FALSE);
+	
+    dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
+	
+    if (rq->hdr.seq == kdp.exception_seq) {
+	kdp.exception_ack_needed = FALSE;
+	kdp.exception_seq++;
+    }
+    return(TRUE);
+}
+
+/*
+ * Export the saved trap register state into a debugger-visible
+ * x86_thread_state32_t.  Unset fields are zeroed via the static null
+ * state.  For a trap taken in kernel mode (CPL 0) the CPU pushed no
+ * user esp/ss, so esp is reported as the address of the saved uesp
+ * slot (top of the trap frame) and ss as KERNEL_DS.
+ * NOTE(review): declared above as taking i386_thread_state_t * -
+ * presumably a typedef alias of x86_thread_state32_t; confirm.
+ */
+void
+kdp_getstate(
+    x86_thread_state32_t	*state
+)
+{
+    static x86_thread_state32_t	null_state;
+    x86_saved_state32_t	*saved_state;
+    
+    saved_state = (x86_saved_state32_t *)kdp.saved_state;
+    
+    *state = null_state;	
+    state->eax = saved_state->eax;
+    state->ebx = saved_state->ebx;
+    state->ecx = saved_state->ecx;
+    state->edx = saved_state->edx;
+    state->edi = saved_state->edi;
+    state->esi = saved_state->esi;
+    state->ebp = saved_state->ebp;
+
+    if ((saved_state->cs & 0x3) == 0){	/* Kernel State */
+    	state->esp = (unsigned int) &saved_state->uesp;
+        state->ss = KERNEL_DS;
+    } else {
+    	state->esp = saved_state->uesp;
+    	state->ss = saved_state->ss;
+    }
+
+    state->eflags = saved_state->efl;
+    state->eip = saved_state->eip;
+    state->cs = saved_state->cs;
+    state->ds = saved_state->ds;
+    state->es = saved_state->es;
+    state->fs = saved_state->fs;
+    state->gs = saved_state->gs;
+}
+
+
+/*
+ * Import debugger-supplied register values back into the saved trap
+ * state.  Only the general-purpose registers, eflags, eip, fs and gs
+ * are written; cs/ds/es/ss are not restored from the debugger state.
+ */
+void
+kdp_setstate(
+    x86_thread_state32_t	*state
+)
+{
+    x86_saved_state32_t		*saved_state;
+    
+    saved_state = (x86_saved_state32_t *)kdp.saved_state;
+
+    saved_state->eax = state->eax;
+    saved_state->ebx = state->ebx;
+    saved_state->ecx = state->ecx;
+    saved_state->edx = state->edx;
+    saved_state->edi = state->edi;
+    saved_state->esi = state->esi;
+    saved_state->ebp = state->ebp;
+    saved_state->efl = state->eflags;
+#if	0
+    saved_state->frame.eflags &= ~( EFL_VM | EFL_NT | EFL_IOPL | EFL_CLR );
+    saved_state->frame.eflags |=  ( EFL_IF | EFL_SET );
+#endif
+    saved_state->eip = state->eip;
+    saved_state->fs = state->fs;
+    saved_state->gs = state->gs;
+}
+
+
+/*
+ * KDP read-registers handler.  Supports the 32-bit thread-state
+ * flavors (filled in via kdp_getstate) and a zeroed float state; any
+ * other flavor yields KDPERR_BADFLAVOR with *size = 0.
+ * NOTE(review): 'flavor' and 'size' are marked __unused but are in
+ * fact used below - the annotations look stale.
+ */
+kdp_error_t
+kdp_machine_read_regs(
+    __unused unsigned int cpu,
+    __unused unsigned int flavor,
+    char *data,
+    __unused int *size
+)
+{
+    static struct i386_float_state  null_fpstate;
+
+    switch (flavor) {
+
+    case OLD_i386_THREAD_STATE:
+    case x86_THREAD_STATE32:
+	dprintf(("kdp_readregs THREAD_STATE\n"));
+	kdp_getstate((x86_thread_state32_t *)data);
+	*size = sizeof (x86_thread_state32_t);
+	return KDPERR_NO_ERROR;
+	
+    case x86_FLOAT_STATE32:
+	dprintf(("kdp_readregs THREAD_FPSTATE\n"));
+	*(x86_float_state32_t *)data = null_fpstate;
+	*size = sizeof (x86_float_state32_t);
+	return KDPERR_NO_ERROR;
+	
+    default:
+	dprintf(("kdp_readregs bad flavor %d\n", flavor));
+	*size = 0;
+	return KDPERR_BADFLAVOR;
+    }
+}
+
+/*
+ * KDP write-registers handler: applies debugger-supplied register
+ * state for the given flavor via kdp_setstate.  Float-state writes
+ * are accepted and silently ignored.  Unknown flavors return
+ * KDPERR_BADFLAVOR.
+ */
+kdp_error_t
+kdp_machine_write_regs(
+    __unused unsigned int cpu,
+    unsigned int flavor,
+    char *data,
+    __unused int *size
+)
+{
+    switch (flavor) {
+
+    case OLD_i386_THREAD_STATE:
+    case x86_THREAD_STATE32:
+	dprintf(("kdp_writeregs THREAD_STATE\n"));
+	kdp_setstate((x86_thread_state32_t *)data);
+	return KDPERR_NO_ERROR;
+	
+    case x86_FLOAT_STATE32:
+	dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
+	return KDPERR_NO_ERROR;
+	
+    default:
+	/* Fixed: the format string expects an int but no argument was
+	 * passed - undefined behavior whenever KDP_TEST_HARNESS
+	 * enables dprintf.  Supply 'flavor'. */
+	dprintf(("kdp_writeregs bad flavor %d\n", flavor));
+	return KDPERR_BADFLAVOR;
+    }
+}
+
+
+
+/*
+ * Fill in host information for the debugger: a bitmask of present
+ * CPUs (slots whose cpu_data_ptr entry is non-NULL) plus the CPU
+ * type/subtype reported by CPUID.
+ */
+void
+kdp_machine_hostinfo(
+    kdp_hostinfo_t *hostinfo
+)
+{
+    int			i;
+
+    hostinfo->cpus_mask = 0;
+
+    for (i = 0; i < machine_info.max_cpus; i++) {
+	if (cpu_data_ptr[i] == NULL)
+            continue;
+	
+        hostinfo->cpus_mask |= (1 << i);
+    }
+
+    hostinfo->cpu_type = cpuid_cputype();
+    hostinfo->cpu_subtype = cpuid_cpusubtype();
+}
+
+/*
+ * Last-resort failure path for the debugger stub: log the message and
+ * halt this processor with HLT (does not return).
+ */
+void
+kdp_panic(
+    const char		*msg
+)
+{
+    kprintf("kdp panic: %s\n", msg);    
+    __asm__ volatile("hlt");	
+}
+
+
+/* Restart the system via the platform expert; halt all CPUs if the
+ * platform restart hook is absent or returns. */
+void
+kdp_reboot(void)
+{
+	printf("Attempting system restart...");
+	/* Call the platform specific restart*/
+	if (PE_halt_restart)
+		(*PE_halt_restart)(kPERestartCPU);
+	/* If we do reach this, give up */
+	halt_all_cpus(TRUE);
+}
+
+/* Block interrupts by raising spl; returns the previous level for a
+ * later kdp_intr_enbl(). */
+int
+kdp_intr_disbl(void)
+{
+   return splhigh();
+}
+
+/* Restore the interrupt level previously saved by kdp_intr_disbl(). */
+void
+kdp_intr_enbl(int s)
+{
+	splx(s);
+}
+
+/* Poll the console for a character via cnmaygetc(). */
+int
+kdp_getc()
+{
+	return	cnmaygetc();
+}
+
+/*
+ * Busy-wait for approximately 'usec' microseconds.
+ * NOTE(review): delay(usec/100) looks like a unit mismatch - verify
+ * the units delay() expects before relying on the timing.
+ */
+void
+kdp_us_spin(int usec)
+{
+    delay(usec/100);
+}
+
+/*
+ * Dump a few fields of a saved trap state for diagnostics.
+ * NOTE(review): the "sp" line prints the state pointer itself (not a
+ * stack pointer value) with %x, and "rp" is a placeholder.
+ */
+void print_saved_state(void *state)
+{
+    x86_saved_state32_t		*saved_state;
+
+    saved_state = state;
+
+	kprintf("pc = 0x%x\n", saved_state->eip);
+	kprintf("cr2= 0x%x\n", saved_state->cr2);
+	kprintf("rp = TODO FIXME\n");
+	kprintf("sp = 0x%x\n", saved_state);
+
+}
+
+/* Instruction/data cache synchronization is unnecessary on i386; no-op. */
+void
+kdp_sync_cache()
+{
+	return;	/* No op here. */
+}
+
+/* Enter the debugger by taking a breakpoint trap (INT 3). */
+void
+kdp_call()
+{
+	__asm__ volatile ("int	$3");	/* Let the processor do the work */
+}
+
+
+/*
+ * Minimal view of a C stack frame as laid out by the i386 calling
+ * convention: saved frame pointer, return address, then arguments.
+ */
+typedef struct _cframe_t {
+    struct _cframe_t	*prev;		/* caller's frame pointer */
+    unsigned		caller;		/* return address */
+    unsigned		args[0];	/* arguments pushed by the caller */
+} cframe_t;
+
+#include <i386/pmap.h>
+extern pt_entry_t *DMAP2;
+extern caddr_t DADDR2;
+
+/*
+ * Debug helper: temporarily map the physical page containing 'src'
+ * through the DMAP2/DADDR2 window and dump 100 words from it, then
+ * tear the mapping down.
+ */
+void
+kdp_print_phys(int src)
+{
+	unsigned int   *iptr;
+	int             i;
+
+	/* 0x63: PTE flag bits (presumably present/writable/accessed/
+	 * dirty - confirm against the pmap headers) | page frame */
+	*(int *) DMAP2 = 0x63 | (src & 0xfffff000);
+	invlpg((u_int) DADDR2);
+	iptr = (unsigned int *) DADDR2;
+	for (i = 0; i < 100; i++) {
+		kprintf("0x%x ", *iptr++);
+		if ((i % 8) == 0)
+			kprintf("\n");
+	}
+	kprintf("\n");
+	*(int *) DMAP2 = 0;
+
+}
+
+/*
+ * Kernel trap hook for the debugger.  Traps other than INT3/T_DEBUG
+ * are logged; if no debugger is connected, FALSE is returned so the
+ * normal trap path can handle them.  Otherwise the other CPUs are
+ * quiesced (mp_kdp_enter), the i386 trap number is translated to a
+ * Mach exception/code/subcode triple and handed to
+ * kdp_raise_exception(), and the CPUs are released on resume.
+ */
+boolean_t
+kdp_i386_trap(
+    unsigned int	trapno,
+    x86_saved_state32_t	*saved_state,
+    kern_return_t	result,
+    vm_offset_t		va
+)
+{
+    unsigned int exception, subcode = 0, code;
+
+    if (trapno != T_INT3 && trapno != T_DEBUG) {
+    	kprintf("unexpected kernel trap 0x%x eip 0x%x cr2 0x%x \n",
+		trapno, saved_state->eip, saved_state->cr2);
+	if (!kdp.is_conn)
+	    return FALSE;
+    }	
+
+    mp_kdp_enter();
+
+    switch (trapno) {
+    
+    case T_DIVIDE_ERROR:
+	exception = EXC_ARITHMETIC;
+	code = EXC_I386_DIVERR;
+	break;
+    
+    case T_OVERFLOW:
+	exception = EXC_SOFTWARE;
+	code = EXC_I386_INTOFLT;
+	break;
+    
+    case T_OUT_OF_BOUNDS:
+	exception = EXC_ARITHMETIC;
+	code = EXC_I386_BOUNDFLT;
+	break;
+    
+    case T_INVALID_OPCODE:
+	exception = EXC_BAD_INSTRUCTION;
+	code = EXC_I386_INVOPFLT;
+	break;
+    
+    case T_SEGMENT_NOT_PRESENT:
+	exception = EXC_BAD_INSTRUCTION;
+	code = EXC_I386_SEGNPFLT;
+	subcode	= saved_state->err;
+	break;
+    
+    case T_STACK_FAULT:
+	exception = EXC_BAD_INSTRUCTION;
+	code = EXC_I386_STKFLT;
+	subcode	= saved_state->err;
+	break;
+    
+    case T_GENERAL_PROTECTION:
+	exception = EXC_BAD_INSTRUCTION;
+	code = EXC_I386_GPFLT;
+	subcode	= saved_state->err;
+	break;
+	
+    case T_PAGE_FAULT:
+    	exception = EXC_BAD_ACCESS;
+	code = result;		/* pmap fault result from the trap handler */
+	subcode = va;		/* faulting virtual address */
+	break;
+    
+    case T_WATCHPOINT:
+	exception = EXC_SOFTWARE;
+	code = EXC_I386_ALIGNFLT;
+	break;
+	
+    case T_DEBUG:
+    case T_INT3:
+	exception = EXC_BREAKPOINT;
+	code = EXC_I386_BPTFLT;
+	break;
+
+    default:
+    	exception = EXC_BAD_INSTRUCTION;
+	code = trapno;
+	break;
+    }
+
+    kdp_raise_exception(exception, code, subcode, saved_state);
+
+    mp_kdp_exit();
+
+    return TRUE;
+}
+
+/* No built-in kernel debugger (KDB) on this configuration. */
+boolean_t 
+kdp_call_kdb(
+        void) 
+{       
+        return(FALSE);
+}
+
+/* Return the machine breakpoint instruction (x86 INT3 opcode 0xCC). */
+unsigned int
+kdp_ml_get_breakinsn(void)
+{
+  return 0xcc;
+}
+extern pmap_t kdp_pmap;
+
+#define RETURN_OFFSET 4
+/*
+ * Walk a thread's frame-pointer (ebp) chain and record a backtrace
+ * into the buffer at 'tracepos' (bounded by 'tracebound'), up to
+ * 'nframes' frames.  For user threads the user register state is used
+ * and kdp_pmap is pointed at the task's pmap so kdp_vm_read
+ * translates through it; kernel threads are walked from the saved
+ * kernel state.  The walk stops on a NULL/unaligned/backward frame
+ * pointer, a frame above 'stacklimit', an unreadable frame, or a full
+ * buffer.  Returns the number of bytes written to the buffer.
+ */
+int
+machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p)
+{
+	uint32_t *tracebuf = (uint32_t *)tracepos;
+	uint32_t fence = 0;
+	uint32_t stackptr = 0;
+	uint32_t stacklimit = 0xfc000000;
+	int framecount = 0;
+	uint32_t init_eip = 0;
+	uint32_t prevsp = 0;
+	uint32_t framesize = 2 * sizeof(vm_offset_t);
+	
+	if (user_p) {
+	        x86_saved_state32_t	*iss32;
+		
+		iss32 = USER_REGS32(thread);
+
+		init_eip = iss32->eip;
+		stackptr = iss32->ebp;
+
+		/* This bound isn't useful, but it doesn't hinder us*/
+		stacklimit = 0xffffffff;
+		kdp_pmap = thread->task->map->pmap;
+	}
+	else {
+		/*Examine the i386_saved_state at the base of the kernel stack*/
+		stackptr = STACK_IKS(thread->kernel_stack)->k_ebp;
+		init_eip = STACK_IKS(thread->kernel_stack)->k_eip;
+	}
+
+	*tracebuf++ = init_eip;
+
+	for (framecount = 0; framecount < nframes; framecount++) {
+
+		/* Stop early if fewer than four frames' worth of space remain */
+		if ((tracebound - ((uint32_t) tracebuf)) < (4 * framesize)) {
+			tracebuf--;
+			break;
+		}
+
+		*tracebuf++ = stackptr;
+/* Invalid frame, or hit fence */
+		if (!stackptr || (stackptr == fence)) {
+			break;
+		}
+		/* Stack grows downward */
+		if (stackptr < prevsp) {
+			break;
+		}
+		/* Unaligned frame */
+		if (stackptr & 0x0000003) {
+			break;
+		}
+		if (stackptr > stacklimit) {
+			break;
+		}
+
+		/* Read the return address stored at ebp + 4 */
+		if (kdp_vm_read((caddr_t) (stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
+			break;
+		}
+		tracebuf++;
+		
+		/* Follow the saved frame pointer to the caller's frame */
+		prevsp = stackptr;
+		if (kdp_vm_read((caddr_t) stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
+			*tracebuf++ = 0;
+			break;
+		}
+	}
+
+	kdp_pmap = 0;
+
+	return ((uint32_t) tracebuf - tracepos);
+}
+
+/* This is a stub until the x86 64-bit model becomes clear; it always
+ * reports zero bytes of backtrace. */
+int
+machine_trace_thread64(__unused thread_t thread, __unused uint32_t tracepos, __unused uint32_t tracebound, __unused int nframes, __unused boolean_t user_p) {
+	return 0;
+}
diff --git a/xnu-792.18.15/osfmk/kdp/ml/i386/kdp_vm.c b/xnu-792.18.15/osfmk/kdp/ml/i386/kdp_vm.c
new file mode 100644
index 0000000..bc23863
--- /dev/null
+++ b/xnu-792.18.15/osfmk/kdp/ml/i386/kdp_vm.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+#include <mach/mach_types.h>
+#include <mach/vm_attributes.h>
+#include <mach/vm_param.h>
+#include <libsa/types.h>
+
+#include <vm/vm_map.h>
+#include <i386/pmap.h>
+
+#include <kdp/kdp_core.h>
+#include <kdp/kdp_internal.h>
+#include <mach-o/loader.h>
+#include <mach/vm_map.h>
+#include <mach/vm_statistics.h>
+#include <mach/thread_status.h>
+#include <i386/thread.h>
+
+#include <vm/vm_protos.h>
+#include <vm/vm_kern.h>
+
+unsigned kdp_vm_read( caddr_t, caddr_t, unsigned);
+unsigned kdp_vm_write( caddr_t, caddr_t, unsigned);
+
+boolean_t kdp_trans_off = 0;
+uint32_t kdp_src_high32 = 0;
+extern pmap_paddr_t avail_start, avail_end;
+
+extern void bcopy_phys(addr64_t from, addr64_t to, int size);
+static addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);
+
+pmap_t kdp_pmap = 0;
+
+unsigned int not_in_kdp = 1; /* Cleared when we begin to access vm functions in kdp */
+
+extern vm_offset_t sectTEXTB, sectDATAB, sectLINKB, sectPRELINKB;
+extern int sectSizeTEXT, sectSizeDATA, sectSizeLINK, sectSizePRELINK;
+
+int	kern_dump(void);
+int	kdp_dump_trap(int type, x86_saved_state32_t *regs);
+
+typedef struct {
+	int	flavor;			/* the number for this flavor */
+	mach_msg_type_number_t	count;	/* count of ints in this flavor */
+} mythread_state_flavor_t;
+
+static mythread_state_flavor_t thread_flavor_array [] = { 
+	{x86_THREAD_STATE32, x86_THREAD_STATE32_COUNT}
+};
+
+static int kdp_mynum_flavors = 1;
+static int MAX_TSTATE_FLAVORS = 1;
+
+typedef struct {
+	vm_offset_t header; 
+	int  hoffset;
+	mythread_state_flavor_t *flavors;
+	int tstate_size;
+} tir_t;
+
+char command_buffer[512];
+
+/*
+ * Translate virtual address 'va' through 'pmap' to a 64-bit physical
+ * address; returns 0 when no valid mapping exists.
+ */
+static addr64_t
+kdp_vtophys(
+	pmap_t pmap,
+	addr64_t va)
+{
+	addr64_t    pa;
+	ppnum_t pp;
+/* Clear high 32 - pmap_find_phys() may panic() otherwise */
+	va &= 0xFFFFFFFFULL;
+	pp = pmap_find_phys(pmap, va);
+	if(!pp) return 0;
+	
+	/* Combine the page number (<< 12 = page shift) with the page offset */
+	pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);
+	return(pa);
+}
+
+/*
+ *
+ */
+/*
+ * Copy 'len' bytes from a virtual source to a kernel virtual
+ * destination, page by page, via physical copies.  The source is
+ * translated through kdp_pmap when set (else kernel_pmap), treated as
+ * already physical when kdp_trans_off is set, and widened with
+ * kdp_src_high32 as its upper 32 bits.  Returns the number of bytes
+ * actually copied (short on any translation/validation failure).
+ */
+unsigned kdp_vm_read(
+	caddr_t src, 
+	caddr_t dst, 
+	unsigned len)
+{
+	addr64_t cur_virt_src = (addr64_t)((unsigned int)src | (((uint64_t)kdp_src_high32) << 32));
+	addr64_t cur_virt_dst = (addr64_t)((unsigned int)dst);
+	addr64_t cur_phys_dst, cur_phys_src;
+	unsigned resid = len;
+	unsigned cnt = 0;
+	pmap_t src_pmap = kernel_pmap;
+
+/* If a different pmap has been specified with kdp_pmap, use it to translate the
+ * source (cur_virt_src); otherwise, the source is translated using the
+ * kernel_pmap.
+ */
+	if (kdp_pmap)
+		src_pmap = kdp_pmap;
+
+	while (resid != 0) {
+/* Translate, unless kdp_trans_off is set */
+		if (!kdp_trans_off) {
+			if (!(cur_phys_src = kdp_vtophys(src_pmap,
+				    cur_virt_src)))
+				goto exit;
+		}
+		else
+			cur_phys_src = cur_virt_src;
+
+/* Always translate the destination buffer using the kernel_pmap */
+		if(!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
+			goto exit;
+
+		/* Validate physical page numbers when performing a crashdump */
+		if (not_in_kdp == 0)
+			if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src)))
+				goto exit;
+
+/* Get length left on page */
+		cnt = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
+		if (cnt > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
+			cnt = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
+		if (cnt > resid)
+			cnt = resid;
+
+/* Do a physical copy */
+		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);
+
+		cur_virt_src += cnt;
+		cur_virt_dst += cnt;
+		resid -= cnt;
+	}
+exit:
+	return (len - resid);
+}
+
+/*
+ * 
+ */
+/*
+ * Copy 'len' bytes between two kernel virtual addresses using
+ * physical copies, clamping each chunk to the smaller of the bytes
+ * remaining in the source and destination pages.  Returns the number
+ * of bytes actually copied (short on any translation failure).
+ */
+unsigned kdp_vm_write(
+        caddr_t src,
+        caddr_t dst,
+        unsigned len)
+{       
+	addr64_t cur_virt_src, cur_virt_dst;
+	addr64_t cur_phys_src, cur_phys_dst;
+	unsigned resid, cnt, cnt_src, cnt_dst;
+
+#ifdef KDP_VM_WRITE_DEBUG
+	printf("kdp_vm_write: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
+#endif
+
+	cur_virt_src = (addr64_t)((unsigned int)src);
+	cur_virt_dst = (addr64_t)((unsigned int)dst);
+
+	resid = len;
+
+	while (resid != 0) {
+		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) 
+			goto exit;
+
+		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) 
+			goto exit;
+
+		/* Bytes remaining on each page.  Fixed: the previous
+		 * expression masked (addr + PAGE_SIZE) with PAGE_MASK
+		 * instead of rounding up to the next page boundary
+		 * (~PAGE_MASK), yielding a bogus remainder; compute it
+		 * the same way kdp_vm_read does. */
+		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
+		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
+
+		if (cnt_src > cnt_dst)
+			cnt = cnt_dst;
+		else
+			cnt = cnt_src;
+		if (cnt > resid) 
+			cnt = resid;
+
+		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);		/* Copy stuff over */
+
+		cur_virt_src +=cnt;
+		cur_virt_dst +=cnt;
+		resid -= cnt;
+	}
+exit:
+	return (len - resid);
+}
+
+/*
+ * Append an LC_THREAD load command for 'thread' to the core-file
+ * header being assembled at t->header + t->hoffset, followed by one
+ * (flavor, count, state) record per flavor.  For x86_THREAD_STATE32
+ * the non-volatile registers are pulled directly from the thread's
+ * kernel-stack save area; other flavors fall back to
+ * machine_thread_get_kern_state().  Advances t->hoffset past what
+ * was written.
+ */
+static void
+kern_collectth_state(thread_t thread, tir_t *t)
+{
+	vm_offset_t	header;
+	int  hoffset, i ;
+	mythread_state_flavor_t *flavors;
+	struct thread_command	*tc;
+	/*
+	 *	Fill in thread command structure.
+	 */
+	header = t->header;
+	hoffset = t->hoffset;
+	flavors = t->flavors;
+	
+	tc = (struct thread_command *) (header + hoffset);
+	tc->cmd = LC_THREAD;
+	tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
+	hoffset += sizeof(struct thread_command);
+	/*
+	 * Follow with a struct thread_state_flavor and
+	 * the appropriate thread state struct for each
+	 * thread state flavor.
+	 */
+	for (i = 0; i < kdp_mynum_flavors; i++) {
+		*(mythread_state_flavor_t *)(header+hoffset) =
+		    flavors[i];
+		hoffset += sizeof(mythread_state_flavor_t);
+		/* Locate and obtain the non-volatile register context
+		 * for this kernel thread. This should ideally be
+		 * encapsulated in machine_thread_get_kern_state()
+		 * but that routine appears to have been co-opted
+		 * by CHUD to obtain pre-interrupt state.
+		 */
+		if (flavors[i].flavor == x86_THREAD_STATE32) {
+			x86_thread_state32_t *tstate = (x86_thread_state32_t *) (header + hoffset);
+			vm_offset_t kstack;
+			bzero(tstate, x86_THREAD_STATE32_COUNT * sizeof(int));
+			if ((kstack = thread->kernel_stack) != 0){
+				struct x86_kernel_state32 *iks = STACK_IKS(kstack);
+				tstate->ebx = iks->k_ebx;
+				tstate->esp = iks->k_esp;
+				tstate->ebp = iks->k_ebp;
+				tstate->edi = iks->k_edi;
+				tstate->esi = iks->k_esi;
+				tstate->eip = iks->k_eip;
+		}
+		}
+		else if (machine_thread_get_kern_state(thread,
+			flavors[i].flavor, (thread_state_t) (header+hoffset),
+			&flavors[i].count) != KERN_SUCCESS)
+			printf ("Failure in machine_thread_get_kern_state()\n");
+		hoffset += flavors[i].count*sizeof(int);
+	}
+
+	t->hoffset = hoffset;
+}
+
+/* Intended to be called from the kernel trap handler if an unrecoverable fault
+ * occurs during a crashdump (which shouldn't happen since we validate mappings
+ * and so on). This should be reworked to attempt some form of recovery.
+ */
+/* Abort the in-progress crashdump: send EOF to the dump server,
+ * cancel the panic transfer, clear the panic-dump flags, reset the
+ * KDP state machine, and re-enter the debugger with a bad-access
+ * exception.  Always returns 0. */
+int
+kdp_dump_trap(
+	int type,
+	__unused x86_saved_state32_t	*saved_state)
+{
+	printf ("An unexpected trap (type %d) occurred during the system dump, terminating.\n", type);
+	kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0));
+	abort_panic_transfer();
+	kdp_flag &= ~KDP_PANIC_DUMP_ENABLED;
+	kdp_flag &= ~PANIC_CORE_ON_NMI;
+	kdp_flag &= ~PANIC_LOG_DUMP;
+
+	kdp_reset();
+
+	kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
+	return( 0 );
+}
+
+/*
+ * Transmit a Mach-O (MH_CORE) kernel core dump to the remote
+ * crashdump server: the mach header, one LC_SEGMENT per kernel
+ * section and VM region (IOKit-tagged regions become holes in the
+ * file), and a final LC_THREAD describing the current thread.
+ * Returns 0 on success or the first transmission error code.
+ */
+int
+kern_dump(void)
+{
+	vm_map_t	map;
+	unsigned int	thread_count, segment_count;
+	unsigned int	command_size = 0, header_size = 0, tstate_size = 0;
+	unsigned int	hoffset = 0, foffset = 0, nfoffset = 0,  vmoffset = 0;
+	unsigned int	max_header_size = 0;
+	vm_offset_t	header;
+	struct mach_header	*mh;
+	struct segment_command	*sc;
+	vm_size_t	size;
+	vm_prot_t	prot = 0;
+	vm_prot_t	maxprot = 0;
+	vm_inherit_t	inherit = 0;
+	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
+	vm_size_t	nflavors;
+	vm_size_t	i;
+	uint32_t	nesting_depth = 0;
+	kern_return_t	kret = 0;
+	struct vm_region_submap_info_64	vbr;
+	mach_msg_type_number_t	vbrcount  = 0;
+	tir_t tir1;
+
+	int error = 0;
+	int panic_error = 0;
+	unsigned int txstart = 0;
+	unsigned int mach_section_count = 4;
+	unsigned int num_sects_txed = 0;
+
+	map = kernel_map;
+
+	not_in_kdp = 0; /* Signal vm functions not to acquire locks */
+
+	thread_count = 1;
+	segment_count = get_vmmap_entries(map); 
+  
+	printf("Kernel map has %d entries\n", segment_count);
+
+	nflavors = kdp_mynum_flavors;
+	bcopy((char *)thread_flavor_array,(char *) flavors,sizeof(thread_flavor_array));
+
+	for (i = 0; i < nflavors; i++)
+		tstate_size += sizeof(mythread_state_flavor_t) +
+		    (flavors[i].count * sizeof(int));
+
+	command_size = (segment_count + mach_section_count) *
+	    sizeof(struct segment_command) +
+	    thread_count * sizeof(struct thread_command) +
+	    tstate_size * thread_count;
+
+	header_size = command_size + sizeof(struct mach_header);
+	header = (vm_offset_t) command_buffer;
+	
+	/*
+	 *	Set up Mach-O header for currently executing 32 bit kernel.
+	 */
+	printf ("Generated Mach-O header size was %d\n", header_size);
+
+	mh = (struct mach_header *) header;
+	mh->magic = MH_MAGIC;
+	mh->cputype = cpu_type();
+	mh->cpusubtype = cpu_subtype();
+	mh->filetype = MH_CORE;
+	mh->ncmds = segment_count + thread_count + mach_section_count;
+	mh->sizeofcmds = command_size;
+	mh->flags = 0;
+
+	hoffset = sizeof(struct mach_header);	/* offset into header */
+	foffset = round_page_32(header_size);	/* offset into file */
+	/* Padding */
+	if ((foffset - header_size) < (4*sizeof(struct segment_command))) {
+		foffset += ((4*sizeof(struct segment_command)) - (foffset-header_size)); 
+	}
+
+	max_header_size = foffset;
+
+	vmoffset = VM_MIN_ADDRESS;		/* offset into VM */
+
+	/* Transmit the Mach-O MH_CORE header, and seek forward past the 
+	 * area reserved for the segment and thread commands 
+	 * to begin data transmission 
+	 */
+
+	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(nfoffset) , &nfoffset)) < 0) { 
+		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
+		error = panic_error;
+		goto out;
+	} 
+
+	/* Fixed: '< 0' was inside the assignment parentheses, so
+	 * panic_error received the comparison result and this error
+	 * path could never trigger. */
+	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct mach_header), (caddr_t) mh)) < 0) {
+		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
+		error = panic_error;
+		goto out;
+	}
+
+	/* Fixed: same misplaced parenthesis as above. */
+	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset)) < 0) {
+		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
+		error = panic_error;
+		goto out;
+	}
+	printf ("Transmitting kernel state, please wait: ");
+
+	while ((segment_count > 0) || (kret == KERN_SUCCESS)){
+		/* Check if we've transmitted all the kernel sections */
+		if (num_sects_txed == mach_section_count) {
+
+			while (1) {
+
+				/*
+				 *	Get region information for next region.
+				 */
+
+				vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
+				if((kret = vm_region_recurse_64(map, 
+					    &vmoffset, &size, &nesting_depth, 
+					    (vm_region_recurse_info_t)&vbr,
+					    &vbrcount)) != KERN_SUCCESS) {
+					break;
+				}
+
+				if(vbr.is_submap) {
+					nesting_depth++;
+					continue;
+				} else {
+					break;
+				}
+			}
+
+			if(kret != KERN_SUCCESS)
+				break;
+
+			prot = vbr.protection;
+			maxprot = vbr.max_protection;
+			inherit = vbr.inheritance;
+		}
+		else
+		{
+			switch (num_sects_txed) {
+			case 0:
+				/* Transmit the kernel text section */
+				vmoffset = sectTEXTB;
+				size = sectSizeTEXT;
+				break;
+			case 1:
+				vmoffset = sectDATAB;
+				size = sectSizeDATA;
+				break;
+			case 2:
+				vmoffset = sectPRELINKB;
+				size = sectSizePRELINK;
+				break;
+			case 3:
+				vmoffset = sectLINKB;
+				size = sectSizeLINK;
+				break;
+			}
+			num_sects_txed++;
+		}
+		/*
+		 *	Fill in segment command structure.
+		 */
+    
+		if (hoffset > max_header_size)
+			break;
+		sc = (struct segment_command *) (header);
+		sc->cmd = LC_SEGMENT;
+		sc->cmdsize = sizeof(struct segment_command);
+		sc->segname[0] = 0;
+		sc->vmaddr = vmoffset;
+		sc->vmsize = size;
+		sc->fileoff = foffset;
+		sc->filesize = size;
+		sc->maxprot = maxprot;
+		sc->initprot = prot;
+		sc->nsects = 0;
+
+		if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) { 
+			printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
+			error = panic_error;
+			goto out;
+		} 
+    
+		if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct segment_command) , (caddr_t) sc)) < 0) {
+			printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
+			error = panic_error;
+			goto out;
+		}
+
+		/* Do not transmit memory tagged VM_MEMORY_IOKIT - instead,
+		 * seek past that region on the server - this creates a
+		 * hole in the file.
+		 */
+
+		if ((vbr.user_tag != VM_MEMORY_IOKIT)) {
+      
+			if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset)) < 0) {
+				printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
+				error = panic_error;
+				goto out;
+			}
+
+			txstart = vmoffset;
+
+			if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, size, (caddr_t) txstart)) < 0)	{
+				printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
+				error = panic_error;
+				goto out;
+			}
+		}
+
+		hoffset += sizeof(struct segment_command);
+		foffset += size;
+		vmoffset += size;
+		segment_count--;
+	}
+	tir1.header = header;
+	tir1.hoffset = 0;
+	tir1.flavors = flavors;
+	tir1.tstate_size = tstate_size;
+
+	/* Now send out the LC_THREAD load command, with the thread information
+	 * for the current activation.
+	 * Note that the corefile can contain LC_SEGMENT commands with file
+	 * offsets that point past the edge of the corefile, in the event that
+	 * the last N VM regions were all I/O mapped or otherwise
+	 * non-transferable memory,  not followed by a normal VM region;
+	 * i.e. there will be no hole that reaches to the end of the core file.
+	 */
+	kern_collectth_state (current_thread(), &tir1);
+
+	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) { 
+		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
+		error = panic_error;
+		goto out;
+	}
+  
+	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, tir1.hoffset , (caddr_t) header)) < 0) {
+		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
+		error = panic_error;
+		goto out;
+	}
+    
+	/* last packet */
+	if ((panic_error = kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0)
+	{
+		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
+		error = panic_error;
+		goto out;
+	}
+out:
+	return (error);
+}