diff --git a/exec/internal/compile/allocator.go b/exec/internal/compile/allocator.go
index 0dc2da34..23611479 100644
--- a/exec/internal/compile/allocator.go
+++ b/exec/internal/compile/allocator.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !appengine
+// +build !appengine,amd64
 
 package compile
 
diff --git a/exec/internal/compile/backend.go b/exec/internal/compile/backend.go
index 9af5fc38..9fb982b6 100644
--- a/exec/internal/compile/backend.go
+++ b/exec/internal/compile/backend.go
@@ -4,6 +4,8 @@
 
 package compile
 
+import "github.com/twitchyliquid64/golang-asm/obj"
+
 type dirtyState uint8
 
 const (
@@ -13,3 +15,10 @@ const (
 	stateLocalFirstElem                      // Caches a pointer to the locals array.
 	stateGlobalSliceHeader                   // Caches a pointer to the globals slice header.
 )
+
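+// Backend is implemented by architecture-specific native code generators.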
+type Backend interface {
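+	// paramsForMemoryOp returns the number of bytes a load or store
+	// opcode moves, and the native instruction implementing the move.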
+	paramsForMemoryOp(op byte) (size uint, inst obj.As)
+}
diff --git a/exec/internal/compile/backend_amd64.go b/exec/internal/compile/backend_amd64.go
index cb18ae1e..2cda198d 100644
--- a/exec/internal/compile/backend_amd64.go
+++ b/exec/internal/compile/backend_amd64.go
@@ -15,6 +15,9 @@ import (
 	"github.com/twitchyliquid64/golang-asm/obj/x86"
 )
 
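+// PlatformBackend is the native code generator for the compilation target, amd64 here.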
+type PlatformBackend = AMD64Backend
+
 var rhsConstOptimizable = map[byte]bool{
 	ops.I64Add:  true,
 	ops.I64Sub:  true,
diff --git a/exec/internal/compile/defaultBackend.go b/exec/internal/compile/defaultBackend.go
new file mode 100644
index 00000000..308f22ee
--- /dev/null
+++ b/exec/internal/compile/defaultBackend.go
@@ -0,0 +1,34 @@
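+// Copyright 2019 The go-interpreter Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+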
+// +build !amd64
+
+package compile
+
+import (
+	ops "github.com/go-interpreter/wagon/wasm/operators"
+	"github.com/twitchyliquid64/golang-asm/obj"
+	"github.com/twitchyliquid64/golang-asm/obj/x86"
+)
+
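+// PlatformBackend stands in for the native code generator on
+// architectures where none exists.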
+type PlatformBackend struct{}
+
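+// paramsForMemoryOp returns the number of bytes a memory opcode moves.
+// Only the size is consumed by the generic scanner; the x86 mov
+// returned alongside it is never emitted on non-amd64 builds.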
+func (b *PlatformBackend) paramsForMemoryOp(op byte) (size uint, inst obj.As) {
+	switch op {
+	case ops.I64Load, ops.F64Load:
+		return 8, x86.AMOVQ
+	case ops.I32Load, ops.F32Load:
+		return 4, x86.AMOVL
+	case ops.I64Store, ops.F64Store:
+		return 8, x86.AMOVQ
+	case ops.I32Store, ops.F32Store:
+		return 4, x86.AMOVL
+	}
+	panic("unreachable")
+}
diff --git a/exec/internal/compile/native.go b/exec/internal/compile/native.go
index e1d7a13b..b3ee2c67 100644
--- a/exec/internal/compile/native.go
+++ b/exec/internal/compile/native.go
@@ -33,16 +33,6 @@ const (
 	CompletionFatalInternalError
 )
 
-func makeExitIndex(idx int) CompletionStatus {
-	return CompletionStatus((idx << 8) & exitIndexMask)
-}
-
-const (
-	statusMask    = 15
-	exitIndexMask = 0x00000000ffffff00
-	unknownIndex  = 0xffffff
-)
-
 // JITExitSignal is the value returned from the execution of a native section.
 // The bits of this packed 64bit value is encoded as follows:
 // [00:04] Completion Status
@@ -50,14 +40,3 @@ const (
 // [08:32] Index of the WASM instruction where the exit occurred.
 // [32:64] Status-specific 32bit value.
 type JITExitSignal uint64
-
-// CompletionStatus decodes and returns the completion status of the exit.
-func (s JITExitSignal) CompletionStatus() CompletionStatus {
-	return CompletionStatus(s & statusMask)
-}
-
-// Index returns the index to the instruction where the exit happened.
-// 0xffffff is returned if the exit was due to normal completion.
-func (s JITExitSignal) Index() int {
-	return (int(s) & exitIndexMask) >> 8
-}
diff --git a/exec/internal/compile/native_completion_amd64.go b/exec/internal/compile/native_completion_amd64.go
new file mode 100644
index 00000000..3c88978a
--- /dev/null
+++ b/exec/internal/compile/native_completion_amd64.go
@@ -0,0 +1,30 @@
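+// Copyright 2019 The go-interpreter Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+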
+package compile
+
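+// makeExitIndex packs a wasm instruction index into the exit-index
+// bits of a CompletionStatus.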
+func makeExitIndex(idx int) CompletionStatus {
+	return CompletionStatus((idx << 8) & exitIndexMask)
+}
+
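+// Masks and sentinels for decoding the fields of a JITExitSignal,
+// whose layout is documented on the type in native.go.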
+const (
+	statusMask    = 15
+	exitIndexMask = 0x00000000ffffff00
+	unknownIndex  = 0xffffff
+)
+
+// CompletionStatus decodes and returns the completion status of the exit.
+func (s JITExitSignal) CompletionStatus() CompletionStatus {
+	return CompletionStatus(s & statusMask)
+}
+
+// Index returns the index to the instruction where the exit happened.
+// 0xffffff is returned if the exit was due to normal completion.
+func (s JITExitSignal) Index() int {
+	return (int(s) & exitIndexMask) >> 8
+}
diff --git a/exec/internal/compile/native_exec.go b/exec/internal/compile/native_exec.go
index 0c78b45a..e0ef97a5 100644
--- a/exec/internal/compile/native_exec.go
+++ b/exec/internal/compile/native_exec.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !appengine
+// +build !appengine,amd64
 
 package compile
 
diff --git a/exec/internal/compile/scanner.go b/exec/internal/compile/scanner.go
index 8dbd3e4a..194a4393 100644
--- a/exec/internal/compile/scanner.go
+++ b/exec/internal/compile/scanner.go
@@ -92,12 +92,12 @@ func (s *scanner) ScanFunc(bytecode []byte, meta *BytecodeMetadata) ([]Compilati
 		// TODO: Add to this table as backends support more opcodes.
 		switch inst.Op {
 		case ops.I64Load, ops.I32Load, ops.F64Load, ops.F32Load:
-			fakeBE := &AMD64Backend{}
+			fakeBE := &PlatformBackend{}
 			memSize, _ := fakeBE.paramsForMemoryOp(inst.Op)
 			inProgress.Metrics.MemoryReads += memSize
 			inProgress.Metrics.StackWrites++
 		case ops.I64Store, ops.I32Store, ops.F64Store, ops.F32Store:
-			fakeBE := &AMD64Backend{}
+			fakeBE := &PlatformBackend{}
 			memSize, _ := fakeBE.paramsForMemoryOp(inst.Op)
 			inProgress.Metrics.MemoryWrites += memSize
 			inProgress.Metrics.StackReads += 2
diff --git a/exec/native_compile.go b/exec/native_compile.go
index 0e4fe775..2f17fe9b 100644
--- a/exec/native_compile.go
+++ b/exec/native_compile.go
@@ -1,35 +1,15 @@
 // Copyright 2019 The go-interpreter Authors.  All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
+
+// +build !amd64
 
 package exec
 
 import (
-	"encoding/binary"
-	"fmt"
-	"runtime"
-
 	"github.com/go-interpreter/wagon/exec/internal/compile"
-	ops "github.com/go-interpreter/wagon/wasm/operators"
-)
-
-// Parameters that decide whether a sequence should be compiled.
-// TODO: Expose some way for these to be customized at runtime
-// via VMOptions.
-const (
-	// NOTE: must never be less than 5, as room is needed to pack the
-	// wagon.nativeExec instruction and its parameter.
-	minInstBytes                = 5
-	minArithInstructionSequence = 2
 )
 
-var supportedNativeArchs []nativeArch
-
-type nativeArch struct {
-	Arch, OS string
-	make     func(endianness binary.ByteOrder) *nativeCompiler
-}
-
 // nativeCompiler represents a backend for native code generation + execution.
 type nativeCompiler struct {
 	Scanner   sequenceScanner
@@ -40,6 +20,23 @@ type nativeCompiler struct {
 func (c *nativeCompiler) Close() error {
 	return c.allocator.Close()
 }
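+
+// nativeBackend reports that no native backend exists for this
+// architecture.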
+func nativeBackend() (bool, *nativeCompiler) {
+	return false, nil
+}
+
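+// tryNativeCompile is a no-op without a native backend.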
+func (vm *VM) tryNativeCompile() error {
+	return nil
+}
+
+// nativeCodeInvocation would invoke an assembled code block, but is
+// unreachable on this architecture: with no native backend, no wasm
+// opcodes are ever patched to ops.WagonNativeExec.
+func (vm *VM) nativeCodeInvocation(asmIndex uint32) {
+}
 
 // pageAllocator is responsible for the efficient allocation of
 // executable, aligned regions of executable memory.
@@ -62,153 +59,3 @@ type instructionBuilder interface {
 	// Build compiles the specified bytecode into native instructions.
 	Build(candidate compile.CompilationCandidate, code []byte, meta *compile.BytecodeMetadata) ([]byte, error)
 }
-
-// NativeCompilationError represents a failure to compile a sequence
-// of instructions to native code.
-type NativeCompilationError struct {
-	Start, End uint
-	FuncIndex  int
-	Err        error
-}
-
-func (e NativeCompilationError) Error() string {
-	return fmt.Sprintf("exec: native compilation failed on vm.funcs[%d].code[%d:%d]: %v", e.FuncIndex, e.Start, e.End, e.Err)
-}
-
-func nativeBackend() (bool, *nativeCompiler) {
-	for _, c := range supportedNativeArchs {
-		if c.Arch == runtime.GOARCH && c.OS == runtime.GOOS {
-			backend := c.make(endianess)
-			return true, backend
-		}
-	}
-	return false, nil
-}
-
-func (vm *VM) tryNativeCompile() error {
-	if vm.nativeBackend == nil {
-		return nil
-	}
-
-	for i := range vm.funcs {
-		if _, isGoFunc := vm.funcs[i].(*goFunction); isGoFunc {
-			continue
-		}
-
-		fn := vm.funcs[i].(compiledFunction)
-		candidates, err := vm.nativeBackend.Scanner.ScanFunc(fn.code, fn.codeMeta)
-		if err != nil {
-			return fmt.Errorf("exec: AOT scan failed on vm.funcs[%d]: %v", i, err)
-		}
-
-		for _, candidate := range candidates {
-			if (candidate.Metrics.IntegerOps + candidate.Metrics.FloatOps) < minArithInstructionSequence {
-				continue
-			}
-			lower, upper := candidate.Bounds()
-			if (upper - lower) < minInstBytes {
-				continue
-			}
-
-			asm, err := vm.nativeBackend.Builder.Build(candidate, fn.code, fn.codeMeta)
-			if err != nil {
-				return NativeCompilationError{
-					Err:       err,
-					Start:     lower,
-					End:       upper,
-					FuncIndex: i,
-				}
-			}
-			unit, err := vm.nativeBackend.allocator.AllocateExec(asm)
-			if err != nil {
-				return fmt.Errorf("exec: allocator.AllocateExec() failed: %v", err)
-			}
-			fn.asm = append(fn.asm, asmBlock{
-				nativeUnit: unit,
-				resumePC:   upper,
-			})
-
-			// Patch the wasm opcode stream to call into the native section.
-			// The number of bytes touched here must always be equal to
-			// nativeExecPrologueSize and <= minInstructionSequence.
-			fn.code[lower] = ops.WagonNativeExec
-			endianess.PutUint32(fn.code[lower+1:], uint32(len(fn.asm)-1))
-			// make the remainder of the recompiled instructions
-			// unreachable: this should trap the program in the event that
-			// a bug in code offsets & candidate sequence detection results in
-			// a jump to the middle of re-compiled code.
-			// This conservative behaviour is the least likely to result in
-			// bugs becoming security issues.
-			for i := lower + 5; i < upper-1; i++ {
-				fn.code[i] = ops.Unreachable
-			}
-		}
-		vm.funcs[i] = fn
-	}
-
-	return nil
-}
-
-// nativeCodeInvocation calls into one of the assembled code blocks.
-// Assembled code blocks expect the following two pieces of
-// information on the stack:
-// [fp:fp+pointerSize]: sliceHeader for the stack.
-// [fp+pointerSize:fp+pointerSize*2]: sliceHeader for locals variables.
-func (vm *VM) nativeCodeInvocation(asmIndex uint32) {
-	block := vm.ctx.asm[asmIndex]
-	finishSignal := block.nativeUnit.Invoke(&vm.ctx.stack, &vm.ctx.locals, &vm.globals, &vm.memory)
-
-	switch finishSignal.CompletionStatus() {
-	case compile.CompletionOK:
-	case compile.CompletionFatalInternalError:
-		panic("fatal error in native execution")
-	case compile.CompletionBadBounds:
-		panic("exec: out of bounds memory access")
-	}
-	vm.ctx.pc = int64(block.resumePC)
-}
-
-// CompileStats returns statistics about native compilation performed on
-// the VM.
-func (vm *VM) CompileStats() NativeCompileStats {
-	out := NativeCompileStats{
-		Ops: map[byte]*OpStats{},
-	}
-
-	for i := range vm.funcs {
-		if _, isGoFunc := vm.funcs[i].(*goFunction); isGoFunc {
-			continue
-		}
-
-		fn := vm.funcs[i].(compiledFunction)
-		out.NumCompiledBlocks += len(fn.asm)
-
-		for _, inst := range fn.codeMeta.Instructions {
-			if _, exists := out.Ops[inst.Op]; !exists {
-				out.Ops[inst.Op] = &OpStats{}
-			}
-
-			// Instructions which are native-compiled are re-written to the
-			// ops.WagonNativeExec opcode, so a mismatch indicates native compilation.
-			if fn.code[inst.Start] == inst.Op {
-				out.Ops[inst.Op].Interpreted++
-			} else {
-				out.Ops[inst.Op].Compiled++
-			}
-		}
-	}
-
-	return out
-}
-
-type OpStats struct {
-	Interpreted int
-	Compiled    int
-}
-
-// NativeCompileStats encapsulates statistics about any native
-// compilation performed on the VM.
-type NativeCompileStats struct {
-	Ops               map[byte]*OpStats
-	NumCompiledBlocks int
-}
diff --git a/exec/native_compile_amd64.go b/exec/native_compile_amd64.go
new file mode 100644
index 00000000..0e4fe775
--- /dev/null
+++ b/exec/native_compile_amd64.go
@@ -0,0 +1,218 @@
+// Copyright 2019 The go-interpreter Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package exec
+
+import (
+	"encoding/binary"
+	"fmt"
+	"runtime"
+
+	"github.com/go-interpreter/wagon/exec/internal/compile"
+	ops "github.com/go-interpreter/wagon/wasm/operators"
+)
+
+// Parameters that decide whether a sequence should be compiled.
+// TODO: Expose some way for these to be customized at runtime
+// via VMOptions.
+const (
+	// NOTE: must never be less than 5, as room is needed to pack the
+	// ops.WagonNativeExec instruction and its parameter.
+	minInstBytes                = 5
+	minArithInstructionSequence = 2
+)
+
+var supportedNativeArchs []nativeArch
+
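+// nativeArch pairs an architecture/OS with the constructor for its
+// native compiler backend.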
+type nativeArch struct {
+	Arch, OS string
+	make     func(endianness binary.ByteOrder) *nativeCompiler
+}
+
+// nativeCompiler represents a backend for native code generation + execution.
+type nativeCompiler struct {
+	Scanner   sequenceScanner
+	Builder   instructionBuilder
+	allocator pageAllocator
+}
+
+func (c *nativeCompiler) Close() error {
+	return c.allocator.Close()
+}
+
+// pageAllocator is responsible for the efficient allocation of
+// executable, aligned regions of executable memory.
+type pageAllocator interface {
+	AllocateExec(asm []byte) (compile.NativeCodeUnit, error)
+	Close() error
+}
+
+// sequenceScanner is responsible for detecting runs of supported opcodes
+// that could benefit from compilation into native instructions.
+type sequenceScanner interface {
+	// ScanFunc returns an ordered, non-overlapping set of
+	// sequences to compile into native code.
+	ScanFunc(bytecode []byte, meta *compile.BytecodeMetadata) ([]compile.CompilationCandidate, error)
+}
+
+// instructionBuilder is responsible for compiling wasm opcodes into
+// native instructions.
+type instructionBuilder interface {
+	// Build compiles the specified bytecode into native instructions.
+	Build(candidate compile.CompilationCandidate, code []byte, meta *compile.BytecodeMetadata) ([]byte, error)
+}
+
+// NativeCompilationError represents a failure to compile a sequence
+// of instructions to native code.
+type NativeCompilationError struct {
+	Start, End uint
+	FuncIndex  int
+	Err        error
+}
+
+func (e NativeCompilationError) Error() string {
+	return fmt.Sprintf("exec: native compilation failed on vm.funcs[%d].code[%d:%d]: %v", e.FuncIndex, e.Start, e.End, e.Err)
+}
+
+func nativeBackend() (bool, *nativeCompiler) {
+	for _, c := range supportedNativeArchs {
+		if c.Arch == runtime.GOARCH && c.OS == runtime.GOOS {
+			backend := c.make(endianess)
+			return true, backend
+		}
+	}
+	return false, nil
+}
+
+func (vm *VM) tryNativeCompile() error {
+	if vm.nativeBackend == nil {
+		return nil
+	}
+
+	for i := range vm.funcs {
+		if _, isGoFunc := vm.funcs[i].(*goFunction); isGoFunc {
+			continue
+		}
+
+		fn := vm.funcs[i].(compiledFunction)
+		candidates, err := vm.nativeBackend.Scanner.ScanFunc(fn.code, fn.codeMeta)
+		if err != nil {
+			return fmt.Errorf("exec: AOT scan failed on vm.funcs[%d]: %v", i, err)
+		}
+
+		for _, candidate := range candidates {
+			if (candidate.Metrics.IntegerOps + candidate.Metrics.FloatOps) < minArithInstructionSequence {
+				continue
+			}
+			lower, upper := candidate.Bounds()
+			if (upper - lower) < minInstBytes {
+				continue
+			}
+
+			asm, err := vm.nativeBackend.Builder.Build(candidate, fn.code, fn.codeMeta)
+			if err != nil {
+				return NativeCompilationError{
+					Err:       err,
+					Start:     lower,
+					End:       upper,
+					FuncIndex: i,
+				}
+			}
+			unit, err := vm.nativeBackend.allocator.AllocateExec(asm)
+			if err != nil {
+				return fmt.Errorf("exec: allocator.AllocateExec() failed: %v", err)
+			}
+			fn.asm = append(fn.asm, asmBlock{
+				nativeUnit: unit,
+				resumePC:   upper,
+			})
+
+			// Patch the wasm opcode stream to call into the native section.
+			// The number of bytes touched here must always be equal to
+			// nativeExecPrologueSize and <= minInstBytes.
+			fn.code[lower] = ops.WagonNativeExec
+			endianess.PutUint32(fn.code[lower+1:], uint32(len(fn.asm)-1))
+			// make the remainder of the recompiled instructions
+			// unreachable: this should trap the program in the event that
+			// a bug in code offsets & candidate sequence detection results in
+			// a jump to the middle of re-compiled code.
+			// This conservative behaviour is the least likely to result in
+			// bugs becoming security issues.
+			for i := lower + 5; i < upper-1; i++ {
+				fn.code[i] = ops.Unreachable
+			}
+		}
+		vm.funcs[i] = fn
+	}
+
+	return nil
+}
+
+// nativeCodeInvocation calls into one of the assembled code blocks.
+// Assembled code blocks expect the following two pieces of
+// information on the stack:
+// [fp:fp+pointerSize]: sliceHeader for the stack.
+// [fp+pointerSize:fp+pointerSize*2]: sliceHeader for local variables.
+func (vm *VM) nativeCodeInvocation(asmIndex uint32) {
+	block := vm.ctx.asm[asmIndex]
+	finishSignal := block.nativeUnit.Invoke(&vm.ctx.stack, &vm.ctx.locals, &vm.globals, &vm.memory)
+
+	switch finishSignal.CompletionStatus() {
+	case compile.CompletionOK:
+	case compile.CompletionFatalInternalError:
+		panic("fatal error in native execution")
+	case compile.CompletionBadBounds:
+		panic("exec: out of bounds memory access")
+	}
+	vm.ctx.pc = int64(block.resumePC)
+}
+
+// CompileStats returns statistics about native compilation performed on
+// the VM.
+func (vm *VM) CompileStats() NativeCompileStats {
+	out := NativeCompileStats{
+		Ops: map[byte]*OpStats{},
+	}
+
+	for i := range vm.funcs {
+		if _, isGoFunc := vm.funcs[i].(*goFunction); isGoFunc {
+			continue
+		}
+
+		fn := vm.funcs[i].(compiledFunction)
+		out.NumCompiledBlocks += len(fn.asm)
+
+		for _, inst := range fn.codeMeta.Instructions {
+			if _, exists := out.Ops[inst.Op]; !exists {
+				out.Ops[inst.Op] = &OpStats{}
+			}
+
+			// Instructions which are native-compiled are re-written to the
+			// ops.WagonNativeExec opcode, so a mismatch indicates native compilation.
+			if fn.code[inst.Start] == inst.Op {
+				out.Ops[inst.Op].Interpreted++
+			} else {
+				out.Ops[inst.Op].Compiled++
+			}
+		}
+	}
+
+	return out
+}
+
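+// OpStats counts how often an opcode was interpreted versus run as
+// part of a natively-compiled block.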
+type OpStats struct {
+	Interpreted int
+	Compiled    int
+}
+
+// NativeCompileStats encapsulates statistics about any native
+// compilation performed on the VM.
+type NativeCompileStats struct {
+	Ops               map[byte]*OpStats
+	NumCompiledBlocks int
+}
diff --git a/exec/native_compile_nogae.go b/exec/native_compile_nogae_amd64.go
similarity index 100%
rename from exec/native_compile_nogae.go
rename to exec/native_compile_nogae_amd64.go
diff --git a/exec/native_compile_test.go b/exec/native_compile_test.go
index 295c63ba..f9bfb198 100644
--- a/exec/native_compile_test.go
+++ b/exec/native_compile_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !appengine
+// +build !appengine,amd64
 
 package exec