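Harden the PowerPC port of v8 3.14.5.10: wire the ppc and ppc64
architectures into the SCons build (SConstruct and src/SConscript), and
rewrite the port's bare "#if MACRO" preprocessor tests as "#ifdef MACRO"
or "#if defined(MACRO)".

Rationale, inferred from the changes themselves: "#if MACRO" evaluates
the macro as an expression, so an undefined macro is silently treated
as 0 (and warns under -Wundef), while a macro that is defined but empty
is a hard error. "#ifdef" and "#if defined()" test only whether the
macro is defined, so the port no longer depends on flags such as
V8_TARGET_ARCH_PPC64 or ABI_USES_FUNCTION_DESCRIPTORS carrying a
numeric value. A minimal illustration, not part of the patch; the
macro name FLAG is hypothetical:

    #define FLAG                    /* defined, but expands to nothing */
    #if FLAG                        /* error: #if with no expression   */
    #endif
    #ifdef FLAG                     /* OK: tests definedness only      */
    #endif
    #if defined(FLAG) && !defined(USE_SIMULATOR)  /* composable form,
                                       as used in the hunks below      */
    #endif
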
diff -up v8-3.14.5.10/SConstruct.ppc-harder v8-3.14.5.10/SConstruct
--- v8-3.14.5.10/SConstruct.ppc-harder	2012-10-22 09:09:53.000000000 -0400
+++ v8-3.14.5.10/SConstruct	2017-03-01 12:47:36.529605806 -0500
@@ -143,6 +143,12 @@ LIBRARY_FLAGS = {
       'CCFLAGS':      ['-m32'],
       'LINKFLAGS':    ['-m32']
     },
+    'arch:ppc': {
+      'CPPDEFINES':   ['V8_TARGET_ARCH_PPC'],
+    },
+    'arch:ppc64': {
+      'CPPDEFINES':   ['V8_TARGET_ARCH_PPC64', 'V8_TARGET_ARCH_PPC'],
+    },
     'arch:arm': {
       'CPPDEFINES':   ['V8_TARGET_ARCH_ARM'],
       'unalignedaccesses:on' : {
@@ -994,7 +1000,7 @@ def GuessStrictAliasing(env):
 
 PLATFORM_OPTIONS = {
   'arch': {
-    'values': ['arm', 'ia32', 'x64', 'mips'],
+    'values': ['arm', 'ia32', 'x64', 'mips', 'ppc64', 'ppc'],
     'guess': GuessArch,
     'help': 'the architecture to build for'
   },
diff -up v8-3.14.5.10/src/ppc/assembler-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/assembler-ppc.cc
--- v8-3.14.5.10/src/ppc/assembler-ppc.cc.ppc-harder	2017-03-01 12:47:36.471607257 -0500
+++ v8-3.14.5.10/src/ppc/assembler-ppc.cc	2017-03-01 12:47:36.516606131 -0500
@@ -72,7 +72,7 @@ static bool is_processor(const char* p)
 
     read_tried = true;
     if (fd != -1) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
       static Elf64_auxv_t buffer[16];
       Elf64_auxv_t *auxv_element;
 #else
@@ -359,7 +359,7 @@ Register Assembler::GetRB(Instr instr) {
   return reg;
 }
 
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
 // This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
 bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2,
                              Instr instr3, Instr instr4, Instr instr5) {
@@ -392,7 +392,7 @@ bool Assembler::IsRlwinm(Instr instr) {
   return ((instr & kOpcodeMask) == RLWINMX);
 }
 
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
 bool Assembler::IsRldicl(Instr instr) {
   return (((instr & kOpcodeMask) == EXT5) &&
           ((instr & kExt5OpcodeMask) == RLDICL));
@@ -903,7 +903,7 @@ void Assembler::orx(Register dst, Regist
 
 void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
   intptr_t imm16 = src2.imm_;
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
   int L = 1;
 #else
   int L = 0;
@@ -916,7 +916,7 @@ void Assembler::cmpi(Register src1, cons
 
 void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
   uintptr_t uimm16 = src2.imm_;
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
   int L = 1;
 #else
   int L = 0;
@@ -928,7 +928,7 @@ void Assembler::cmpli(Register src1, con
 }
 
 void Assembler::cmp(Register src1, Register src2, CRegister cr) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
   int L = 1;
 #else
   int L = 0;
@@ -939,7 +939,7 @@ void Assembler::cmp(Register src1, Regis
 }
 
 void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
   int L = 1;
 #else
   int L = 0;
@@ -1027,7 +1027,7 @@ void Assembler::lwzux(Register rt, const
 }
 
 void Assembler::lwa(Register dst, const MemOperand &src) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
   int offset = src.offset();
   ASSERT(!src.ra_.is(r0));
   ASSERT(!(offset & 3) && is_int16(offset));
@@ -1116,7 +1116,7 @@ void Assembler::andc(Register dst, Regis
   x_form(EXT2 | ANDCX, dst, src1, src2, rc);
 }
 
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
 // 64bit specific instructions
 void Assembler::ld(Register rd, const MemOperand &src) {
   int offset = src.offset();
@@ -1273,7 +1273,7 @@ void Assembler::marker_asm(int mcode) {
 // TOC and static chain are ignored and set to 0.
 void Assembler::function_descriptor() {
   RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
   uint64_t value = reinterpret_cast<uint64_t>(pc_) + 3 * kPointerSize;
 #if __BYTE_ORDER == __LITTLE_ENDIAN
   emit(static_cast<uint32_t>(value & 0xFFFFFFFF));
@@ -1307,7 +1307,7 @@ void Assembler::mov(Register dst, const
     RecordRelocInfo(src.rmode_, src.imm_);
   }
 
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
   int64_t value = src.immediate();
   int32_t hi_32 = static_cast<int64_t>(value) >> 32;
   int32_t lo_32 = static_cast<int32_t>(value);
@@ -1394,7 +1394,7 @@ void Assembler::info(const char* msg, Co
                      CRegister cr) {
   if (::v8::internal::FLAG_trace_sim_stubs) {
     emit(0x7d9ff808);
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_TARGET_ARCH_PPC64)
     uint64_t value = reinterpret_cast<uint64_t>(msg);
     emit(static_cast<uint32_t>(value >> 32));
     emit(static_cast<uint32_t>(value & 0xFFFFFFFF));
@@ -1759,7 +1759,7 @@ void Assembler::GrowBuffer() {
   // buffer nor pc absolute pointing inside the code buffer, so there is no need
   // to relocate any emitted relocation entries.
 
-#if ABI_USES_FUNCTION_DESCRIPTORS
+#if defined(ABI_USES_FUNCTION_DESCRIPTORS)
   // Relocate runtime entries.
   for (RelocIterator it(desc); !it.done(); it.next()) {
     RelocInfo::Mode rmode = it.rinfo()->rmode();
diff -up v8-3.14.5.10/src/ppc/assembler-ppc-inl.h.ppc-harder v8-3.14.5.10/src/ppc/assembler-ppc-inl.h
diff -up v8-3.14.5.10/src/ppc/builtins-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/builtins-ppc.cc
--- v8-3.14.5.10/src/ppc/builtins-ppc.cc.ppc-harder	2017-03-01 12:47:36.473607207 -0500
+++ v8-3.14.5.10/src/ppc/builtins-ppc.cc	2017-03-01 12:47:36.516606131 -0500
@@ -1412,7 +1412,7 @@ void Builtins::Generate_FunctionCall(Mac
     __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
     __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
     __ TestBit(r6,
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
                SharedFunctionInfo::kStrictModeFunction,
 #else
                SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
@@ -1422,7 +1422,7 @@ void Builtins::Generate_FunctionCall(Mac
 
     // Do not transform the receiver for native (Compilerhints already in r6).
     __ TestBit(r6,
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
                SharedFunctionInfo::kNative,
 #else
                SharedFunctionInfo::kNative + kSmiTagSize,
@@ -1650,7 +1650,7 @@ void Builtins::Generate_FunctionApply(Ma
     Label call_to_object, use_global_receiver;
     __ lwz(r5, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
     __ TestBit(r5,
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
                SharedFunctionInfo::kStrictModeFunction,
 #else
                SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
@@ -1660,7 +1660,7 @@ void Builtins::Generate_FunctionApply(Ma
 
     // Do not transform the receiver for strict mode functions.
     __ TestBit(r5,
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
                SharedFunctionInfo::kNative,
 #else
                SharedFunctionInfo::kNative + kSmiTagSize,
diff -up v8-3.14.5.10/src/ppc/codegen-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/codegen-ppc.cc
--- v8-3.14.5.10/src/ppc/codegen-ppc.cc.ppc-harder	2017-03-01 12:47:36.474607182 -0500
+++ v8-3.14.5.10/src/ppc/codegen-ppc.cc	2017-03-01 12:47:36.516606131 -0500
@@ -181,7 +181,7 @@ void ElementsTransitionGenerator::Genera
   __ addi(r10, r9, Operand(FixedDoubleArray::kHeaderSize));
   __ SmiToDoubleArrayOffset(r9, r8);
   __ add(r9, r10, r9);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   __ mov(r7, Operand(kHoleNanInt64));
 #else
   __ mov(r7, Operand(kHoleNanLower32));
@@ -236,7 +236,7 @@ void ElementsTransitionGenerator::Genera
     __ CompareRoot(r22, Heap::kTheHoleValueRootIndex);
     __ Assert(eq, "object found in smi-only array");
   }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   __ std(r7, MemOperand(r10, 0));
 #else
 #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN
@@ -330,7 +330,7 @@ void ElementsTransitionGenerator::Genera
   // Non-hole double, copy value into a heap number.
   __ AllocateHeapNumber(r5, r3, r4, r22, &gc_required);
   // r5: new heap number
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   __ ld(r3, MemOperand(r7, -8));
   __ addi(r4, r5, Operand(-1));  // subtract tag for std
   __ std(r3, MemOperand(r4, HeapNumber::kValueOffset));
diff -up v8-3.14.5.10/src/ppc/code-stubs-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/code-stubs-ppc.cc
--- v8-3.14.5.10/src/ppc/code-stubs-ppc.cc.ppc-harder	2017-03-01 12:47:36.477607107 -0500
+++ v8-3.14.5.10/src/ppc/code-stubs-ppc.cc	2017-03-01 12:47:36.517606106 -0500
@@ -660,7 +660,7 @@ void FloatingPointHelper::ConvertIntToDo
   __ subi(sp, sp, Operand(8));  // reserve one temporary double on the stack
 
   // sign-extend src to 64-bit and store it to temp double on the stack
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   __ extsw(r0, src);
   __ std(r0, MemOperand(sp, 0));
 #else
@@ -692,7 +692,7 @@ void FloatingPointHelper::ConvertUnsigne
   __ subi(sp, sp, Operand(8));  // reserve one temporary double on the stack
 
   // zero-extend src to 64-bit and store it to temp double on the stack
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   __ clrldi(r0, src, Operand(32));
   __ std(r0, MemOperand(sp, 0));
 #else
@@ -722,7 +722,7 @@ void FloatingPointHelper::ConvertIntToFl
   __ subi(sp, sp, Operand(8));  // reserve one temporary double on the stack
 
   // sign-extend src to 64-bit and store it to temp double on the stack
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   __ extsw(int_scratch, src);
   __ std(int_scratch, MemOperand(sp, 0));
 #else
@@ -1559,7 +1559,7 @@ void ToBooleanStub::Generate(MacroAssemb
     __ lfd(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
     __ li(r0, Operand::Zero());
     __ push(r0);
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
     __ push(r0);
 #endif
     __ lfd(d2, MemOperand(sp, 0));
@@ -1847,7 +1847,7 @@ void UnaryOpStub::GenerateHeapNumberCode
   // Do the bitwise operation and check if the result fits in a smi.
   __ notx(r4, r4);
 
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
   Label try_float;
   __ JumpIfNotSmiCandidate(r4, r5, &try_float);
 #endif
@@ -1856,7 +1856,7 @@ void UnaryOpStub::GenerateHeapNumberCode
   __ SmiTag(r3, r4);
   __ Ret();
 
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
   // Try to store the result in a heap number.
   __ bind(&try_float);
   if (mode_ == UNARY_NO_OVERWRITE) {
@@ -2073,7 +2073,7 @@ void BinaryOpStub::GenerateSmiSmiOperati
     }
     case Token::MUL: {
       Label mul_zero, mul_neg_zero;
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
       // Remove tag from both operands.
       __ SmiUntag(ip, right);
       __ SmiUntag(r0, left);
@@ -2102,7 +2102,7 @@ void BinaryOpStub::GenerateSmiSmiOperati
       // Go slow on zero result to handle -0.
       __ cmpi(scratch1, Operand::Zero());
       __ beq(&mul_zero);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
       __ SmiTag(right, scratch1);
 #else
       __ mr(right, scratch1);
@@ -2160,7 +2160,7 @@ void BinaryOpStub::GenerateSmiSmiOperati
       __ sub(scratch1, ip, scratch1, LeaveOE, SetRC);
       // If the result is 0, we need to check for the -0 case.
       __ beq(&check_neg_zero, cr0);
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
       // Check that the signed result fits in a Smi.
       __ JumpIfNotSmiCandidate(scratch1, scratch2, &not_smi_result);
 #endif
@@ -2212,7 +2212,7 @@ void BinaryOpStub::GenerateSmiSmiOperati
       __ SmiUntag(scratch1, left);
       __ GetLeastBitsFromSmi(scratch2, right, 5);
       __ ShiftLeft(scratch1, scratch1, scratch2);
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
       // Check that the signed result fits in a Smi.
       __ JumpIfNotSmiCandidate(scratch1, scratch2, &not_smi_result);
 #endif
@@ -2358,7 +2358,7 @@ void BinaryOpStub::GenerateFPOperation(M
           // The code below for writing into heap numbers isn't capable of
           // writing the register as an unsigned int so we go to slow case if we
           // hit this case.
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
           const Condition cond = ne;
           __ srw(r5, r6, r5);
           __ TestSignBit32(r5, r0);
@@ -2378,7 +2378,7 @@ void BinaryOpStub::GenerateFPOperation(M
           UNREACHABLE();
       }
 
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
       // Check that the *signed* result fits in a smi.
       __ JumpIfNotSmiCandidate(r5, r6, &result_not_a_smi);
 #endif
@@ -2631,7 +2631,7 @@ void BinaryOpStub::GenerateInt32Stub(Mac
                             &transition : &return_heap_number);
         __ bne(not_int32);
 
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
         // Check if the result fits in a smi.
         // If not try to return a heap number.
         __ JumpIfNotSmiCandidate(scratch1, scratch2, &return_heap_number);
@@ -2643,7 +2643,7 @@ void BinaryOpStub::GenerateInt32Stub(Mac
 
         __ subi(sp, sp, Operand(8));
         __ stfd(d1, MemOperand(sp, 0));
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
         __ ld(scratch2, MemOperand(sp, 0));
 #else
 #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN
@@ -2743,7 +2743,7 @@ void BinaryOpStub::GenerateInt32Stub(Mac
           // We only get a negative result if the shift value (r5) is 0.
           // This result cannot be respresented as a signed 32-bit integer, try
           // to return a heap number if we can.
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
           const Condition cond = ne;
           __ srw(r5, r6, r5);
           __ TestSignBit32(r5, r0);
@@ -2764,7 +2764,7 @@ void BinaryOpStub::GenerateInt32Stub(Mac
           UNREACHABLE();
       }
 
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
       // Check if the result fits in a smi.
       // If not try to return a heap number. (We know the result is an int32.)
       __ JumpIfNotSmiCandidate(r5, scratch1, &return_heap_number);
@@ -3084,7 +3084,7 @@ void TranscendentalCacheStub::Generate(M
     char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
     char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
     // Two uint_32's and a pointer.
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
 #else
     CHECK_EQ(12, static_cast<int>(elem2_start - elem_start));
@@ -3095,7 +3095,7 @@ void TranscendentalCacheStub::Generate(M
   }
 #endif
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   // Find the address of the r4'th entry in the cache, i.e., &r3[r4*16].
   __ ShiftLeftImm(scratch0, r4, Operand(4));
 #else
@@ -3598,12 +3598,12 @@ void CEntryStub::GenerateCore(MacroAssem
 
   __ mov(isolate_reg, Operand(ExternalReference::isolate_address()));
 
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+#if defined(ABI_USES_FUNCTION_DESCRIPTORS) && !defined(USE_SIMULATOR)
   // Native AIX/PPC64 Linux use a function descriptor.
   __ LoadP(ToRegister(2), MemOperand(r15, kPointerSize));  // TOC
   __ LoadP(ip, MemOperand(r15, 0));  // Instruction address
   Register target = ip;
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+#elif defined(ABI_TOC_ADDRESSABILITY_VIA_IP)
   Register target = ip;
   __ Move(ip, r15);
 #else
@@ -3814,7 +3814,7 @@ void JSEntryStub::GenerateBody(MacroAsse
   Label invoke, handler_entry, exit;
 
   // Called from C
-#if ABI_USES_FUNCTION_DESCRIPTORS
+#ifdef ABI_USES_FUNCTION_DESCRIPTORS
   __ function_descriptor();
 #endif
 
@@ -3993,7 +3993,7 @@ void InstanceofStub::Generate(MacroAssem
   const Register scratch2 = r8;
   Register scratch3 = no_reg;
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   const int32_t kDeltaToLoadBoolResult = 9 * Assembler::kInstrSize;
 #else
   const int32_t kDeltaToLoadBoolResult = 5 * Assembler::kInstrSize;
@@ -4875,7 +4875,7 @@ void RegExpExecStub::Generate(MacroAssem
   __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
 
 
-#if ABI_USES_FUNCTION_DESCRIPTORS && defined(USE_SIMULATOR)
+#if defined(ABI_USES_FUNCTION_DESCRIPTORS) && defined(USE_SIMULATOR)
   // Even Simulated AIX/PPC64 Linux uses a function descriptor for the
   // RegExp routine.  Extract the instruction address here since
   // DirectCEntryStub::GenerateCall will not do it for calls out to
@@ -6777,12 +6777,12 @@ void DirectCEntryStub::GenerateCall(Macr
 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                     Register target) {
   Register scratch = r11;
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+#if defined(ABI_USES_FUNCTION_DESCRIPTORS) && !defined(USE_SIMULATOR)
   Register dest = ip;
   // Native AIX/PPC64 Linux use a function descriptor.
   __ LoadP(ToRegister(2), MemOperand(target, kPointerSize));  // TOC
   __ LoadP(ip, MemOperand(target, 0));  // Instruction address
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+#elif defined(ABI_TOC_ADDRESSABILITY_VIA_IP)
   Register dest = ip;
   __ Move(ip, target);
 #else
@@ -7411,7 +7411,7 @@ void StoreArrayLiteralElementStub::Gener
   __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
   __ SmiToPtrArrayOffset(r9, r6);
   __ add(r9, r8, r9);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   // add due to offset alignment requirements of StorePU
   __ addi(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ StoreP(r3, MemOperand(r9));
@@ -7485,11 +7485,11 @@ void ProfileEntryHookStub::Generate(Macr
   __ mov(ip, Operand(reinterpret_cast<intptr_t>(&entry_hook_)));
   __ LoadP(ip, MemOperand(ip));
 
-#if ABI_USES_FUNCTION_DESCRIPTORS
+#ifdef ABI_USES_FUNCTION_DESCRIPTORS
   // Function descriptor
   __ LoadP(ToRegister(2), MemOperand(ip, kPointerSize));
   __ LoadP(ip, MemOperand(ip, 0));
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+#elif defined(ABI_TOC_ADDRESSABILITY_VIA_IP)
   // ip already set.
 #endif
 
diff -up v8-3.14.5.10/src/ppc/deoptimizer-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/deoptimizer-ppc.cc
--- v8-3.14.5.10/src/ppc/deoptimizer-ppc.cc.ppc-harder	2017-03-01 12:47:36.480607032 -0500
+++ v8-3.14.5.10/src/ppc/deoptimizer-ppc.cc	2017-03-01 12:47:36.518606081 -0500
@@ -42,7 +42,7 @@ const int Deoptimizer::table_entry_size_
 
 
 int Deoptimizer::patch_size() {
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   const int kCallInstructionSizeInWords = 7;
 #else
   const int kCallInstructionSizeInWords = 4;
@@ -121,7 +121,7 @@ void Deoptimizer::DeoptimizeFunction(JSF
 }
 
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
 static const int32_t kBranchBeforeStackCheck = 0x409c0020;
 static const int32_t kBranchBeforeInterrupt =  0x409c0044;
 #else
@@ -154,7 +154,7 @@ void Deoptimizer::PatchStackCheckCodeAt(
   ASSERT(Memory::int32_at(pc_after - 2 * kInstrSize) == 0x7d8803a6);
   ASSERT(Memory::int32_at(pc_after - kInstrSize) == 0x4e800021);
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   ASSERT(Assembler::Is64BitLoadIntoR12(
       Assembler::instr_at(pc_after - 7 * kInstrSize),
       Assembler::instr_at(pc_after - 6 * kInstrSize),
@@ -188,7 +188,7 @@ void Deoptimizer::PatchStackCheckCodeAt(
   // 7d8803a6       mtlr    r12
   // 4e800021       blrl
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   CodePatcher patcher(pc_after - 8 * kInstrSize, 6);
 
   // Assemble the 64 bit value from the five part load and verify
@@ -222,7 +222,7 @@ void Deoptimizer::PatchStackCheckCodeAt(
   patcher.masm()->mov(ip,
     Operand(reinterpret_cast<uintptr_t>(replacement_code->entry())));
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
       unoptimized_code, pc_after - 7 * kInstrSize, replacement_code);
 #else
@@ -242,7 +242,7 @@ void Deoptimizer::RevertStackCheckCodeAt
   ASSERT(Memory::int32_at(pc_after - 2 * kInstrSize) == 0x7d8803a6);
   ASSERT(Memory::int32_at(pc_after - kInstrSize) == 0x4e800021);
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   ASSERT(Assembler::Is64BitLoadIntoR12(
       Assembler::instr_at(pc_after - 7 * kInstrSize),
       Assembler::instr_at(pc_after - 6 * kInstrSize),
@@ -255,7 +255,7 @@ void Deoptimizer::RevertStackCheckCodeAt
       Assembler::instr_at(pc_after - 3 * kInstrSize)));
 #endif
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   // Replace NOP with conditional jump.
   CodePatcher patcher(pc_after - 8 * kInstrSize, 6);
   if (FLAG_count_based_interrupts) {
@@ -285,7 +285,7 @@ void Deoptimizer::RevertStackCheckCodeAt
   }
 #endif
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   // Assemble the 64 bit value from the five part load and verify
   // that it is the stack guard code
   uint64_t stack_check_address =
@@ -313,7 +313,7 @@ void Deoptimizer::RevertStackCheckCodeAt
   patcher.masm()->mov(ip,
     Operand(reinterpret_cast<uintptr_t>(check_code->entry())));
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
       unoptimized_code, pc_after - 7 * kInstrSize, check_code);
 #else
diff -up v8-3.14.5.10/src/ppc/disasm-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/disasm-ppc.cc
--- v8-3.14.5.10/src/ppc/disasm-ppc.cc.ppc-harder	2017-03-01 12:47:36.480607032 -0500
+++ v8-3.14.5.10/src/ppc/disasm-ppc.cc	2017-03-01 12:47:36.518606081 -0500
@@ -346,7 +346,7 @@ int Decoder::FormatOption(Instruction* i
        return 2;
      }
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case 'd': {  // ds value for offset
       int32_t value = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
       out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -565,7 +565,7 @@ void Decoder::DecodeExt2(Instruction* in
       Format(instr, "srw'.    'ra, 'rs, 'rb");
       return;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case SRDX: {
       Format(instr, "srd'.    'ra, 'rs, 'rb");
       return;
@@ -575,7 +575,7 @@ void Decoder::DecodeExt2(Instruction* in
       Format(instr, "sraw'.   'ra, 'rs, 'rb");
       return;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case SRAD: {
       Format(instr, "srad'.   'ra, 'rs, 'rb");
       return;
@@ -589,7 +589,7 @@ void Decoder::DecodeExt2(Instruction* in
       Format(instr, "extsh'.  'ra, 'rs");
       return;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case EXTSW: {
       Format(instr, "extsw'.  'ra, 'rs");
       return;
@@ -650,7 +650,7 @@ void Decoder::DecodeExt2(Instruction* in
       Format(instr, "slw'.   'ra, 'rs, 'rb");
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case SLDX: {
       Format(instr, "sld'.   'ra, 'rs, 'rb");
       break;
@@ -668,7 +668,7 @@ void Decoder::DecodeExt2(Instruction* in
       Format(instr, "cntlzw'. 'ra, 'rs");
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case CNTLZDX: {
       Format(instr, "cntlzd'. 'ra, 'rs");
       break;
@@ -710,7 +710,7 @@ void Decoder::DecodeExt2(Instruction* in
       Format(instr, "mullw'o'.  'rt, 'ra, 'rb");
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case MULLD: {
       Format(instr, "mulld'o'.  'rt, 'ra, 'rb");
       break;
@@ -720,7 +720,7 @@ void Decoder::DecodeExt2(Instruction* in
       Format(instr, "divw'o'.   'rt, 'ra, 'rb");
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case DIVD: {
       Format(instr, "divd'o'.   'rt, 'ra, 'rb");
       break;
@@ -814,7 +814,7 @@ void Decoder::DecodeExt2(Instruction* in
       Format(instr, "lhzux   'rt, 'ra, 'rb");
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case LDX: {
       Format(instr, "ldx     'rt, 'ra, 'rb");
       break;
@@ -1210,7 +1210,7 @@ int Decoder::InstructionDecode(byte* ins
       DecodeExt5(instr);
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case LD: {
       switch (instr->Bits(1, 0)) {
         case 0:
diff -up v8-3.14.5.10/src/ppc/full-codegen-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/full-codegen-ppc.cc
--- v8-3.14.5.10/src/ppc/full-codegen-ppc.cc.ppc-harder	2017-03-01 12:47:36.482606982 -0500
+++ v8-3.14.5.10/src/ppc/full-codegen-ppc.cc	2017-03-01 12:47:36.518606081 -0500
@@ -451,7 +451,7 @@ void FullCodeGenerator::EmitReturnSequen
       masm_->mtlr(r0);
       masm_->Add(sp, sp, (uint32_t)(sp_delta + (2 * kPointerSize)), r0);
       masm_->blr();
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
       // With 64bit we need a couple of nop() instructions to ensure we have
       // enough space to SetDebugBreakAtReturn()
       masm_->nop();
@@ -1974,7 +1974,7 @@ void FullCodeGenerator::EmitInlineSmiBin
     case Token::SHL: {
       __ b(&stub_call);
       __ GetLeastBitsFromSmi(scratch2, right, 5);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
       __ ShiftLeft(right, left, scratch2);
 #else
       __ SmiUntag(scratch1, left);
@@ -2025,7 +2025,7 @@ void FullCodeGenerator::EmitInlineSmiBin
     }
     case Token::MUL: {
       Label mul_zero;
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
       // Remove tag from both operands.
       __ SmiUntag(ip, right);
       __ SmiUntag(r0, left);
@@ -2046,7 +2046,7 @@ void FullCodeGenerator::EmitInlineSmiBin
       // Go slow on zero result to handle -0.
       __ cmpi(scratch1, Operand::Zero());
       __ beq(&mul_zero);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
       __ SmiTag(right, scratch1);
 #else
       __ mr(right, scratch1);
@@ -3695,7 +3695,7 @@ void FullCodeGenerator::EmitFastAsciiArr
   // string_length to get the length of the result string.
   __ LoadP(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
   __ sub(string_length, string_length, scratch1);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   __ SmiUntag(scratch1, scratch1);
   __ Mul(scratch2, array_length, scratch1);
   // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
diff -up v8-3.14.5.10/src/ppc/ic-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/ic-ppc.cc
--- v8-3.14.5.10/src/ppc/ic-ppc.cc.ppc-harder	2017-03-01 12:47:36.483606957 -0500
+++ v8-3.14.5.10/src/ppc/ic-ppc.cc	2017-03-01 12:47:36.519606056 -0500
@@ -1807,7 +1807,7 @@ void PatchInlinedSmiCode(Address address
     patcher.masm()->TestIfSmi(reg, r0);
   } else {
     ASSERT(check == DISABLE_INLINED_SMI_CHECK);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     ASSERT(Assembler::IsRldicl(instr_at_patch));
 #else
     ASSERT(Assembler::IsRlwinm(instr_at_patch));
diff -up v8-3.14.5.10/src/ppc/lithium-codegen-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/lithium-codegen-ppc.cc
--- v8-3.14.5.10/src/ppc/lithium-codegen-ppc.cc.ppc-harder	2017-03-01 12:47:36.486606882 -0500
+++ v8-3.14.5.10/src/ppc/lithium-codegen-ppc.cc	2017-03-01 12:47:36.519606056 -0500
@@ -922,7 +922,7 @@ void LCodeGen::DoModI(LModI* instr) {
         DeoptimizeIf(eq, instr->environment());
     }
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     __ extsw(scratch, scratch);
 #endif
     __ Mul(scratch, divisor, scratch);
@@ -973,7 +973,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
     __ bind(&left_not_min_int);
   }
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   __ extsw(result, result);
 #endif
 
@@ -1049,7 +1049,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFlo
     // The multiplier is a uint32.
     ASSERT(multiplier > 0 &&
            multiplier < (static_cast<int64_t>(1) << 32));
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     __ extsw(scratch, dividend);
     if (divisor < 0 &&
         instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1175,7 +1175,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
 
     if (can_overflow) {
       // scratch:result = left * right.
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
       __ Mul(result, left, right);
       __ TestIfInt32(result, scratch, r0);
       DeoptimizeIf(ne, instr->environment());
@@ -1269,14 +1269,14 @@ void LCodeGen::DoShiftI(LShiftI* instr)
     switch (instr->op()) {
       case Token::SAR:
         __ sraw(result, left, scratch);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
         __ extsw(result, result);
 #endif
         break;
       case Token::SHR:
         if (instr->can_deopt()) {
           __ srw(result, left, scratch, SetRC);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
           __ extsw(result, result, SetRC);
 #endif
           DeoptimizeIf(lt, instr->environment(), cr0);
@@ -1286,7 +1286,7 @@ void LCodeGen::DoShiftI(LShiftI* instr)
         break;
       case Token::SHL:
         __ slw(result, left, scratch);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
         __ extsw(result, result);
 #endif
         break;
@@ -1302,7 +1302,7 @@ void LCodeGen::DoShiftI(LShiftI* instr)
       case Token::SAR:
         if (shift_count != 0) {
           __ srawi(result, left, shift_count);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
           __ extsw(result, result);
 #endif
         } else {
@@ -1323,7 +1323,7 @@ void LCodeGen::DoShiftI(LShiftI* instr)
       case Token::SHL:
         if (shift_count != 0) {
           __ slwi(result, left, Operand(shift_count));
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
           __ extsw(result, result);
 #endif
         } else {
@@ -1360,7 +1360,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
                               right_reg,
                               scratch0(), r0);
     // Doptimize on overflow
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     __ extsw(scratch0(), scratch0(), SetRC);
 #endif
     DeoptimizeIf(lt, instr->environment(), cr0);
@@ -1530,7 +1530,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
                               ToRegister(left),
                               right_reg,
                               scratch0(), r0);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     __ extsw(scratch0(), scratch0(), SetRC);
 #endif
     // Doptimize on overflow
@@ -2359,7 +2359,7 @@ void LCodeGen::DoDeferredInstanceOfKnown
   Register temp = ToRegister(instr->temp());
   ASSERT(temp.is(r7));
   __ LoadHeapObject(InstanceofStub::right(), instr->function());
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   static const int kAdditionalDelta = 13;
 #else
   static const int kAdditionalDelta = 7;
@@ -2887,7 +2887,7 @@ MemOperand LCodeGen::PrepareKeyedOperand
 
   if (additional_index) {
     if (key_is_tagged) {
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
       // more efficient to just untag
       __ SmiUntag(scratch, key);
       key_is_tagged = false;
@@ -2988,7 +2988,7 @@ void LCodeGen::DoLoadKeyedSpecializedArr
         } else {
           __ lwzx(result, mem_operand);
         }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
         __ extsw(result, result);
 #endif
         break;
@@ -3096,7 +3096,7 @@ void LCodeGen::DoWrapReceiver(LWrapRecei
   __ lwz(scratch,
          FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
   __ TestBit(scratch,
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
              SharedFunctionInfo::kStrictModeFunction,
 #else
              SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
@@ -3106,7 +3106,7 @@ void LCodeGen::DoWrapReceiver(LWrapRecei
 
   // Do not transform the receiver to object for builtins.
   __ TestBit(scratch,
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
              SharedFunctionInfo::kNative,
 #else
              SharedFunctionInfo::kNative + kSmiTagSize,
@@ -4402,7 +4402,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI*
   Register dst = ToRegister(instr->result());
 
   DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   __ SmiTag(dst, src);
 #else
   __ SmiTagCheckOverflow(dst, src, r0);
diff -up v8-3.14.5.10/src/ppc/lithium-gap-resolver-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/lithium-gap-resolver-ppc.cc
--- v8-3.14.5.10/src/ppc/lithium-gap-resolver-ppc.cc.ppc-harder	2017-03-01 12:47:36.487606857 -0500
+++ v8-3.14.5.10/src/ppc/lithium-gap-resolver-ppc.cc	2017-03-01 12:47:36.519606056 -0500
@@ -280,7 +280,7 @@ void LGapResolver::EmitMove(int index) {
       if (in_cycle_) {
         // kSavedDoubleValueRegister was used to break the cycle,
         // but kSavedValueRegister is free.
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
         __ ld(kSavedValueRegister, source_operand);
         __ std(kSavedValueRegister, destination_operand);
 #else
diff -up v8-3.14.5.10/src/ppc/macro-assembler-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/macro-assembler-ppc.cc
--- v8-3.14.5.10/src/ppc/macro-assembler-ppc.cc.ppc-harder	2017-03-01 12:47:36.491606757 -0500
+++ v8-3.14.5.10/src/ppc/macro-assembler-ppc.cc	2017-03-01 12:47:36.520606031 -0500
@@ -132,7 +132,7 @@ int MacroAssembler::CallSize(
   }
 #else
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   movSize = 5;
 #else
   movSize = 2;
@@ -160,7 +160,7 @@ int MacroAssembler::CallSizeNotPredictab
   }
 #else
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   movSize = 5;
 #else
   movSize = 2;
@@ -196,7 +196,7 @@ void MacroAssembler::Call(Address target
   mtlr(ip);
   bclr(BA, SetLK);
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   ASSERT(kCallTargetAddressOffset == 7 * kInstrSize);
 #else
   ASSERT(kCallTargetAddressOffset == 4 * kInstrSize);
@@ -1773,7 +1773,7 @@ void MacroAssembler::StoreNumberToDouble
                                                  Register scratch4,
                                                  Label* fail) {
   Label smi_value, maybe_nan, have_double_value, is_nan, done;
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   Register double_reg = scratch2;
 #else
   Register mantissa_reg = scratch2;
@@ -1792,7 +1792,7 @@ void MacroAssembler::StoreNumberToDouble
 
   // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
   // in the exponent.
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   mov(scratch1, Operand(kLastNonNaNInt64));
   addi(scratch3, value_reg, Operand(-kHeapObjectTag));
   ld(double_reg, MemOperand(scratch3, HeapNumber::kValueOffset));
@@ -1804,14 +1804,14 @@ void MacroAssembler::StoreNumberToDouble
 #endif
   bge(&maybe_nan);
 
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
   lwz(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
 #endif
 
   bind(&have_double_value);
   SmiToDoubleArrayOffset(scratch1, key_reg);
   add(scratch1, elements_reg, scratch1);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   addi(scratch1, scratch1, Operand(-kHeapObjectTag));
   std(double_reg, MemOperand(scratch1, FixedDoubleArray::kHeaderSize));
 #else
@@ -1831,7 +1831,7 @@ void MacroAssembler::StoreNumberToDouble
   // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
   // it's an Infinity, and the non-NaN code path applies.
   bgt(&is_nan);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   clrldi(r0, double_reg, Operand(32), SetRC);
   beq(&have_double_value, cr0);
 #else
@@ -1843,7 +1843,7 @@ void MacroAssembler::StoreNumberToDouble
   // Load canonical NaN for storing into the double array.
   uint64_t nan_int64 = BitCast<uint64_t>(
       FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   mov(double_reg, Operand(nan_int64));
 #else
   mov(mantissa_reg, Operand(static_cast<intptr_t>(nan_int64)));
@@ -2036,7 +2036,7 @@ void MacroAssembler::TryGetFunctionProto
     lwz(scratch,
         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
     TestBit(scratch,
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
             SharedFunctionInfo::kBoundFunction,
 #else
             SharedFunctionInfo::kBoundFunction + kSmiTagSize,
@@ -2122,7 +2122,7 @@ void MacroAssembler::CallApiFunctionAndR
   addi(r29, r29, Operand(1));
   stw(r29, MemOperand(r26, kLevelOffset));
 
-#if !ABI_RETURNS_HANDLES_IN_REGS
+#if !defined(ABI_RETURNS_HANDLES_IN_REGS)
   // PPC LINUX ABI
   // The return value is pointer-sized non-scalar value.
   // Space has already been allocated on the stack which will pass as an
@@ -2136,7 +2136,7 @@ void MacroAssembler::CallApiFunctionAndR
   DirectCEntryStub stub;
   stub.GenerateCall(this, function);
 
-#if !ABI_RETURNS_HANDLES_IN_REGS
+#if !defined(ABI_RETURNS_HANDLES_IN_REGS)
   // Retrieve return value from stack buffer
   LoadP(r3, MemOperand(r3));
 #endif
@@ -2228,7 +2228,7 @@ void MacroAssembler::IndexFromHash(Regis
   STATIC_ASSERT(String::kHashShift == 2);
   STATIC_ASSERT(String::kArrayIndexValueBits == 24);
   // index = SmiTag((hash >> 2) & 0x00FFFFFF);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   ExtractBitRange(index, hash, 25, 2);
   SmiTag(index);
 #else
@@ -2264,7 +2264,7 @@ void MacroAssembler::ConvertToInt32(Regi
 
   addi(sp, sp, Operand(-kDoubleSize));
   stfd(double_scratch, MemOperand(sp, 0));
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   ld(dest, MemOperand(sp, 0));
 #else
 #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN
@@ -2279,7 +2279,7 @@ void MacroAssembler::ConvertToInt32(Regi
 
   // The result is not a 32-bit integer when the high 33 bits of the
   // result are not identical.
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   TestIfInt32(dest, scratch, scratch2);
 #else
   TestIfInt32(scratch, dest, scratch2);
@@ -2305,7 +2305,7 @@ void MacroAssembler::EmitVFPTruncate(VFP
 
   addi(sp, sp, Operand(-kDoubleSize));
   stfd(double_scratch, MemOperand(sp, 0));
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   ld(result, MemOperand(sp, 0));
 #else
 #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN
@@ -2320,7 +2320,7 @@ void MacroAssembler::EmitVFPTruncate(VFP
 
   // The result is a 32-bit integer when the high 33 bits of the
   // result are identical.
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   TestIfInt32(result, scratch, r0);
 #else
   TestIfInt32(scratch, result, r0);
@@ -2439,7 +2439,7 @@ void MacroAssembler::EmitECMATruncate(Re
 
   // reserve a slot on the stack
   stfdu(double_scratch, MemOperand(sp, -8));
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   ld(result, MemOperand(sp, 0));
 #else
 #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN
@@ -2453,7 +2453,7 @@ void MacroAssembler::EmitECMATruncate(Re
 
   // The result is a 32-bit integer when the high 33 bits of the
   // result are identical.
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   TestIfInt32(result, scratch, r0);
 #else
   TestIfInt32(scratch, result, r0);
@@ -2484,7 +2484,7 @@ void MacroAssembler::EmitECMATruncate(Re
 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                          Register src,
                                          int num_least_bits) {
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   rldicl(dst, src, kBitsPerPointer - kSmiShift,
          kBitsPerPointer - num_least_bits);
 #else
@@ -2519,7 +2519,7 @@ void MacroAssembler::CallRuntime(const R
   // smarter.
   mov(r3, Operand(num_arguments));
   mov(r4, Operand(ExternalReference(f, isolate())));
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   CEntryStub stub(f->result_size);
 #else
   CEntryStub stub(1);
@@ -2859,7 +2859,7 @@ void MacroAssembler::JumpIfNotPowerOfTwo
   bne(not_power_of_two, cr0);
 }
 
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
   ASSERT(!reg.is(overflow));
   mr(overflow, reg);  // Save original value.
@@ -3141,7 +3141,7 @@ void MacroAssembler::CopyBytes(Register
     stb(scratch, MemOperand(dst, 2));
     ShiftRightImm(scratch, scratch, Operand(8));
     stb(scratch, MemOperand(dst, 3));
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     ShiftRightImm(scratch, scratch, Operand(8));
     stb(scratch, MemOperand(dst, 4));
     ShiftRightImm(scratch, scratch, Operand(8));
@@ -3152,7 +3152,7 @@ void MacroAssembler::CopyBytes(Register
     stb(scratch, MemOperand(dst, 7));
 #endif
 #else
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     stb(scratch, MemOperand(dst, 7));
     ShiftRightImm(scratch, scratch, Operand(8));
     stb(scratch, MemOperand(dst, 6));
@@ -3356,13 +3356,13 @@ void MacroAssembler::CallCFunctionHelper
   // Just call directly. The function called cannot cause a GC, or
   // allow preemption, so the return address in the link register
   // stays correct.
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+#if defined(ABI_USES_FUNCTION_DESCRIPTORS) && !defined(USE_SIMULATOR)
   // AIX uses a function descriptor. When calling C code be aware
   // of this descriptor and pick up values from it
   Register dest = ip;
   LoadP(ToRegister(2), MemOperand(function, kPointerSize));
   LoadP(dest, MemOperand(function, 0));
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+#elif defined(ABI_TOC_ADDRESSABILITY_VIA_IP)
   Register dest = ip;
   Move(ip, function);
 #else
@@ -3427,7 +3427,7 @@ void MacroAssembler::PatchRelocatedValue
   }
 
   // insert new high word into lis instruction
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   srdi(ip, new_value, Operand(32));
   rlwimi(scratch, ip, 16, 16, 31);
 #else
@@ -3446,14 +3446,14 @@ void MacroAssembler::PatchRelocatedValue
   }
 
   // insert new low word into ori instruction
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   rlwimi(scratch, ip, 0, 16, 31);
 #else
   rlwimi(scratch, new_value, 0, 16, 31);
 #endif
   stw(scratch, MemOperand(lis_location, kInstrSize));
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   if (emit_debug_code()) {
     lwz(scratch, MemOperand(lis_location, 2*kInstrSize));
     // scratch is now sldi.
@@ -3487,7 +3487,7 @@ void MacroAssembler::PatchRelocatedValue
 #endif
 
   // Update the I-cache so the new lis and addic can be executed.
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   FlushICache(lis_location, 5 * kInstrSize, scratch);
 #else
   FlushICache(lis_location, 2 * kInstrSize, scratch);
@@ -3519,7 +3519,7 @@ void MacroAssembler::GetRelocatedValueLo
   // Copy the low 16bits from ori instruction into result
   rlwimi(result, scratch, 0, 16, 31);
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   if (emit_debug_code()) {
     lwz(scratch, MemOperand(lis_location, 2*kInstrSize));
     // scratch is now sldi.
@@ -3698,7 +3698,7 @@ void MacroAssembler::EnsureNotWhite(
   Register map = load_scratch;  // Holds map while checking type.
   Register length = load_scratch;  // Holds length of object after testing type.
   Label is_data_object, maybe_string_object, is_string_object, is_encoded;
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   Label length_computed;
 #endif
 
@@ -3744,11 +3744,11 @@ void MacroAssembler::EnsureNotWhite(
   andi(r0, instance_type, Operand(kStringEncodingMask));
   beq(&is_encoded, cr0);
   SmiUntag(ip);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   b(&length_computed);
 #endif
   bind(&is_encoded);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   SmiToShortArrayOffset(ip, ip);
   bind(&length_computed);
 #else
@@ -3932,7 +3932,7 @@ void MacroAssembler::LoadIntLiteral(Regi
 
 void MacroAssembler::LoadSmiLiteral(Register dst, Smi *smi) {
   intptr_t value = reinterpret_cast<intptr_t>(smi);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   ASSERT((value & 0xffffffff) == 0);
   LoadIntLiteral(dst, value >> 32);
   ShiftLeftImm(dst, dst, Operand(32));
@@ -3949,7 +3949,7 @@ void MacroAssembler::LoadDoubleLiteral(D
   // avoid gcc strict aliasing error using union cast
   union {
     double dval;
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     intptr_t ival;
 #else
     intptr_t ival[2];
@@ -3957,7 +3957,7 @@ void MacroAssembler::LoadDoubleLiteral(D
   } litVal;
 
   litVal.dval = value;
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   mov(scratch, Operand(litVal.ival));
   std(scratch, MemOperand(sp));
 #else
@@ -4053,7 +4053,7 @@ void MacroAssembler::Xor(Register ra, Re
 
 void MacroAssembler::CmpSmiLiteral(Register src1, Smi *smi, Register scratch,
                                    CRegister cr) {
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   LoadSmiLiteral(scratch, smi);
   cmp(src1, scratch, cr);
 #else
@@ -4063,7 +4063,7 @@ void MacroAssembler::CmpSmiLiteral(Regis
 
 void MacroAssembler::CmplSmiLiteral(Register src1, Smi *smi, Register scratch,
                                    CRegister cr) {
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   LoadSmiLiteral(scratch, smi);
   cmpl(src1, scratch, cr);
 #else
@@ -4073,7 +4073,7 @@ void MacroAssembler::CmplSmiLiteral(Regi
 
 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi *smi,
                                    Register scratch) {
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   LoadSmiLiteral(scratch, smi);
   add(dst, src, scratch);
 #else
@@ -4083,7 +4083,7 @@ void MacroAssembler::AddSmiLiteral(Regis
 
 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi *smi,
                                    Register scratch) {
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   LoadSmiLiteral(scratch, smi);
   sub(dst, src, scratch);
 #else
@@ -4093,7 +4093,7 @@ void MacroAssembler::SubSmiLiteral(Regis
 
 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi *smi,
                                    Register scratch, RCBit rc) {
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   LoadSmiLiteral(scratch, smi);
   and_(dst, src, scratch, rc);
 #else
@@ -4110,13 +4110,13 @@ void MacroAssembler::LoadP(Register dst,
   if (!scratch.is(no_reg) && !is_int16(offset)) {
     /* cannot use d-form */
     LoadIntLiteral(scratch, offset);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     ldx(dst, MemOperand(mem.ra(), scratch));
 #else
     lwzx(dst, MemOperand(mem.ra(), scratch));
 #endif
   } else {
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     int misaligned = (offset & 3);
     if (misaligned) {
       // adjust base to conform to offset alignment requirements
@@ -4141,13 +4141,13 @@ void MacroAssembler::StoreP(Register src
   if (!scratch.is(no_reg) && !is_int16(offset)) {
     /* cannot use d-form */
     LoadIntLiteral(scratch, offset);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     stdx(src, MemOperand(mem.ra(), scratch));
 #else
     stwx(src, MemOperand(mem.ra(), scratch));
 #endif
   } else {
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     int misaligned = (offset & 3);
     if (misaligned) {
       // adjust base to conform to offset alignment requirements
@@ -4176,14 +4176,14 @@ void MacroAssembler::LoadWordArith(Regis
   if (!scratch.is(no_reg) && !is_int16(offset)) {
     /* cannot use d-form */
     LoadIntLiteral(scratch, offset);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     // lwax(dst, MemOperand(mem.ra(), scratch));
     ASSERT(0);  // lwax not yet implemented
 #else
     lwzx(dst, MemOperand(mem.ra(), scratch));
 #endif
   } else {
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     int misaligned = (offset & 3);
     if (misaligned) {
       // adjust base to conform to offset alignment requirements
diff -up v8-3.14.5.10/src/ppc/regexp-macro-assembler-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/regexp-macro-assembler-ppc.cc
--- v8-3.14.5.10/src/ppc/regexp-macro-assembler-ppc.cc.ppc-harder	2017-03-01 12:47:36.493606707 -0500
+++ v8-3.14.5.10/src/ppc/regexp-macro-assembler-ppc.cc	2017-03-01 12:47:36.521606006 -0500
@@ -139,7 +139,7 @@ RegExpMacroAssemblerPPC::RegExpMacroAsse
   ASSERT_EQ(0, registers_to_save % 2);
 
   // Called from C
-#if ABI_USES_FUNCTION_DESCRIPTORS
+#ifdef ABI_USES_FUNCTION_DESCRIPTORS
   __ function_descriptor();
 #endif
 
@@ -1434,7 +1434,7 @@ void RegExpCEntryStub::Generate(MacroAss
   extra_stack_slots += kNumRequiredStackFrameSlots;
   __ addi(sp, sp, Operand(-extra_stack_slots * kPointerSize));
 
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+#if defined(ABI_USES_FUNCTION_DESCRIPTORS) && !defined(USE_SIMULATOR)
   // Native AIX/PPC64 Linux use a function descriptor.
   __ LoadP(ToRegister(2), MemOperand(r26, kPointerSize));  // TOC
   __ LoadP(ip, MemOperand(r26, 0));  // Instruction address
diff -up v8-3.14.5.10/src/ppc/simulator-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/simulator-ppc.cc
--- v8-3.14.5.10/src/ppc/simulator-ppc.cc.ppc-harder	2017-03-01 12:47:36.495606657 -0500
+++ v8-3.14.5.10/src/ppc/simulator-ppc.cc	2017-03-01 12:47:36.521606006 -0500
@@ -1332,7 +1332,7 @@ void Simulator::SoftwareInterrupt(Instru
           PrintF("\n");
         }
         CHECK(stack_aligned);
-#if ABI_RETURNS_HANDLES_IN_REGS
+#ifdef ABI_RETURNS_HANDLES_IN_REGS
         intptr_t p0 = arg0;
 #else
         intptr_t p0 = arg1;
@@ -1341,7 +1341,7 @@ void Simulator::SoftwareInterrupt(Instru
         if (::v8::internal::FLAG_trace_sim) {
           PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
         }
-#if ABI_RETURNS_HANDLES_IN_REGS
+#ifdef ABI_RETURNS_HANDLES_IN_REGS
         arg0 = (intptr_t)*result;
 #else
         *(reinterpret_cast<intptr_t*>(arg0)) = (intptr_t) *result;
@@ -1363,21 +1363,21 @@ void Simulator::SoftwareInterrupt(Instru
           PrintF("\n");
         }
         CHECK(stack_aligned);
-#if ABI_RETURNS_HANDLES_IN_REGS
+#ifdef ABI_RETURNS_HANDLES_IN_REGS
         intptr_t p0 = arg0;
         intptr_t p1 = arg1;
 #else
         intptr_t p0 = arg1;
         intptr_t p1 = arg2;
 #endif
-#if !ABI_PASSES_HANDLES_IN_REGS
+#if !defined(ABI_PASSES_HANDLES_IN_REGS)
         p0 = *(reinterpret_cast<intptr_t *>(p0));
 #endif
         v8::Handle<v8::Value> result = target(p0, p1);
         if (::v8::internal::FLAG_trace_sim) {
           PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
         }
-#if ABI_RETURNS_HANDLES_IN_REGS
+#ifdef ABI_RETURNS_HANDLES_IN_REGS
         arg0 = (intptr_t)*result;
 #else
         *(reinterpret_cast<intptr_t*>(arg0)) = (intptr_t) *result;
@@ -1408,7 +1408,7 @@ void Simulator::SoftwareInterrupt(Instru
         }
         CHECK(stack_aligned);
         int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
         if (::v8::internal::FLAG_trace_sim) {
           PrintF("Returned %08" V8PRIxPTR "\n", result);
         }
@@ -1671,7 +1671,7 @@ bool Simulator::DecodeExt2_10bit(Instruc
       }
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case SRDX: {
       int rs = instr->RSValue();
       int ra = instr->RAValue();
@@ -1699,7 +1699,7 @@ bool Simulator::DecodeExt2_10bit(Instruc
       }
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case SRAD: {
       int rs = instr->RSValue();
       int ra = instr->RAValue();
@@ -1726,7 +1726,7 @@ bool Simulator::DecodeExt2_10bit(Instruc
       }
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case EXTSW: {
       const int shift = kBitsPerPointer - 32;
       int ra = instr->RAValue();
@@ -1980,7 +1980,7 @@ void Simulator::DecodeExt2_9bit(Instruct
       }
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case SLDX: {
       int rs = instr->RSValue();
       int ra = instr->RAValue();
@@ -2017,7 +2017,7 @@ void Simulator::DecodeExt2_9bit(Instruct
       }
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case CNTLZDX: {
       int rs = instr->RSValue();
       int ra = instr->RAValue();
@@ -2139,7 +2139,7 @@ void Simulator::DecodeExt2_9bit(Instruct
       // todo - handle OE bit
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case MULLD: {
       int rt = instr->RTValue();
       int ra = instr->RAValue();
@@ -2170,7 +2170,7 @@ void Simulator::DecodeExt2_9bit(Instruct
       // todo - handle OE bit
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case DIVD: {
       int rt = instr->RTValue();
       int ra = instr->RAValue();
@@ -2316,7 +2316,7 @@ void Simulator::DecodeExt2_9bit(Instruct
       }
       break;
     }
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case LDX:
     case LDUX: {
       int rt = instr->RTValue();
@@ -2672,7 +2672,7 @@ void Simulator::DecodeExt4(Instruction*
   UNIMPLEMENTED();  // Not used by V8.
 }
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
 void Simulator::DecodeExt5(Instruction* instr) {
   switch (instr->Bits(4, 2) << 2) {
     case RLDICL: {
@@ -3195,7 +3195,7 @@ void Simulator::InstructionDecode(Instru
       break;
     }
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     case EXT5: {
       DecodeExt5(instr);
       break;
@@ -3339,7 +3339,7 @@ intptr_t Simulator::Call(byte* entry, in
   set_register(sp, entry_stack);
 
   // Prepare to execute the code at entry
-#if ABI_USES_FUNCTION_DESCRIPTORS
+#ifdef ABI_USES_FUNCTION_DESCRIPTORS
   // entry is the function descriptor
   set_pc(*(reinterpret_cast<intptr_t *>(entry)));
 #else
diff -up v8-3.14.5.10/src/ppc/stub-cache-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/stub-cache-ppc.cc
--- v8-3.14.5.10/src/ppc/stub-cache-ppc.cc.ppc-harder	2017-03-01 12:47:36.497606607 -0500
+++ v8-3.14.5.10/src/ppc/stub-cache-ppc.cc	2017-03-01 12:47:36.521606006 -0500
@@ -198,7 +198,7 @@ void StubCache::GenerateProbe(MacroAssem
   Isolate* isolate = masm->isolate();
   Label miss;
 
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   // Make sure that code is valid. The multiplying code relies on the
   // entry size being 24.
   ASSERT(sizeof(Entry) == 24);
@@ -239,7 +239,7 @@ void StubCache::GenerateProbe(MacroAssem
   __ lwz(scratch, FieldMemOperand(name, String::kHashFieldOffset));
   __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ add(scratch, scratch, ip);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   // Use only the low 32 bits of the map pointer.
   __ rldicl(scratch, scratch, 0, 32);
 #endif
@@ -728,7 +728,7 @@ static void GenerateFastApiDirectCall(Ma
   // Prepare arguments.
   __ addi(r5, sp, Operand(3 * kPointerSize));
 
-#if !ABI_RETURNS_HANDLES_IN_REGS
+#if !defined(ABI_RETURNS_HANDLES_IN_REGS)
   bool alloc_return_buf = true;
 #else
   bool alloc_return_buf = false;
@@ -1213,7 +1213,7 @@ void StubCompiler::GenerateLoadCallback(
                                         Handle<AccessorInfo> callback,
                                         Handle<String> name,
                                         Label* miss) {
-#if !ABI_RETURNS_HANDLES_IN_REGS
+#if !defined(ABI_RETURNS_HANDLES_IN_REGS)
   bool alloc_return_buf = true;
 #else
   bool alloc_return_buf = false;
@@ -1263,7 +1263,7 @@ void StubCompiler::GenerateLoadCallback(
   // If alloc_return_buf, we shift the arguments over a register
   // (e.g. r3 -> r4) to allow for the return value buffer in implicit
   // first arg.  CallApiFunctionAndReturn will setup r3.
-#if ABI_PASSES_HANDLES_IN_REGS
+#ifdef ABI_PASSES_HANDLES_IN_REGS
   const int kAccessorInfoSlot = kStackFrameExtraParamSlot +
                                   (alloc_return_buf ? 2 : 1);
 #else
@@ -1281,7 +1281,7 @@ void StubCompiler::GenerateLoadCallback(
   FrameScope frame_scope(masm(), StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
-#if !ABI_PASSES_HANDLES_IN_REGS
+#if !defined(ABI_PASSES_HANDLES_IN_REGS)
   // pass 1st arg by reference
   __ StoreP(arg0, MemOperand(sp, kArg0Slot * kPointerSize));
   __ addi(arg0, sp, Operand(kArg0Slot * kPointerSize));
@@ -2155,7 +2155,7 @@ Handle<Code> CallStubCompiler::CompileMa
     // The frim instruction is only supported on POWER5
     // and higher
     __ frim(d1, d1);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     __ fctidz(d1, d1);
 #else
     __ fctiwz(d1, d1);
@@ -2166,7 +2166,7 @@ Handle<Code> CallStubCompiler::CompileMa
     // perf benefit or if we can simply use the compatible sequence
     // always
     __ SetRoundingMode(kRoundToMinusInf);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
     __ fctid(d1, d1);
 #else
     __ fctiw(d1, d1);
@@ -2175,7 +2175,7 @@ Handle<Code> CallStubCompiler::CompileMa
   }
   // Convert the argument to an integer.
   __ stfdu(d1, MemOperand(sp, -8));
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   __ ld(r3, MemOperand(sp, 0));
 #else
 #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN
@@ -3623,7 +3623,7 @@ static void GenerateSmiKeyCheck(MacroAss
                      double_scratch1,
                      kCheckForInexactConversion);
   __ bne(fail);
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
   __ SmiTag(key, scratch0);
 #else
   __ SmiTagCheckOverflow(scratch1, scratch0, r0);
@@ -3692,7 +3692,7 @@ void KeyedLoadStubCompiler::GenerateLoad
     case EXTERNAL_INT_ELEMENTS:
       __ SmiToIntArrayOffset(value, key);
       __ lwzx(value, MemOperand(r6, value));
-#if V8_TARGET_ARCH_PPC64
+#ifdef V8_TARGET_ARCH_PPC64
       __ extsw(value, value);
 #endif
       break;
@@ -3731,7 +3731,7 @@ void KeyedLoadStubCompiler::GenerateLoad
     // For the Int and UnsignedInt array types, we need to see whether
     // the value can be represented in a Smi. If not, we need to convert
     // it to a HeapNumber.
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
     Label box_int;
     // Check that the value fits in a smi.
     __ JumpIfNotSmiCandidate(value, r0, &box_int);
@@ -3740,7 +3740,7 @@ void KeyedLoadStubCompiler::GenerateLoad
     __ SmiTag(r3, value);
     __ Ret();
 
-#if !V8_TARGET_ARCH_PPC64
+#if !defined(V8_TARGET_ARCH_PPC64)
     __ bind(&box_int);
     // Allocate a HeapNumber for the result and perform int-to-double
     // conversion.  Don't touch r3 or r4 as they are needed if allocation
diff -up v8-3.14.5.10/src/SConscript.ppc-harder v8-3.14.5.10/src/SConscript
--- v8-3.14.5.10/src/SConscript.ppc-harder	2012-07-24 03:59:48.000000000 -0400
+++ v8-3.14.5.10/src/SConscript	2017-03-01 14:13:21.244294435 -0500
@@ -204,6 +204,46 @@ SOURCES = {
     ia32/regexp-macro-assembler-ia32.cc
     ia32/stub-cache-ia32.cc
     """),
+  'arch:ppc': Split("""
+    ppc/assembler-ppc.cc
+    ppc/builtins-ppc.cc
+    ppc/code-stubs-ppc.cc
+    ppc/codegen-ppc.cc
+    ppc/constants-ppc.cc
+    ppc/cpu-ppc.cc
+    ppc/debug-ppc.cc
+    ppc/deoptimizer-ppc.cc
+    ppc/disasm-ppc.cc
+    ppc/frames-ppc.cc
+    ppc/full-codegen-ppc.cc
+    ppc/ic-ppc.cc
+    ppc/lithium-codegen-ppc.cc
+    ppc/lithium-gap-resolver-ppc.cc
+    ppc/lithium-ppc.cc
+    ppc/macro-assembler-ppc.cc
+    ppc/regexp-macro-assembler-ppc.cc
+    ppc/stub-cache-ppc.cc
+    """),
+  'arch:ppc64': Split("""
+    ppc/assembler-ppc.cc
+    ppc/builtins-ppc.cc
+    ppc/code-stubs-ppc.cc
+    ppc/codegen-ppc.cc
+    ppc/constants-ppc.cc
+    ppc/cpu-ppc.cc
+    ppc/debug-ppc.cc
+    ppc/deoptimizer-ppc.cc
+    ppc/disasm-ppc.cc
+    ppc/frames-ppc.cc
+    ppc/full-codegen-ppc.cc
+    ppc/ic-ppc.cc
+    ppc/lithium-codegen-ppc.cc
+    ppc/lithium-gap-resolver-ppc.cc
+    ppc/lithium-ppc.cc
+    ppc/macro-assembler-ppc.cc
+    ppc/regexp-macro-assembler-ppc.cc
+    ppc/stub-cache-ppc.cc
+    """),
   'arch:x64': Split("""
     x64/assembler-x64.cc
     x64/builtins-x64.cc
@@ -225,6 +265,8 @@ SOURCES = {
     """),
   'simulator:arm': ['arm/simulator-arm.cc'],
   'simulator:mips': ['mips/simulator-mips.cc'],
+  'simulator:ppc': ['ppc/simulator-ppc.cc'],
+  'simulator:ppc64': ['ppc/simulator-ppc.cc'],
   'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
   'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
   'os:linux':   ['platform-linux.cc', 'platform-posix.cc'],