Changeset 38992 in webkit for trunk/JavaScriptCore/jit/JIT.cpp


Ignore:
Timestamp:
Dec 4, 2008, 3:10:21 AM (16 years ago)
Author:
[email protected]
Message:

2008-12-04 Gavin Barraclough <[email protected]>

Reviewed by Oliver Hunt.

Allow JIT to function without property access repatching and arithmetic optimizations.
Controlled by ENABLE_JIT_OPTIMIZE_PROPERTY_ACCESS and ENABLE_JIT_OPTIMIZE_ARITHMETIC switches.

https://bugs.webkit.org/show_bug.cgi?id=22643

  • JavaScriptCore.xcodeproj/project.pbxproj:
  • jit/JIT.cpp: (JSC::JIT::privateCompileMainPass): (JSC::JIT::privateCompileSlowCases):
  • jit/JIT.h:
  • jit/JITArithmetic.cpp: Copied from jit/JIT.cpp. (JSC::JIT::compileBinaryArithOp): (JSC::JIT::compileBinaryArithOpSlowCase):
  • jit/JITPropertyAccess.cpp: Copied from jit/JIT.cpp. (JSC::JIT::compileGetByIdHotPath): (JSC::JIT::compileGetByIdSlowCase): (JSC::JIT::compilePutByIdHotPath): (JSC::JIT::compilePutByIdSlowCase): (JSC::resizePropertyStorage): (JSC::transitionWillNeedStorageRealloc): (JSC::JIT::privateCompilePutByIdTransition): (JSC::JIT::patchGetByIdSelf): (JSC::JIT::patchPutByIdReplace): (JSC::JIT::privateCompilePatchGetArrayLength):
  • wtf/Platform.h:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/JavaScriptCore/jit/JIT.cpp

    r38984 r38992  
    4444
    4545namespace JSC {
    46 
    47 #if PLATFORM(MAC)
    48 
    49 static inline bool isSSE2Present()
    50 {
    51     return true; // All X86 Macs are guaranteed to support at least SSE2
    52 }
    53 
    54 #else
    55 
    56 static bool isSSE2Present()
    57 {
    58     static const int SSE2FeatureBit = 1 << 26;
    59     struct SSE2Check {
    60         SSE2Check()
    61         {
    62             int flags;
    63 #if COMPILER(MSVC)
    64             _asm {
    65                 mov eax, 1 // cpuid function 1 gives us the standard feature set
    66                 cpuid;
    67                 mov flags, edx;
    68             }
    69 #else
    70             flags = 0;
    71             // FIXME: Add GCC code to do above asm
    72 #endif
    73             present = (flags & SSE2FeatureBit) != 0;
    74         }
    75         bool present;
    76     };
    77     static SSE2Check check;
    78     return check.present;
    79 }
    80 
    81 #endif
    8246
    8347COMPILE_ASSERT(CTI_ARGS_code == 0xC, CTI_ARGS_code_is_C);
     
    317281}
    318282
    319 /*
    320   This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
    321  
    322   In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
    323   is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
    324  
    325   However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
    326   control will fall through from the code planted.
    327 */
    328 void JIT::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, JmpSrc* wroteJSNumberCell,  X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
    329 {
    330     // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
    331     __ cvttsd2si_rr(xmmSource, tempReg1);
    332     __ addl_rr(tempReg1, tempReg1);
    333     __ sarl_i8r(1, tempReg1);
    334     __ cvtsi2sd_rr(tempReg1, tempXmm);
    335     // Compare & branch if immediate.
    336     __ ucomis_rr(tempXmm, xmmSource);
    337     JmpSrc resultIsImm = __ je();
    338     JmpDst resultLookedLikeImmButActuallyIsnt = __ label();
    339    
    340     // Store the result to the JSNumberCell and jump.
    341     __ movsd_rm(xmmSource, FIELD_OFFSET(JSNumberCell, m_value), jsNumberCell);
    342     if (jsNumberCell != X86::eax)
    343         __ movl_rr(jsNumberCell, X86::eax);
    344     emitPutVirtualRegister(dst);
    345     *wroteJSNumberCell = __ jmp();
    346 
    347     __ link(resultIsImm, __ label());
    348     // value == (double)(JSImmediate)value... or at least, it looks that way...
    349     // ucomi will report that (0 == -0), and will report true if either input is NaN (result is unordered).
    350     __ link(__ jp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
    351     __ pextrw_irr(3, xmmSource, tempReg2);
    352     __ cmpl_i32r(0x8000, tempReg2);
    353     __ link(__ je(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
    354     // Yes it really really really is representable as a JSImmediate.
    355     emitFastArithIntToImmNoCheck(tempReg1);
    356     if (tempReg1 != X86::eax)
    357         __ movl_rr(tempReg1, X86::eax);
    358     emitPutVirtualRegister(dst);
    359 }
    360 
    361 void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
    362 {
    363     Structure* numberStructure = m_globalData->numberStructure.get();
    364     JmpSrc wasJSNumberCell1;
    365     JmpSrc wasJSNumberCell1b;
    366     JmpSrc wasJSNumberCell2;
    367     JmpSrc wasJSNumberCell2b;
    368 
    369     emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx, i);
    370 
    371     if (types.second().isReusable() && isSSE2Present()) {
    372         ASSERT(types.second().mightBeNumber());
    373 
    374         // Check op2 is a number
    375         __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
    376         JmpSrc op2imm = __ jne();
    377         if (!types.second().definitelyIsNumber()) {
    378             emitJumpSlowCaseIfNotJSCell(X86::edx, i, src2);
    379             __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
    380             m_slowCases.append(SlowCaseEntry(__ jne(), i));
    381         }
    382 
    383         // (1) In this case src2 is a reusable number cell.
    384         //     Slow case if src1 is not a number type.
    385         __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
    386         JmpSrc op1imm = __ jne();
    387         if (!types.first().definitelyIsNumber()) {
    388             emitJumpSlowCaseIfNotJSCell(X86::eax, i, src1);
    389             __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
    390             m_slowCases.append(SlowCaseEntry(__ jne(), i));
    391         }
    392 
    393         // (1a) if we get here, src1 is also a number cell
    394         __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
    395         JmpSrc loadedDouble = __ jmp();
    396         // (1b) if we get here, src1 is an immediate
    397         __ link(op1imm, __ label());
    398         emitFastArithImmToInt(X86::eax);
    399         __ cvtsi2sd_rr(X86::eax, X86::xmm0);
    400         // (1c)
    401         __ link(loadedDouble, __ label());
    402         if (opcodeID == op_add)
    403             __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
    404         else if (opcodeID == op_sub)
    405             __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
    406         else {
    407             ASSERT(opcodeID == op_mul);
    408             __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
    409         }
    410 
    411         putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
    412         wasJSNumberCell2b = __ jmp();
    413 
    414         // (2) This handles cases where src2 is an immediate number.
    415         //     Two slow cases - either src1 isn't an immediate, or the subtract overflows.
    416         __ link(op2imm, __ label());
    417         emitJumpSlowCaseIfNotImmNum(X86::eax, i);
    418     } else if (types.first().isReusable() && isSSE2Present()) {
    419         ASSERT(types.first().mightBeNumber());
    420 
    421         // Check op1 is a number
    422         __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
    423         JmpSrc op1imm = __ jne();
    424         if (!types.first().definitelyIsNumber()) {
    425             emitJumpSlowCaseIfNotJSCell(X86::eax, i, src1);
    426             __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
    427             m_slowCases.append(SlowCaseEntry(__ jne(), i));
    428         }
    429 
    430         // (1) In this case src1 is a reusable number cell.
    431         //     Slow case if src2 is not a number type.
    432         __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
    433         JmpSrc op2imm = __ jne();
    434         if (!types.second().definitelyIsNumber()) {
    435             emitJumpSlowCaseIfNotJSCell(X86::edx, i, src2);
    436             __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
    437             m_slowCases.append(SlowCaseEntry(__ jne(), i));
    438         }
    439 
    440         // (1a) if we get here, src2 is also a number cell
    441         __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
    442         JmpSrc loadedDouble = __ jmp();
    443         // (1b) if we get here, src2 is an immediate
    444         __ link(op2imm, __ label());
    445         emitFastArithImmToInt(X86::edx);
    446         __ cvtsi2sd_rr(X86::edx, X86::xmm1);
    447         // (1c)
    448         __ link(loadedDouble, __ label());
    449         __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
    450         if (opcodeID == op_add)
    451             __ addsd_rr(X86::xmm1, X86::xmm0);
    452         else if (opcodeID == op_sub)
    453             __ subsd_rr(X86::xmm1, X86::xmm0);
    454         else {
    455             ASSERT(opcodeID == op_mul);
    456             __ mulsd_rr(X86::xmm1, X86::xmm0);
    457         }
    458         __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
    459         emitPutVirtualRegister(dst);
    460 
    461         putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
    462         wasJSNumberCell1b = __ jmp();
    463 
    464         // (2) This handles cases where src1 is an immediate number.
    465         //     Two slow cases - either src2 isn't an immediate, or the subtract overflows.
    466         __ link(op1imm, __ label());
    467         emitJumpSlowCaseIfNotImmNum(X86::edx, i);
    468     } else
    469         emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
    470 
    471     if (opcodeID == op_add) {
    472         emitFastArithDeTagImmediate(X86::eax);
    473         __ addl_rr(X86::edx, X86::eax);
    474         m_slowCases.append(SlowCaseEntry(__ jo(), i));
    475     } else  if (opcodeID == op_sub) {
    476         __ subl_rr(X86::edx, X86::eax);
    477         m_slowCases.append(SlowCaseEntry(__ jo(), i));
    478         emitFastArithReTagImmediate(X86::eax);
    479     } else {
    480         ASSERT(opcodeID == op_mul);
    481         // convert eax & edx from JSImmediates to ints, and check if either are zero
    482         emitFastArithImmToInt(X86::edx);
    483         JmpSrc op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
    484         __ testl_rr(X86::edx, X86::edx);
    485         JmpSrc op2NonZero = __ jne();
    486         __ link(op1Zero, __ label());
    487         // if either input is zero, add the two together, and check if the result is < 0.
    488         // If it is, we have a problem (N < 0), (N * 0) == -0, not representable as a JSImmediate.
    489         __ movl_rr(X86::eax, X86::ecx);
    490         __ addl_rr(X86::edx, X86::ecx);
    491         m_slowCases.append(SlowCaseEntry(__ js(), i));
    492         // Skip the above check if neither input is zero
    493         __ link(op2NonZero, __ label());
    494         __ imull_rr(X86::edx, X86::eax);
    495         m_slowCases.append(SlowCaseEntry(__ jo(), i));
    496         emitFastArithReTagImmediate(X86::eax);
    497     }
    498     emitPutVirtualRegister(dst);
    499 
    500     if (types.second().isReusable() && isSSE2Present()) {
    501         __ link(wasJSNumberCell2, __ label());
    502         __ link(wasJSNumberCell2b, __ label());
    503     }
    504     else if (types.first().isReusable() && isSSE2Present()) {
    505         __ link(wasJSNumberCell1, __ label());
    506         __ link(wasJSNumberCell1b, __ label());
    507     }
    508 }
    509 
    510 void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
    511 {
    512     JmpDst here = __ label();
    513     __ link(iter->from, here);
    514     if (types.second().isReusable() && isSSE2Present()) {
    515         if (!types.first().definitelyIsNumber()) {
    516             if (linkSlowCaseIfNotJSCell(++iter, src1))
    517                 ++iter;
    518             __ link(iter->from, here);
    519         }
    520         if (!types.second().definitelyIsNumber()) {
    521             if (linkSlowCaseIfNotJSCell(++iter, src2))
    522                 ++iter;
    523             __ link(iter->from, here);
    524         }
    525         __ link((++iter)->from, here);
    526     } else if (types.first().isReusable() && isSSE2Present()) {
    527         if (!types.first().definitelyIsNumber()) {
    528             if (linkSlowCaseIfNotJSCell(++iter, src1))
    529                 ++iter;
    530             __ link(iter->from, here);
    531         }
    532         if (!types.second().definitelyIsNumber()) {
    533             if (linkSlowCaseIfNotJSCell(++iter, src2))
    534                 ++iter;
    535             __ link(iter->from, here);
    536         }
    537         __ link((++iter)->from, here);
    538     } else
    539         __ link((++iter)->from, here);
    540 
    541     // additional entry point to handle -0 cases.
    542     if (opcodeID == op_mul)
    543         __ link((++iter)->from, here);
    544 
    545     emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
    546     emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
    547     if (opcodeID == op_add)
    548         emitCTICall(i, Interpreter::cti_op_add);
    549     else if (opcodeID == op_sub)
    550         emitCTICall(i, Interpreter::cti_op_sub);
    551     else {
    552         ASSERT(opcodeID == op_mul);
    553         emitCTICall(i, Interpreter::cti_op_mul);
    554     }
    555     emitPutVirtualRegister(dst);
    556 }
    557 
    558283void JIT::privateCompileMainPass()
    559284{
     
    697422        }
    698423        case op_put_by_id: {
    699             // In order to be able to repatch both the Structure, and the object offset, we store one pointer,
    700             // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    701             // such that the Structure & offset are always at the same distance from this.
    702 
    703             int baseVReg = instruction[i + 1].u.operand;
    704             emitGetVirtualRegisters(baseVReg, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
    705 
    706             ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);
    707 
    708             // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    709             emitJumpSlowCaseIfNotJSCell(X86::eax, i, baseVReg);
    710 
    711             JmpDst hotPathBegin = __ label();
    712             m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
    713             ++propertyAccessInstructionIndex;
    714 
    715             // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    716             __ cmpl_i32m(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
    717             ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdStructure);
    718             m_slowCases.append(SlowCaseEntry(__ jne(), i));
    719 
    720             // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    721             __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    722             __ movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
    723             ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdPropertyMapOffset);
    724 
     424            compilePutByIdHotPath(instruction[i + 1].u.operand, &(m_codeBlock->identifiers[instruction[i + 2].u.operand]), instruction[i + 3].u.operand, i, propertyAccessInstructionIndex++);
    725425            i += 8;
    726426            break;
    727427        }
    728428        case op_get_by_id: {
    729             // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be repatched.
    730             // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
    731             // to array-length / prototype access trampolines, and finally we also store the property-map access offset as a label
    732             // to jump back to if one of these trampolines finds a match.
    733 
    734             int baseVReg = instruction[i + 2].u.operand;
    735             emitGetVirtualRegister(baseVReg, X86::eax, i);
    736 
    737             ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);
    738 
    739             emitJumpSlowCaseIfNotJSCell(X86::eax, i, baseVReg);
    740 
    741             JmpDst hotPathBegin = __ label();
    742             m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
    743             ++propertyAccessInstructionIndex;
    744 
    745             __ cmpl_i32m(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
    746             ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdStructure);
    747             m_slowCases.append(SlowCaseEntry(__ jne(), i));
    748             ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdBranchToSlowCase);
    749 
    750             __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    751             __ movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::eax);
    752             ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdPropertyMapOffset);
    753             emitPutVirtualRegister(instruction[i + 1].u.operand);
    754 
     429            compileGetByIdHotPath(instruction[i + 1].u.operand, instruction[i + 2].u.operand, &(m_codeBlock->identifiers[instruction[i + 3].u.operand]), i, propertyAccessInstructionIndex++);
    755430            i += 8;
    756431            break;
     
    19831658        }
    19841659        case op_put_by_id: {
    1985             if (linkSlowCaseIfNotJSCell(iter, instruction[i + 1].u.operand))
    1986                 ++iter;
    1987             __ link(iter->from, __ label());
    1988 
    1989             Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
    1990             emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
    1991             emitPutCTIArg(X86::eax, 0);
    1992             emitPutCTIArg(X86::edx, 8);
    1993             JmpSrc call = emitCTICall(i, Interpreter::cti_op_put_by_id);
    1994 
    1995             // Track the location of the call; this will be used to recover repatch information.
    1996             ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);
    1997             m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
    1998             ++propertyAccessInstructionIndex;
    1999 
     1660            compilePutByIdSlowCase(instruction[i + 1].u.operand, &(m_codeBlock->identifiers[instruction[i + 2].u.operand]), instruction[i + 3].u.operand, i, iter, propertyAccessInstructionIndex++);
    20001661            i += 8;
    20011662            break;
    20021663        }
    20031664        case op_get_by_id: {
    2004             // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    2005             // so that we only need track one pointer into the slow case code - we track a pointer to the location
    2007             // of the call (which we can use to look up the repatch information), but should an array-length or
    2007             // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
    2008             // the distance from the call to the head of the slow case.
    2009 
    2010             if (linkSlowCaseIfNotJSCell(iter, instruction[i + 2].u.operand))
    2011                 ++iter;
    2012             __ link(iter->from, __ label());
    2013 
    2014 #ifndef NDEBUG
    2015             JmpDst coldPathBegin = __ label();
    2016 #endif       
    2017             emitPutCTIArg(X86::eax, 0);
    2018             Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
    2019             emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
    2020             JmpSrc call = emitCTICall(i, Interpreter::cti_op_get_by_id);
    2021             ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
    2022             emitPutVirtualRegister(instruction[i + 1].u.operand);
    2023 
    2024             // Track the location of the call; this will be used to recover repatch information.
    2025             ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);
    2026             m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
    2027             ++propertyAccessInstructionIndex;
    2028 
     1665            compileGetByIdSlowCase(instruction[i + 1].u.operand, instruction[i + 2].u.operand, &(m_codeBlock->identifiers[instruction[i + 3].u.operand]), i, iter, propertyAccessInstructionIndex++);
    20291666            i += 8;
    20301667            break;
     
    24502087}
    24512088
    2452 void JIT::privateCompileGetByIdSelf(Structure* structure, size_t cachedOffset, void* returnAddress)
    2453 {
    2454     // Check eax is an object of the right Structure.
    2455     __ testl_i32r(JSImmediate::TagMask, X86::eax);
    2456     JmpSrc failureCases1 = __ jne();
    2457     JmpSrc failureCases2 = checkStructure(X86::eax, structure);
    2458 
    2459     // Checks out okay! - getDirectOffset
    2460     __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    2461     __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
    2462     __ ret();
    2463 
    2464     void* code = __ executableCopy();
    2465 
    2466     X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
    2467     X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
    2468    
    2469     m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    2470    
    2471     ctiRepatchCallByReturnAddress(returnAddress, code);
    2472 }
    2473 
    2474 void JIT::privateCompileGetByIdProto(Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
    2475 {
    2476 #if USE(CTI_REPATCH_PIC)
    2477     StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
    2478 
    2479     // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    2480     ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
    2481 
    2482     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    2483     // referencing the prototype object - let's speculatively load its table nice and early!)
    2484     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    2485     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    2486     __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    2487 
    2488     // Check eax is an object of the right Structure.
    2489     JmpSrc failureCases1 = checkStructure(X86::eax, structure);
    2490 
    2491     // Check the prototype object's Structure had not changed.
    2492     Structure** prototypeStructureAddress = &(protoObject->m_structure);
    2493     __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    2494     JmpSrc failureCases2 = __ jne();
    2495 
    2496     // Checks out okay! - getDirectOffset
    2497     __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    2498 
    2499     JmpSrc success = __ jmp();
    2500 
    2501     void* code = __ executableCopy();
    2502 
    2503     // Use the repatch information to link the failure cases back to the original slow case routine.
    2504     void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    2505     X86Assembler::link(code, failureCases1, slowCaseBegin);
    2506     X86Assembler::link(code, failureCases2, slowCaseBegin);
    2507 
    2508     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    2509     intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    2510     X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
    2511 
    2512     // Track the stub we have created so that it will be deleted later.
    2513     info.stubRoutine = code;
    2514 
    2515     // Finally repatch the jump to slow case back in the hot path to jump here instead.
    2516     intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    2517     X86Assembler::repatchBranchOffset(jmpLocation, code);
    2518 #else
    2519     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    2520     // referencing the prototype object - let's speculatively load its table nice and early!)
    2521     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    2522     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    2523     __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    2524 
    2525     // Check eax is an object of the right Structure.
    2526     __ testl_i32r(JSImmediate::TagMask, X86::eax);
    2527     JmpSrc failureCases1 = __ jne();
    2528     JmpSrc failureCases2 = checkStructure(X86::eax, structure);
    2529 
    2530     // Check the prototype object's Structure had not changed.
    2531     Structure** prototypeStructureAddress = &(protoObject->m_structure);
    2532     __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    2533     JmpSrc failureCases3 = __ jne();
    2534 
    2535     // Checks out okay! - getDirectOffset
    2536     __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    2537 
    2538     __ ret();
    2539 
    2540     void* code = __ executableCopy();
    2541 
    2542     X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    2543     X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    2544     X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    2545 
    2546     m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    2547 
    2548     ctiRepatchCallByReturnAddress(returnAddress, code);
    2549 #endif
    2550 }
    2551 
    2552 #if USE(CTI_REPATCH_PIC)
    2553 void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
    2554 {
    2555     JmpSrc failureCase = checkStructure(X86::eax, structure);
    2556     __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    2557     __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
    2558     JmpSrc success = __ jmp();
    2559 
    2560     void* code = __ executableCopy();
    2561     ASSERT(code);
    2562 
    2563     // Use the repatch information to link the failure cases back to the original slow case routine.
    2564     void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    2565     if (!lastProtoBegin)
    2566         lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    2567 
    2568     X86Assembler::link(code, failureCase, lastProtoBegin);
    2569 
    2570     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    2571     intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    2572     X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
    2573 
    2574     structure->ref();
    2575     polymorphicStructures->list[currentIndex].set(cachedOffset, code, structure);
    2576 
    2577     // Finally repatch the jump to slow case back in the hot path to jump here instead.
    2578     intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    2579     X86Assembler::repatchBranchOffset(jmpLocation, code);
    2580 }
    2581 
    2582 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
    2583 {
    2584     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    2585     // referencing the prototype object - let's speculatively load its table nice and early!)
    2586     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    2587     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    2588     __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    2589 
    2590     // Check eax is an object of the right Structure.
    2591     JmpSrc failureCases1 = checkStructure(X86::eax, structure);
    2592 
    2593     // Check the prototype object's Structure had not changed.
    2594     Structure** prototypeStructureAddress = &(protoObject->m_structure);
    2595     __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    2596     JmpSrc failureCases2 = __ jne();
    2597 
    2598     // Checks out okay! - getDirectOffset
    2599     __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    2600 
    2601     JmpSrc success = __ jmp();
    2602 
    2603     void* code = __ executableCopy();
    2604 
    2605     // Use the repatch information to link the failure cases back to the original slow case routine.
    2606     void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    2607     X86Assembler::link(code, failureCases1, lastProtoBegin);
    2608     X86Assembler::link(code, failureCases2, lastProtoBegin);
    2609 
    2610     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    2611     intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    2612     X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
    2613 
    2614     structure->ref();
    2615     prototypeStructure->ref();
    2616     prototypeStructures->list[currentIndex].set(cachedOffset, code, structure, prototypeStructure);
    2617 
    2618     // Finally repatch the jump to slow case back in the hot path to jump here instead.
    2619     intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    2620     X86Assembler::repatchBranchOffset(jmpLocation, code);
    2621 }
    2622 
// Generate one entry of a polymorphic get_by_id stub for a property found further
// down the prototype chain.  The stub verifies the base object's Structure plus the
// Structure of every prototype along the chain before loading the cached slot from
// the final prototype's storage.
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);
    
    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    // Walk the prototype chain, checking each prototype's Structure in turn.
    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    // All checks passed - load the cached slot from the final prototype's storage.
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    // Failures chain to the previous stub in the polymorphic list.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(cachedOffset, code, structure, chain);

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
    2672 #endif
    2673 
// Generate a monomorphic get_by_id stub for a property found along the prototype
// chain.  With CTI_REPATCH_PIC the stub is spliced into the hot path via branch
// repatching; otherwise it becomes the call target at returnAddress and returns
// to the caller directly.
void JIT::privateCompileGetByIdChain(Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    ASSERT(count);
    
    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    // Walk the prototype chain, checking each prototype's Structure in turn.
    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    // All checks passed - load the cached slot from the final prototype's storage.
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    info.stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    ASSERT(count);
    
    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    // (First make sure it is a cell at all - a set TagMask bit means an immediate value.)
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    bucketsOfFail.append(__ jne());
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    // Walk the prototype chain, checking each prototype's Structure in turn.
    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    // All checks passed - load the cached slot and return to the caller directly.
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    __ ret();

    void* code = __ executableCopy();

    // All failure cases go straight to the generic proto-fail routine.
    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}
    2765 
// Generate a fast-path stub for put_by_id when the property already exists on the
// object (no Structure transition): verify the Structure, then store edx (the value)
// into the object's property storage at the cached offset.
void JIT::privateCompilePutByIdReplace(Structure* structure, size_t cachedOffset, void* returnAddress)
{
    // Check eax is an object of the right Structure.
    // (A set TagMask bit means an immediate value, not a cell.)
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // checks out okay! - putDirectOffset: write edx into the cached slot.
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
    __ ret();

    void* code = __ executableCopy();
    
    // Failures fall back to the generic put_by_id fail routine.
    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    
    ctiRepatchCallByReturnAddress(returnAddress, code);
}
    2787 
extern "C" {

    // Out-of-line helper invoked from JIT-generated code (see
    // privateCompilePutByIdTransition) when a Structure transition changes the
    // property storage capacity.  Grows the object's storage from oldSize to
    // newSize and returns the base object pointer back to the calling stub.
    static JSObject* resizePropertyStorage(JSObject* baseObject, size_t oldSize, size_t newSize)
    {
        baseObject->allocatePropertyStorageInline(oldSize, newSize);
        return baseObject;
    }

}
    2797 
    2798 static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Structure* newStructure)
    2799 {
    2800     return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    2801 }
    2802 
// Generate a stub for put_by_id that performs a Structure transition: verify the
// old Structure and every prototype along the chain, reallocate property storage
// out-of-line if the transition changes capacity, swap in the new Structure, then
// store the value (in edx) at the cached offset.
void JIT::privateCompilePutByIdTransition(Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
{
    Vector<JmpSrc, 16> failureCases;
    // Check eax is an object of the right Structure.
    // (A set TagMask bit means an immediate value, not a cell.)
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    failureCases.append(__ jne());
    __ cmpl_i32m(reinterpret_cast<uint32_t>(oldStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
    failureCases.append(__ jne());
    Vector<JmpSrc> successCases;

    //  ecx = baseObject
    __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
    // proto(ecx) = baseObject->structure()->prototype()
    __ cmpl_i32m(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
    failureCases.append(__ jne());
    __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
    
    // ecx = baseObject->m_structure
    // Walk the prototype chain: a null prototype means we reached the end (success);
    // any Structure or type mismatch is a failure.
    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
        // null check the prototype
        __ cmpl_i32r(asInteger(jsNull()), X86::ecx);
        successCases.append(__ je());

        // Check the structure id
        __ cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), FIELD_OFFSET(JSCell, m_structure), X86::ecx);
        failureCases.append(__ jne());
        
        __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::ecx, X86::ecx);
        __ cmpl_i32m(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
        failureCases.append(__ jne());
        __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
    }

    // NOTE(review): this jne consumes flags left by the last compare emitted above
    // (mov does not modify EFLAGS) - confirm this extra failure jump is intentional
    // rather than leftover.
    failureCases.append(__ jne());
    for (unsigned i = 0; i < successCases.size(); ++i)
        __ link(successCases[i], __ label());

    JmpSrc callTarget;

    // emit a call only if storage realloc is needed
    if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) {
        // Preserve edx (the value) across the call; pass (baseObject, oldCapacity,
        // newCapacity) on the stack for resizePropertyStorage, then pop the three
        // arguments ourselves before restoring edx.
        __ pushl_r(X86::edx);
        __ pushl_i32(newStructure->propertyStorageCapacity());
        __ pushl_i32(oldStructure->propertyStorageCapacity());
        __ pushl_r(X86::eax);
        callTarget = __ call();
        __ addl_i32r(3 * sizeof(void*), X86::esp);
        __ popl_r(X86::edx);
    }

    // Assumes m_refCount can be decremented easily, refcount decrement is safe as
    // codeblock should ensure oldStructure->m_refCount > 0
    __ subl_i8m(1, reinterpret_cast<void*>(oldStructure));
    __ addl_i8m(1, reinterpret_cast<void*>(newStructure));
    __ movl_i32m(reinterpret_cast<uint32_t>(newStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);

    // write the value
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);

    __ ret();
    
    // All failure cases funnel through a single trampoline that jumps to the
    // generic put_by_id fail routine.
    JmpSrc failureJump;
    if (failureCases.size()) {
        for (unsigned i = 0; i < failureCases.size(); ++i)
            __ link(failureCases[i], __ label());
        __ restoreArgumentReferenceForTrampoline();
        failureJump = __ jmp();
    }

    void* code = __ executableCopy();

    if (failureCases.size())
        X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));

    if (transitionWillNeedStorageRealloc(oldStructure, newStructure))
        X86Assembler::link(code, callTarget, reinterpret_cast<void*>(resizePropertyStorage));
    
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    
    ctiRepatchCallByReturnAddress(returnAddress, code);
}
    2885 
    28862089void JIT::privateCompileCTIMachineTrampolines()
    28872090{
     
    30832286}
    30842287
// Repatch an existing get_by_id hot path in place for a self (own-property) hit:
// redirect the slow-case call, then rewrite the inline property offset and the
// inline Structure immediate the hot path checks against.
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, Structure* structure, size_t cachedOffset, void* returnAddress)
{
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to Interpreter::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));

    // Repatch the offset into the property map to load from, then repatch the Structure to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructure, reinterpret_cast<uint32_t>(structure));
}
    3097 
// Repatch an existing put_by_id hot path in place for a replace (no transition):
// redirect the slow-case call, then rewrite the inline property offset and the
// inline Structure immediate the hot path checks against.
void JIT::patchPutByIdReplace(CodeBlock* codeBlock, Structure* structure, size_t cachedOffset, void* returnAddress)
{
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
    
    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to Interpreter::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_generic));

    // Repatch the offset into the property map to load from, then repatch the Structure to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructure, reinterpret_cast<uint32_t>(structure));
}
    3110 
// Generate a stub that fast-paths get_by_id of "length" on a JSArray: verify the
// cell's vptr, load m_length from the array storage, and encode it as an immediate
// integer (length * 2 + 1) provided it fits within JSImmediate::maxImmediateInt.
void JIT::privateCompilePatchGetArrayLength(void* returnAddress)
{
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));

    // Check eax is an array (compare the cell's vptr against the JSArray vptr).
    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), X86::eax);
    JmpSrc failureCases1 = __ jne();

    // Checks out okay! - get the length from the storage
    __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    __ movl_mr(FIELD_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);

    // Lengths too large to fit an immediate int take the slow case.
    __ cmpl_i32r(JSImmediate::maxImmediateInt, X86::ecx);
    JmpSrc failureCases2 = __ ja();

    // Encode the length as an immediate integer: eax = length * 2 + 1.
    __ addl_rr(X86::ecx, X86::ecx);
    __ addl_i8r(1, X86::ecx);
    __ movl_rr(X86::ecx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
    3152 
    31532288void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
    31542289{
Note: See TracChangeset for help on using the changeset viewer.