Changeset 36418 in webkit for trunk/JavaScriptCore/VM/CTI.cpp


Ignore:
Timestamp:
Sep 14, 2008, 7:18:13 PM (17 years ago)
Author:
[email protected]
Message:

2008-09-14 Maciej Stachowiak <[email protected]>

Reviewed by Cameron Zwarich.


  • split the "prototype" lookup for hasInstance into opcode stream so it can be cached


~5% speedup on v8 earley-boyer test

  • API/JSCallbackObject.h: Add a parameter for the pre-looked-up prototype.
  • API/JSCallbackObjectFunctions.h: (JSC::::hasInstance): Ditto.
  • API/JSValueRef.cpp: (JSValueIsInstanceOfConstructor): Look up and pass in prototype.
  • JavaScriptCore.exp:
  • VM/CTI.cpp: (JSC::CTI::privateCompileMainPass): Pass along prototype.
  • VM/CodeBlock.cpp: (JSC::CodeBlock::dump): Print third arg.
  • VM/CodeGenerator.cpp: (JSC::CodeGenerator::emitInstanceOf): Implement this, now that there is a third argument.
  • VM/CodeGenerator.h:
  • VM/Machine.cpp: (JSC::Machine::privateExecute): Pass along the prototype. (JSC::Machine::cti_op_instanceof): ditto
  • kjs/JSObject.cpp: (JSC::JSObject::hasInstance): Expect to get a pre-looked-up prototype.
  • kjs/JSObject.h:
  • kjs/nodes.cpp: (JSC::InstanceOfNode::emitCode): Emit a get_by_id of the prototype property and pass that register to instanceof.
  • kjs/nodes.h:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/JavaScriptCore/VM/CTI.cpp

    r36417 r36418  
    243243#endif
    244244
    245 ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
     245ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
    246246{
    247247#if ENABLE(SAMPLING_TOOL)
    248248    m_jit.movl_i32m(1, &inCalledCode);
    249249#endif
    250     m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
     250    X86Assembler::JmpSrc call = m_jit.emitCall();
     251    m_calls.append(CallRecord(call, helper, opcodeIndex));
    251252    emitDebugExceptionCheck();
    252253#if ENABLE(SAMPLING_TOOL)
    253254    m_jit.movl_i32m(0, &inCalledCode);
    254255#endif
    255 }
    256 
    257 ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
     256
     257    return call;
     258}
     259
     260ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
    258261{
    259262#if ENABLE(SAMPLING_TOOL)
    260263    m_jit.movl_i32m(1, &inCalledCode);
    261264#endif
    262     m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
     265    X86Assembler::JmpSrc call = m_jit.emitCall();
     266    m_calls.append(CallRecord(call, helper, opcodeIndex));
    263267    emitDebugExceptionCheck();
    264268#if ENABLE(SAMPLING_TOOL)
    265269    m_jit.movl_i32m(0, &inCalledCode);
    266270#endif
    267 }
    268 
    269 ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
     271
     272    return call;
     273}
     274
     275ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
    270276{
    271277#if ENABLE(SAMPLING_TOOL)
    272278    m_jit.movl_i32m(1, &inCalledCode);
    273279#endif
    274     m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
     280    X86Assembler::JmpSrc call = m_jit.emitCall();
     281    m_calls.append(CallRecord(call, helper, opcodeIndex));
    275282    emitDebugExceptionCheck();
    276283#if ENABLE(SAMPLING_TOOL)
    277284    m_jit.movl_i32m(0, &inCalledCode);
    278285#endif
    279 }
    280 
    281 ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
     286
     287    return call;
     288}
     289
     290ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
    282291{
    283292#if ENABLE(SAMPLING_TOOL)
    284293    m_jit.movl_i32m(1, &inCalledCode);
    285294#endif
    286     m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
     295    X86Assembler::JmpSrc call = m_jit.emitCall();
     296    m_calls.append(CallRecord(call, helper, opcodeIndex));
    287297    emitDebugExceptionCheck();
    288298#if ENABLE(SAMPLING_TOOL)
    289299    m_jit.movl_i32m(0, &inCalledCode);
    290300#endif
    291 }
    292 
    293 ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
     301
     302    return call;
     303}
     304
     305ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
    294306{
    295307#if ENABLE(SAMPLING_TOOL)
    296308    m_jit.movl_i32m(1, &inCalledCode);
    297309#endif
    298     m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
     310    X86Assembler::JmpSrc call = m_jit.emitCall();
     311    m_calls.append(CallRecord(call, helper, opcodeIndex));
    299312    emitDebugExceptionCheck();
    300313#if ENABLE(SAMPLING_TOOL)
    301314    m_jit.movl_i32m(0, &inCalledCode);
    302315#endif
     316
     317    return call;
     318}
     319
     320ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
     321{
     322    m_jit.testl_i32r(JSImmediate::TagMask, reg);
     323    m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
    303324}
    304325
     
    362383    , m_codeBlock(codeBlock)
    363384    , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
     385    , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
    364386{
    365387}
     
    469491    Instruction* instruction = m_codeBlock->instructions.begin();
    470492    unsigned instructionCount = m_codeBlock->instructions.size();
     493
     494    unsigned structureIDInstructionIndex = 0;
    471495
    472496    for (unsigned i = 0; i < instructionCount; ) {
     
    608632        }
    609633        case op_put_by_id: {
    610             Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
    611             emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
     634            // In order to be able to repatch both the StructureID, and the object offset, we store one pointer,
     635            // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
     636            // such that the StructureID & offset are always at the same distance from this.
     637
    612638            emitGetArg(instruction[i + 1].u.operand, X86::eax);
    613639            emitGetArg(instruction[i + 3].u.operand, X86::edx);
    614             emitPutArg(X86::eax, 0); // leave the base in eax
    615             emitPutArg(X86::edx, 8); // leave the base in edx
    616             emitCall(i, Machine::cti_op_put_by_id);
     640
     641            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
     642            X86Assembler::JmpDst hotPathBegin = m_jit.label();
     643            m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
     644            ++structureIDInstructionIndex;
     645
     646            // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
     647            emitJumpSlowCaseIfNotJSCell(X86::eax, i);
     648            // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
     649            m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
     650            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
     651            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
     652
     653            // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
     654            m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
     655            m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
     656            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
     657
    617658            i += 8;
    618659            break;
    619660        }
    620661        case op_get_by_id: {
    621             Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
    622             emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
     662            // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
     663            // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
     664            // to array-length / prototype access trampolines, and finally we also use the property-map access offset as a label
     665            // to jump back to if one of these trampolines finds a match.
     666
    623667            emitGetArg(instruction[i + 2].u.operand, X86::eax);
    624             emitPutArg(X86::eax, 0); // leave the base in eax
    625             emitCall(i, Machine::cti_op_get_by_id);
    626             emitPutResult(instruction[i + 1].u.operand);
     668
     669            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
     670
     671            X86Assembler::JmpDst hotPathBegin = m_jit.label();
     672            m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
     673            ++structureIDInstructionIndex;
     674
     675            emitJumpSlowCaseIfNotJSCell(X86::eax, i);
     676            m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
     677            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
     678            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
     679            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
     680
     681            m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
     682            m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
     683            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
     684            emitPutResult(instruction[i + 1].u.operand, X86::ecx);
     685
    627686            i += 8;
    628687            break;
     
    778837            m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    779838            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
     839
     840            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
     841            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    780842            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
    781843            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
    782844
    783             m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
    784             m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
     845            // Get the value from the vector
     846            m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
    785847            emitPutResult(instruction[i + 1].u.operand);
    786848            i += 4;
     
    811873            emitGetArg(instruction[i + 1].u.operand, X86::eax);
    812874            emitGetArg(instruction[i + 2].u.operand, X86::edx);
    813             emitGetArg(instruction[i + 3].u.operand, X86::ecx);
    814875            emitJumpSlowCaseIfNotImm(X86::edx, i);
    815876            emitFastArithImmToInt(X86::edx);
     
    818879            m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    819880            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
     881
     882            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
     883            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    820884            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
     885            X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
     886            // No; oh well, check if the access is within the vector - if so, we may still be okay.
     887            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
    821888            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
    822889
    823             m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
    824             m_jit.movl_rm(X86::ecx, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::eax, X86::edx, sizeof(JSValue*));
     890            // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
     891            // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
     892            m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
     893            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
     894
     895            // All good - put the value into the array.
     896            m_jit.link(inFastVector, m_jit.label());
     897            emitGetArg(instruction[i + 3].u.operand, X86::eax);
     898            m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
    825899            i += 4;
    826900            break;
     
    13401414        }
    13411415    }
     1416
     1417    ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
    13421418}
    13431419
     
    13641440void CTI::privateCompileSlowCases()
    13651441{
     1442    unsigned structureIDInstructionIndex = 0;
     1443
    13661444    Instruction* instruction = m_codeBlock->instructions.begin();
    13671445    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
    1368         int i = iter->to;
     1446        unsigned i = iter->to;
    13691447        m_jit.emitRestoreArgumentReference();
    13701448        switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
     
    14031481        }
    14041482        case op_get_by_val: {
     1483            // The slow case that handles accesses to arrays (below) may jump back up to here.
     1484            X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
     1485
    14051486            X86Assembler::JmpSrc notImm = iter->from;
    1406             m_jit.link((++iter)->from, m_jit.label());
    14071487            m_jit.link((++iter)->from, m_jit.label());
    14081488            m_jit.link((++iter)->from, m_jit.label());
     
    14131493            emitCall(i, Machine::cti_op_get_by_val);
    14141494            emitPutResult(instruction[i + 1].u.operand);
     1495            m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
     1496
     1497            // This is slow case that handles accesses to arrays above the fast cut-off.
     1498            // First, check if this is an access to the vector
     1499            m_jit.link((++iter)->from, m_jit.label());
     1500            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
     1501            m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
     1502
     1503            // okay, missed the fast region, but it is still in the vector.  Get the value.
     1504            m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
     1505            // Check whether the value loaded is zero; if so we need to return undefined.
     1506            m_jit.testl_rr(X86::ecx, X86::ecx);
     1507            m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
     1508            emitPutResult(instruction[i + 1].u.operand, X86::ecx);
     1509           
    14151510            i += 4;
    14161511            break;
     
    14771572            break;
    14781573        }
     1574        case op_put_by_id: {
     1575            m_jit.link(iter->from, m_jit.label());
     1576            m_jit.link((++iter)->from, m_jit.label());
     1577
     1578            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
     1579            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
     1580            emitPutArg(X86::eax, 0);
     1581            emitPutArg(X86::edx, 8);
     1582            X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
     1583
     1584            // Track the location of the call; this will be used to recover repatch information.
     1585            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
     1586            m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
     1587            ++structureIDInstructionIndex;
     1588
     1589            i += 8;
     1590            break;
     1591        }
     1592        case op_get_by_id: {
     1593            // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
     1594            // so that we only need track one pointer into the slow case code - we track a pointer to the location
     1595            // of the call (which we can use to look up the repatch information), but should an array-length or
     1596            // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
     1597            // the distance from the call to the head of the slow case.
     1598
     1599            m_jit.link(iter->from, m_jit.label());
     1600            m_jit.link((++iter)->from, m_jit.label());
     1601
     1602#ifndef NDEBUG
     1603            X86Assembler::JmpDst coldPathBegin = m_jit.label();
     1604#endif       
     1605            emitPutArg(X86::eax, 0);
     1606            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
     1607            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
     1608            X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
     1609            ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
     1610            emitPutResult(instruction[i + 1].u.operand);
     1611
     1612            // Track the location of the call; this will be used to recover repatch information.
     1613            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
     1614            m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
     1615            ++structureIDInstructionIndex;
     1616
     1617            i += 8;
     1618            break;
     1619        }
    14791620        case op_loop_if_lesseq: {
    14801621            emitSlowScriptCheck(i);
     
    15141655        }
    15151656        case op_put_by_val: {
     1657            // Normal slow cases - either is not an immediate imm, or is an array.
    15161658            X86Assembler::JmpSrc notImm = iter->from;
    1517             m_jit.link((++iter)->from, m_jit.label());
    15181659            m_jit.link((++iter)->from, m_jit.label());
    15191660            m_jit.link((++iter)->from, m_jit.label());
    15201661            emitFastArithIntToImmNoCheck(X86::edx);
    15211662            m_jit.link(notImm, m_jit.label());
     1663            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
    15221664            emitPutArg(X86::eax, 0);
    15231665            emitPutArg(X86::edx, 4);
    15241666            emitPutArg(X86::ecx, 8);
    15251667            emitCall(i, Machine::cti_op_put_by_val);
     1668            m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
     1669
     1670            // slow cases for immediate int accesses to arrays
     1671            m_jit.link((++iter)->from, m_jit.label());
     1672            m_jit.link((++iter)->from, m_jit.label());
     1673            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
     1674            emitPutArg(X86::eax, 0);
     1675            emitPutArg(X86::edx, 4);
     1676            emitPutArg(X86::ecx, 8);
     1677            emitCall(i, Machine::cti_op_put_by_val_array);
     1678
    15261679            i += 4;
    15271680            break;
     
    17011854        m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
    17021855    }
     1856
     1857    ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
    17031858}
    17041859
     
    17621917        X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
    17631918
     1919    for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
     1920        StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
     1921        info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
     1922        info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
     1923    }
     1924
     1925
    17641926    m_codeBlock->ctiCode = code;
    17651927}
    17661928
    1767 void* CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset)
     1929void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
    17681930{
    17691931    // Check eax is an object of the right StructureID.
     
    17841946    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    17851947   
    1786     m_codeBlock->structureIDAccessStubs.append(code);
     1948    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    17871949   
    1788     return code;
    1789 }
    1790 
    1791 void* CTI::privateCompileGetByIdProto(ExecState* exec, StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset)
    1792 {
     1950    ctiRepatchCallByReturnAddress(returnAddress, code);
     1951}
     1952
     1953void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
     1954{
     1955#if USE(CTI_REPATCH_PIC)
     1956    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
     1957
     1958    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
     1959    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
     1960
    17931961    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
    17941962    // referencing the prototype object - let's speculatively load its table nice and early!)
    1795     JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(exec));
     1963    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
    17961964    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    17971965    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
     
    18091977
    18101978    // Checks out okay! - getDirectOffset
     1979    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
     1980
     1981    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
     1982
     1983    void* code = m_jit.copy();
     1984    ASSERT(code);
     1985
     1986    // Use the repatch information to link the failure cases back to the original slow case routine.
     1987    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
     1988    X86Assembler::link(code, failureCases1, slowCaseBegin);
     1989    X86Assembler::link(code, failureCases2, slowCaseBegin);
     1990    X86Assembler::link(code, failureCases3, slowCaseBegin);
     1991
     1992    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
     1993    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
     1994    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
     1995
     1996    // Track the stub we have created so that it will be deleted later.
     1997    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
     1998
     1999    // Finally repatch the jump to slow case back in the hot path to jump here instead.
     2000    // FIXME: should revert this repatching, on failure.
     2001    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
     2002    X86Assembler::repatchBranchOffset(jmpLocation, code);
     2003#else
     2004    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
     2005    // referencing the prototype object - let's speculatively load its table nice and early!)
     2006    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
     2007    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
     2008    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
     2009
     2010    // check eax is an object of the right StructureID.
     2011    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
     2012    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
     2013    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
     2014    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
     2015
     2016    // Check the prototype object's StructureID had not changed.
     2017    StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
     2018    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
     2019    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
     2020
     2021    // Checks out okay! - getDirectOffset
    18112022    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    18122023
     
    18202031    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    18212032
    1822     m_codeBlock->structureIDAccessStubs.append(code);
    1823 
    1824     return code;
    1825 }
    1826 
    1827 void* CTI::privateCompileGetByIdChain(ExecState* exec, StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset)
     2033    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
     2034
     2035    ctiRepatchCallByReturnAddress(returnAddress, code);
     2036#endif
     2037}
     2038
     2039void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
    18282040{
    18292041    ASSERT(count);
     
    18412053    JSObject* protoObject = 0;
    18422054    for (unsigned i = 0; i<count; ++i) {
    1843         protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(exec));
     2055        protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
    18442056        currStructureID = chainEntries[i].get();
    18452057
     
    18632075    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
    18642076        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    1865     m_codeBlock->structureIDAccessStubs.append(code);
    1866     return code;
    1867 }
    1868 
    1869 void* CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset)
     2077
     2078    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
     2079
     2080    ctiRepatchCallByReturnAddress(returnAddress, code);
     2081}
     2082
     2083void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
    18702084{
    18712085    // check eax is an object of the right StructureID.
     
    18862100    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
    18872101
    1888     m_codeBlock->structureIDAccessStubs.append(code);
     2102    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    18892103   
    1890     return code;
     2104    ctiRepatchCallByReturnAddress(returnAddress, code);
    18912105}
    18922106
     
    19222136}
    19232137
    1924 void* CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC)
     2138void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
    19252139{
    19262140    Vector<X86Assembler::JmpSrc, 16> failureCases;
     
    19932207        X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
    19942208   
    1995     m_codeBlock->structureIDAccessStubs.append(code);
     2209    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    19962210   
    1997     return code;
    1998 }
    1999 
    2000 void* CTI::privateArrayLengthTrampoline()
     2211    ctiRepatchCallByReturnAddress(returnAddress, code);
     2212}
     2213
     2214void* CTI::privateCompileArrayLengthTrampoline()
    20012215{
    20022216    // Check eax is an array
     
    20262240}
    20272241
    2028 void* CTI::privateStringLengthTrampoline()
     2242void* CTI::privateCompileStringLengthTrampoline()
    20292243{
    20302244    // Check eax is a string
     
    20522266
    20532267    return code;
     2268}
     2269
     2270void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
     2271{
     2272    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
     2273
     2274    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
     2275    // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
     2276    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
     2277
     2278    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
     2279    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
     2280    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
     2281}
     2282
     2283void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
     2284{
     2285    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
     2286   
     2287    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
     2288    // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
     2289    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
     2290
     2291    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
     2292    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
     2293    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
     2294}
     2295
     2296void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
     2297{
     2298    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
     2299
     2300    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
     2301    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
     2302
     2303    // Check eax is an array
     2304    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
     2305    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
     2306    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
     2307    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
     2308
     2309    // Checks out okay! - get the length from the storage
     2310    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
     2311    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
     2312
     2313    m_jit.addl_rr(X86::ecx, X86::ecx);
     2314    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
     2315    m_jit.addl_i8r(1, X86::ecx);
     2316
     2317    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
     2318
     2319    void* code = m_jit.copy();
     2320    ASSERT(code);
     2321
     2322    // Use the repatch information to link the failure cases back to the original slow case routine.
     2323    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
     2324    X86Assembler::link(code, failureCases1, slowCaseBegin);
     2325    X86Assembler::link(code, failureCases2, slowCaseBegin);
     2326    X86Assembler::link(code, failureCases3, slowCaseBegin);
     2327
     2328    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
     2329    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
     2330    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
     2331
     2332    // Track the stub we have created so that it will be deleted later.
     2333    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
     2334
     2335    // Finally repatch the jump to slow case back in the hot path to jump here instead.
     2336    // FIXME: should revert this repatching, on failure.
     2337    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
     2338    X86Assembler::repatchBranchOffset(jmpLocation, code);
    20542339}
    20552340
Note: See TracChangeset for help on using the changeset viewer.