Changeset 36418 in webkit for trunk/JavaScriptCore


Ignore:
Timestamp:
Sep 14, 2008, 7:18:13 PM (17 years ago)
Author:
[email protected]
Message:

2008-09-14 Maciej Stachowiak <[email protected]>

Reviewed by Cameron Zwarich.


  • split the "prototype" lookup for hasInstance into opcode stream so it can be cached


~5% speedup on v8 earley-boyer test

  • API/JSCallbackObject.h: Add a parameter for the pre-looked-up prototype.
  • API/JSCallbackObjectFunctions.h: (JSC::::hasInstance): Ditto.
  • API/JSValueRef.cpp: (JSValueIsInstanceOfConstructor): Look up and pass in prototype.
  • JavaScriptCore.exp:
  • VM/CTI.cpp: (JSC::CTI::privateCompileMainPass): Pass along prototype.
  • VM/CodeBlock.cpp: (JSC::CodeBlock::dump): Print third arg.
  • VM/CodeGenerator.cpp: (JSC::CodeGenerator::emitInstanceOf): Implement this, now that there is a third argument.
  • VM/CodeGenerator.h:
  • VM/Machine.cpp: (JSC::Machine::privateExecute): Pass along the prototype. (JSC::Machine::cti_op_instanceof): ditto
  • kjs/JSObject.cpp: (JSC::JSObject::hasInstance): Expect to get a pre-looked-up prototype.
  • kjs/JSObject.h:
  • kjs/nodes.cpp: (JSC::InstanceOfNode::emitCode): Emit a get_by_id of the prototype property and pass that register to instanceof.
  • kjs/nodes.h:
Location:
trunk/JavaScriptCore
Files:
8 edited

Legend:

Unmodified
Added
Removed
  • trunk/JavaScriptCore/ChangeLog

    r36417 r36418  
    3131        property and pass that register to instanceof.
    3232        * kjs/nodes.h:
     33
     342008-09-14  Gavin Barraclough  <[email protected]>
     35
     36        Reviewed by Sam Weinig.
     37
     38        Accelerated property accesses.
     39
     40        Inline more of the array access code into the JIT code for get/put_by_val.
     42        Accelerate get/put_by_id by speculatively inlining a disabled direct access
     42        into the hot path of the code, and repatch this with the correct StructureID
     43        and property map offset once these are known.  In the case of accesses to the
     44        prototype and reading the array-length a trampoline is generated, and the
     45        branch to the slow-case is relinked to jump to this.
     46
     47        By repatching, we mean rewriting the x86 instruction stream.  Instructions are
     48        only modified in a simple fashion - altering immediate operands, memory access
     49        displacements, and branch offsets.
     50       
     51        For regular get_by_id/put_by_id accesses to an object, a StructureID in an
     52        instruction's immediate operand is updated, and a memory access operation's
     53        displacement is updated to access the correct field on the object.  In the case
     54        of more complex accesses (array length and get_by_id_prototype) the offset on
     55        the branch to slow-case is updated, to now jump to a trampoline.
     56
     57        +2.8% sunspider, +13% v8-tests
     58
     59        * VM/CTI.cpp:
     60        (JSC::CTI::emitCall):
     61        (JSC::CTI::emitJumpSlowCaseIfNotJSCell):
     62        (JSC::CTI::CTI):
     63        (JSC::CTI::privateCompileMainPass):
     64        (JSC::CTI::privateCompileSlowCases):
     65        (JSC::CTI::privateCompile):
     66        (JSC::CTI::privateCompileGetByIdSelf):
     67        (JSC::CTI::privateCompileGetByIdProto):
     68        (JSC::CTI::privateCompileGetByIdChain):
     69        (JSC::CTI::privateCompilePutByIdReplace):
     70        (JSC::CTI::privateCompilePutByIdTransition):
     71        (JSC::CTI::privateCompileArrayLengthTrampoline):
     72        (JSC::CTI::privateCompileStringLengthTrampoline):
     73        (JSC::CTI::patchGetByIdSelf):
     74        (JSC::CTI::patchPutByIdReplace):
     75        (JSC::CTI::privateCompilePatchGetArrayLength):
     76        (JSC::CTI::privateCompilePatchGetStringLength):
     77        * VM/CTI.h:
     78        (JSC::CTI::compileGetByIdSelf):
     79        (JSC::CTI::compileGetByIdProto):
     80        (JSC::CTI::compileGetByIdChain):
     81        (JSC::CTI::compilePutByIdReplace):
     82        (JSC::CTI::compilePutByIdTransition):
     83        (JSC::CTI::compileArrayLengthTrampoline):
     84        (JSC::CTI::compileStringLengthTrampoline):
     85        (JSC::CTI::compilePatchGetArrayLength):
     86        (JSC::CTI::compilePatchGetStringLength):
     87        * VM/CodeBlock.cpp:
     88        (JSC::CodeBlock::dump):
     89        (JSC::CodeBlock::~CodeBlock):
     90        * VM/CodeBlock.h:
     91        (JSC::StructureStubInfo::StructureStubInfo):
     92        (JSC::CodeBlock::getStubInfo):
     93        * VM/Machine.cpp:
     94        (JSC::Machine::tryCTICachePutByID):
     95        (JSC::Machine::tryCTICacheGetByID):
     96        (JSC::Machine::cti_op_put_by_val_array):
     97        * VM/Machine.h:
     98        * masm/X86Assembler.h:
     99        (JSC::X86Assembler::):
     100        (JSC::X86Assembler::cmpl_i8m):
     101        (JSC::X86Assembler::emitUnlinkedJa):
     102        (JSC::X86Assembler::getRelocatedAddress):
     103        (JSC::X86Assembler::getDifferenceBetweenLabels):
     104        (JSC::X86Assembler::emitModRm_opmsib):
    33105
    341062008-09-14  Gavin Barraclough  <[email protected]>
  • trunk/JavaScriptCore/VM/CTI.cpp

    r36417 r36418  
    243243#endif
    244244
    245 ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
     245ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
    246246{
    247247#if ENABLE(SAMPLING_TOOL)
    248248    m_jit.movl_i32m(1, &inCalledCode);
    249249#endif
    250     m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
     250    X86Assembler::JmpSrc call = m_jit.emitCall();
     251    m_calls.append(CallRecord(call, helper, opcodeIndex));
    251252    emitDebugExceptionCheck();
    252253#if ENABLE(SAMPLING_TOOL)
    253254    m_jit.movl_i32m(0, &inCalledCode);
    254255#endif
    255 }
    256 
    257 ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
     256
     257    return call;
     258}
     259
     260ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
    258261{
    259262#if ENABLE(SAMPLING_TOOL)
    260263    m_jit.movl_i32m(1, &inCalledCode);
    261264#endif
    262     m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
     265    X86Assembler::JmpSrc call = m_jit.emitCall();
     266    m_calls.append(CallRecord(call, helper, opcodeIndex));
    263267    emitDebugExceptionCheck();
    264268#if ENABLE(SAMPLING_TOOL)
    265269    m_jit.movl_i32m(0, &inCalledCode);
    266270#endif
    267 }
    268 
    269 ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
     271
     272    return call;
     273}
     274
     275ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
    270276{
    271277#if ENABLE(SAMPLING_TOOL)
    272278    m_jit.movl_i32m(1, &inCalledCode);
    273279#endif
    274     m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
     280    X86Assembler::JmpSrc call = m_jit.emitCall();
     281    m_calls.append(CallRecord(call, helper, opcodeIndex));
    275282    emitDebugExceptionCheck();
    276283#if ENABLE(SAMPLING_TOOL)
    277284    m_jit.movl_i32m(0, &inCalledCode);
    278285#endif
    279 }
    280 
    281 ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
     286
     287    return call;
     288}
     289
     290ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
    282291{
    283292#if ENABLE(SAMPLING_TOOL)
    284293    m_jit.movl_i32m(1, &inCalledCode);
    285294#endif
    286     m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
     295    X86Assembler::JmpSrc call = m_jit.emitCall();
     296    m_calls.append(CallRecord(call, helper, opcodeIndex));
    287297    emitDebugExceptionCheck();
    288298#if ENABLE(SAMPLING_TOOL)
    289299    m_jit.movl_i32m(0, &inCalledCode);
    290300#endif
    291 }
    292 
    293 ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
     301
     302    return call;
     303}
     304
     305ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
    294306{
    295307#if ENABLE(SAMPLING_TOOL)
    296308    m_jit.movl_i32m(1, &inCalledCode);
    297309#endif
    298     m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
     310    X86Assembler::JmpSrc call = m_jit.emitCall();
     311    m_calls.append(CallRecord(call, helper, opcodeIndex));
    299312    emitDebugExceptionCheck();
    300313#if ENABLE(SAMPLING_TOOL)
    301314    m_jit.movl_i32m(0, &inCalledCode);
    302315#endif
     316
     317    return call;
     318}
     319
     320ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
     321{
     322    m_jit.testl_i32r(JSImmediate::TagMask, reg);
     323    m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
    303324}
    304325
     
    362383    , m_codeBlock(codeBlock)
    363384    , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
     385    , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
    364386{
    365387}
     
    469491    Instruction* instruction = m_codeBlock->instructions.begin();
    470492    unsigned instructionCount = m_codeBlock->instructions.size();
     493
     494    unsigned structureIDInstructionIndex = 0;
    471495
    472496    for (unsigned i = 0; i < instructionCount; ) {
     
    608632        }
    609633        case op_put_by_id: {
    610             Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
    611             emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
     634            // In order to be able to repatch both the StructureID, and the object offset, we store one pointer,
     635            // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
     636            // such that the StructureID & offset are always at the same distance from this.
     637
    612638            emitGetArg(instruction[i + 1].u.operand, X86::eax);
    613639            emitGetArg(instruction[i + 3].u.operand, X86::edx);
    614             emitPutArg(X86::eax, 0); // leave the base in eax
    615             emitPutArg(X86::edx, 8); // leave the base in edx
    616             emitCall(i, Machine::cti_op_put_by_id);
     640
     641            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
     642            X86Assembler::JmpDst hotPathBegin = m_jit.label();
     643            m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
     644            ++structureIDInstructionIndex;
     645
     646            // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
     647            emitJumpSlowCaseIfNotJSCell(X86::eax, i);
     648            // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
     649            m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
     650            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
     651            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
     652
     653            // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
     654            m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
     655            m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
     656            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
     657
    617658            i += 8;
    618659            break;
    619660        }
    620661        case op_get_by_id: {
    621             Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
    622             emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
     662            // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
     663            // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
     664            // to array-length / prototype access trampolines, and finally we also use the property-map access offset as a label
     665            // to jump back to if one of these trampolines finds a match.
     666
    623667            emitGetArg(instruction[i + 2].u.operand, X86::eax);
    624             emitPutArg(X86::eax, 0); // leave the base in eax
    625             emitCall(i, Machine::cti_op_get_by_id);
    626             emitPutResult(instruction[i + 1].u.operand);
     668
     669            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
     670
     671            X86Assembler::JmpDst hotPathBegin = m_jit.label();
     672            m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
     673            ++structureIDInstructionIndex;
     674
     675            emitJumpSlowCaseIfNotJSCell(X86::eax, i);
     676            m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
     677            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
     678            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
     679            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
     680
     681            m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
     682            m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
     683            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
     684            emitPutResult(instruction[i + 1].u.operand, X86::ecx);
     685
    627686            i += 8;
    628687            break;
     
    778837            m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    779838            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
     839
     840            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
     841            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    780842            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
    781843            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
    782844
    783             m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
    784             m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
     845            // Get the value from the vector
     846            m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
    785847            emitPutResult(instruction[i + 1].u.operand);
    786848            i += 4;
     
    811873            emitGetArg(instruction[i + 1].u.operand, X86::eax);
    812874            emitGetArg(instruction[i + 2].u.operand, X86::edx);
    813             emitGetArg(instruction[i + 3].u.operand, X86::ecx);
    814875            emitJumpSlowCaseIfNotImm(X86::edx, i);
    815876            emitFastArithImmToInt(X86::edx);
     
    818879            m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    819880            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
     881
     882            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
     883            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    820884            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
     885            X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
     886            // No; oh well, check if the access is within the vector - if so, we may still be okay.
     887            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
    821888            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
    822889
    823             m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
    824             m_jit.movl_rm(X86::ecx, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::eax, X86::edx, sizeof(JSValue*));
     890            // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
     891            // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
     892            m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
     893            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
     894
     895            // All good - put the value into the array.
     896            m_jit.link(inFastVector, m_jit.label());
     897            emitGetArg(instruction[i + 3].u.operand, X86::eax);
     898            m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
    825899            i += 4;
    826900            break;
     
    13401414        }
    13411415    }
     1416
     1417    ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
    13421418}
    13431419
     
    13641440void CTI::privateCompileSlowCases()
    13651441{
     1442    unsigned structureIDInstructionIndex = 0;
     1443
    13661444    Instruction* instruction = m_codeBlock->instructions.begin();
    13671445    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
    1368         int i = iter->to;
     1446        unsigned i = iter->to;
    13691447        m_jit.emitRestoreArgumentReference();
    13701448        switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
     
    14031481        }
    14041482        case op_get_by_val: {
     1483            // The slow case that handles accesses to arrays (below) may jump back up to here.
     1484            X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
     1485
    14051486            X86Assembler::JmpSrc notImm = iter->from;
    1406             m_jit.link((++iter)->from, m_jit.label());
    14071487            m_jit.link((++iter)->from, m_jit.label());
    14081488            m_jit.link((++iter)->from, m_jit.label());
     
    14131493            emitCall(i, Machine::cti_op_get_by_val);
    14141494            emitPutResult(instruction[i + 1].u.operand);
     1495            m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
     1496
     1497            // This is slow case that handles accesses to arrays above the fast cut-off.
     1498            // First, check if this is an access to the vector
     1499            m_jit.link((++iter)->from, m_jit.label());
     1500            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
     1501            m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
     1502
     1503            // okay, missed the fast region, but it is still in the vector.  Get the value.
     1504            m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
     1505            // Check whether the value loaded is zero; if so we need to return undefined.
     1506            m_jit.testl_rr(X86::ecx, X86::ecx);
     1507            m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
     1508            emitPutResult(instruction[i + 1].u.operand, X86::ecx);
     1509           
    14151510            i += 4;
    14161511            break;
     
    14771572            break;
    14781573        }
     1574        case op_put_by_id: {
     1575            m_jit.link(iter->from, m_jit.label());
     1576            m_jit.link((++iter)->from, m_jit.label());
     1577
     1578            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
     1579            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
     1580            emitPutArg(X86::eax, 0);
     1581            emitPutArg(X86::edx, 8);
     1582            X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
     1583
     1584            // Track the location of the call; this will be used to recover repatch information.
     1585            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
     1586            m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
     1587            ++structureIDInstructionIndex;
     1588
     1589            i += 8;
     1590            break;
     1591        }
     1592        case op_get_by_id: {
     1593            // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
     1594            // so that we only need to track one pointer into the slow case code - we track a pointer to the location
     1595            // of the call (which we can use to look up the repatch information), but should an array-length or
     1596            // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
     1597            // the distance from the call to the head of the slow case.
     1598
     1599            m_jit.link(iter->from, m_jit.label());
     1600            m_jit.link((++iter)->from, m_jit.label());
     1601
     1602#ifndef NDEBUG
     1603            X86Assembler::JmpDst coldPathBegin = m_jit.label();
     1604#endif       
     1605            emitPutArg(X86::eax, 0);
     1606            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
     1607            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
     1608            X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
     1609            ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
     1610            emitPutResult(instruction[i + 1].u.operand);
     1611
     1612            // Track the location of the call; this will be used to recover repatch information.
     1613            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
     1614            m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
     1615            ++structureIDInstructionIndex;
     1616
     1617            i += 8;
     1618            break;
     1619        }
    14791620        case op_loop_if_lesseq: {
    14801621            emitSlowScriptCheck(i);
     
    15141655        }
    15151656        case op_put_by_val: {
     1657            // Normal slow cases - either is not an immediate imm, or is an array.
    15161658            X86Assembler::JmpSrc notImm = iter->from;
    1517             m_jit.link((++iter)->from, m_jit.label());
    15181659            m_jit.link((++iter)->from, m_jit.label());
    15191660            m_jit.link((++iter)->from, m_jit.label());
    15201661            emitFastArithIntToImmNoCheck(X86::edx);
    15211662            m_jit.link(notImm, m_jit.label());
     1663            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
    15221664            emitPutArg(X86::eax, 0);
    15231665            emitPutArg(X86::edx, 4);
    15241666            emitPutArg(X86::ecx, 8);
    15251667            emitCall(i, Machine::cti_op_put_by_val);
     1668            m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
     1669
     1670            // slow cases for immediate int accesses to arrays
     1671            m_jit.link((++iter)->from, m_jit.label());
     1672            m_jit.link((++iter)->from, m_jit.label());
     1673            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
     1674            emitPutArg(X86::eax, 0);
     1675            emitPutArg(X86::edx, 4);
     1676            emitPutArg(X86::ecx, 8);
     1677            emitCall(i, Machine::cti_op_put_by_val_array);
     1678
    15261679            i += 4;
    15271680            break;
     
    17011854        m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
    17021855    }
     1856
     1857    ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
    17031858}
    17041859
     
    17621917        X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
    17631918
     1919    for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
     1920        StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
     1921        info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
     1922        info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
     1923    }
     1924
     1925
    17641926    m_codeBlock->ctiCode = code;
    17651927}
    17661928
    1767 void* CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset)
     1929void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
    17681930{
    17691931    // Check eax is an object of the right StructureID.
     
    17841946    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    17851947   
    1786     m_codeBlock->structureIDAccessStubs.append(code);
     1948    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    17871949   
    1788     return code;
    1789 }
    1790 
    1791 void* CTI::privateCompileGetByIdProto(ExecState* exec, StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset)
    1792 {
     1950    ctiRepatchCallByReturnAddress(returnAddress, code);
     1951}
     1952
     1953void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
     1954{
     1955#if USE(CTI_REPATCH_PIC)
     1956    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
     1957
     1958    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
     1959    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
     1960
    17931961    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
    17941962    // referencing the prototype object - let's speculatively load its table nice and early!)
    1795     JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(exec));
     1963    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
    17961964    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    17971965    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
     
    18091977
    18101978    // Checks out okay! - getDirectOffset
     1979    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
     1980
     1981    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
     1982
     1983    void* code = m_jit.copy();
     1984    ASSERT(code);
     1985
     1986    // Use the repatch information to link the failure cases back to the original slow case routine.
     1987    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
     1988    X86Assembler::link(code, failureCases1, slowCaseBegin);
     1989    X86Assembler::link(code, failureCases2, slowCaseBegin);
     1990    X86Assembler::link(code, failureCases3, slowCaseBegin);
     1991
     1992    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
     1993    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
     1994    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
     1995
     1996    // Track the stub we have created so that it will be deleted later.
     1997    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
     1998
     1999    // Finally repatch the jump to slow case back in the hot path to jump here instead.
     2000    // FIXME: should revert this repatching, on failure.
     2001    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
     2002    X86Assembler::repatchBranchOffset(jmpLocation, code);
     2003#else
     2004    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
     2005    // referencing the prototype object - let's speculatively load its table nice and early!)
     2006    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
     2007    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
     2008    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
     2009
     2010    // check eax is an object of the right StructureID.
     2011    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
     2012    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
     2013    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
     2014    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
     2015
     2016    // Check the prototype object's StructureID had not changed.
     2017    StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
     2018    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
     2019    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
     2020
     2021    // Checks out okay! - getDirectOffset
    18112022    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    18122023
     
    18202031    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    18212032
    1822     m_codeBlock->structureIDAccessStubs.append(code);
    1823 
    1824     return code;
    1825 }
    1826 
    1827 void* CTI::privateCompileGetByIdChain(ExecState* exec, StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset)
     2033    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
     2034
     2035    ctiRepatchCallByReturnAddress(returnAddress, code);
     2036#endif
     2037}
     2038
     2039void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
    18282040{
    18292041    ASSERT(count);
     
    18412053    JSObject* protoObject = 0;
    18422054    for (unsigned i = 0; i<count; ++i) {
    1843         protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(exec));
     2055        protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
    18442056        currStructureID = chainEntries[i].get();
    18452057
     
    18632075    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
    18642076        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    1865     m_codeBlock->structureIDAccessStubs.append(code);
    1866     return code;
    1867 }
    1868 
    1869 void* CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset)
     2077
     2078    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
     2079
     2080    ctiRepatchCallByReturnAddress(returnAddress, code);
     2081}
     2082
     2083void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
    18702084{
    18712085    // check eax is an object of the right StructureID.
     
    18862100    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
    18872101
    1888     m_codeBlock->structureIDAccessStubs.append(code);
     2102    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    18892103   
    1890     return code;
     2104    ctiRepatchCallByReturnAddress(returnAddress, code);
    18912105}
    18922106
     
    19222136}
    19232137
    1924 void* CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC)
     2138void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
    19252139{
    19262140    Vector<X86Assembler::JmpSrc, 16> failureCases;
     
    19932207        X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
    19942208   
    1995     m_codeBlock->structureIDAccessStubs.append(code);
     2209    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    19962210   
    1997     return code;
    1998 }
    1999 
    2000 void* CTI::privateArrayLengthTrampoline()
     2211    ctiRepatchCallByReturnAddress(returnAddress, code);
     2212}
     2213
     2214void* CTI::privateCompileArrayLengthTrampoline()
    20012215{
    20022216    // Check eax is an array
     
    20262240}
    20272241
    2028 void* CTI::privateStringLengthTrampoline()
     2242void* CTI::privateCompileStringLengthTrampoline()
    20292243{
    20302244    // Check eax is a string
     
    20522266
    20532267    return code;
     2268}
     2269
     2270void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
     2271{
     2272    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
     2273
     2274    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
     2275    // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
     2276    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
     2277
      2278    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
     2279    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
     2280    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
     2281}
     2282
     2283void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
     2284{
     2285    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
     2286   
     2287    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
     2288    // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
     2289    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
     2290
      2291    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
     2292    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
     2293    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
     2294}
     2295
     2296void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
     2297{
     2298    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
     2299
     2300    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
     2301    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
     2302
     2303    // Check eax is an array
     2304    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
     2305    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
     2306    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
     2307    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
     2308
     2309    // Checks out okay! - get the length from the storage
     2310    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
     2311    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
     2312
     2313    m_jit.addl_rr(X86::ecx, X86::ecx);
     2314    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
     2315    m_jit.addl_i8r(1, X86::ecx);
     2316
     2317    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
     2318
     2319    void* code = m_jit.copy();
     2320    ASSERT(code);
     2321
     2322    // Use the repatch information to link the failure cases back to the original slow case routine.
     2323    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
     2324    X86Assembler::link(code, failureCases1, slowCaseBegin);
     2325    X86Assembler::link(code, failureCases2, slowCaseBegin);
     2326    X86Assembler::link(code, failureCases3, slowCaseBegin);
     2327
     2328    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
     2329    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
     2330    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
     2331
     2332    // Track the stub we have created so that it will be deleted later.
     2333    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
     2334
      2336    // Finally repatch the jump to slow case back in the hot path to jump here instead.
     2336    // FIXME: should revert this repatching, on failure.
     2337    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
     2338    X86Assembler::repatchBranchOffset(jmpLocation, code);
    20542339}
    20552340
  • trunk/JavaScriptCore/VM/CTI.h

    r36402 r36418  
    2929#if ENABLE(CTI)
    3030
     31#define WTF_USE_CTI_REPATCH_PIC 1
     32
    3133#include "Opcode.h"
    3234#include "RegisterFile.h"
     
    217219    };
    218220
     221    struct StructureStubCompilationInfo {
     222        X86Assembler::JmpSrc callReturnLocation;
     223        X86Assembler::JmpDst hotPathBegin;
     224    };
     225
    219226    extern "C" {
    220227        JSValue* ctiTrampoline(void* code, ExecState* exec, RegisterFile* registerFile, Register* r, ScopeChainNode* scopeChain, CodeBlock* codeBlock, JSValue** exception, Profiler**);
     
    226233
    227234    class CTI {
     235        static const int repatchGetByIdDefaultStructureID = -1;
     236        // Magic number - initial offset cannot be representable as a signed 8bit value, or the X86Assembler
     237        // will compress the displacement, and we may not be able to fit a repatched offset.
     238        static const int repatchGetByIdDefaultOffset = 256;
     239
     240        // These architecture specific value are used to enable repatching - see comment on op_put_by_id.
     241        static const int repatchOffsetPutByIdStructureID = 19;
     242        static const int repatchOffsetPutByIdPropertyMapOffset = 34;
     243        // These architecture specific value are used to enable repatching - see comment on op_get_by_id.
     244        static const int repatchOffsetGetByIdStructureID = 19;
     245        static const int repatchOffsetGetByIdBranchToSlowCase = 25;
     246        static const int repatchOffsetGetByIdPropertyMapOffset = 34;
     247        static const int repatchOffsetGetByIdSlowCaseCall = 17;
     248
    228249    public:
    229250        static void compile(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
     
    237258#endif
    238259
    239         static void* compileGetByIdSelf(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset)
    240         {
    241             CTI cti(machine, exec, codeBlock);
    242             return cti.privateCompileGetByIdSelf(structureID, cachedOffset);
    243         }
    244 
    245         static void* compileGetByIdProto(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset)
    246         {
    247             CTI cti(machine, exec, codeBlock);
    248             return cti.privateCompileGetByIdProto(exec, structureID, prototypeStructureID, cachedOffset);
    249         }
    250 
    251         static void* compileGetByIdChain(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset)
    252         {
    253             CTI cti(machine, exec, codeBlock);
    254             return cti.privateCompileGetByIdChain(exec, structureID, chain, count, cachedOffset);
    255         }
    256 
    257         static void* compilePutByIdReplace(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset)
    258         {
    259             CTI cti(machine, exec, codeBlock);
    260             return cti.privateCompilePutByIdReplace(structureID, cachedOffset);
    261         }
    262        
    263         static void* compilePutByIdTransition(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC)
    264         {
    265             CTI cti(machine, exec, codeBlock);
    266             return cti.privateCompilePutByIdTransition(oldStructureID, newStructureID, cachedOffset, sIDC);
     260        static void compileGetByIdSelf(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
     261        {
     262            CTI cti(machine, exec, codeBlock);
     263            cti.privateCompileGetByIdSelf(structureID, cachedOffset, returnAddress);
     264        }
     265
     266        static void compileGetByIdProto(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
     267        {
     268            CTI cti(machine, exec, codeBlock);
     269            cti.privateCompileGetByIdProto(structureID, prototypeStructureID, cachedOffset, returnAddress);
     270        }
     271
     272        static void compileGetByIdChain(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
     273        {
     274            CTI cti(machine, exec, codeBlock);
     275            cti.privateCompileGetByIdChain(structureID, chain, count, cachedOffset, returnAddress);
     276        }
     277
     278        static void compilePutByIdReplace(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
     279        {
     280            CTI cti(machine, exec, codeBlock);
     281            cti.privateCompilePutByIdReplace(structureID, cachedOffset, returnAddress);
     282        }
     283       
     284        static void compilePutByIdTransition(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
     285        {
     286            CTI cti(machine, exec, codeBlock);
     287            cti.privateCompilePutByIdTransition(oldStructureID, newStructureID, cachedOffset, sIDC, returnAddress);
    267288        }
    268289
     
    270291        {
    271292            CTI cti(machine, exec, codeBlock);
    272             return cti.privateArrayLengthTrampoline();
     293            return cti.privateCompileArrayLengthTrampoline();
    273294        }
    274295
     
    276297        {
    277298            CTI cti(machine, exec, codeBlock);
    278             return cti.privateStringLengthTrampoline();
     299            return cti.privateCompileStringLengthTrampoline();
     300        }
     301
     302        static void patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress);
     303        static void patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress);
     304
     305        static void compilePatchGetArrayLength(Machine* machine, ExecState* exec, CodeBlock* codeBlock, void* returnAddress)
     306        {
     307            CTI cti(machine, exec, codeBlock);
     308            return cti.privateCompilePatchGetArrayLength(returnAddress);
    279309        }
    280310
     
    294324        void privateCompileSlowCases();
    295325        void privateCompile();
    296         void* privateCompileGetByIdSelf(StructureID*, size_t cachedOffset);
    297         void* privateCompileGetByIdProto(ExecState*, StructureID*, StructureID* prototypeStructureID, size_t cachedOffset);
    298         void* privateCompileGetByIdChain(ExecState*, StructureID*, StructureIDChain*, size_t count, size_t cachedOffset);
    299         void* privateCompilePutByIdReplace(StructureID*, size_t cachedOffset);
    300         void* privateCompilePutByIdTransition(StructureID*, StructureID*, size_t cachedOffset, StructureIDChain*);
    301         void* privateArrayLengthTrampoline();
    302         void* privateStringLengthTrampoline();
     326        void privateCompileGetByIdSelf(StructureID*, size_t cachedOffset, void* returnAddress);
     327        void privateCompileGetByIdProto(StructureID*, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress);
     328        void privateCompileGetByIdChain(StructureID*, StructureIDChain*, size_t count, size_t cachedOffset, void* returnAddress);
     329        void privateCompilePutByIdReplace(StructureID*, size_t cachedOffset, void* returnAddress);
     330        void privateCompilePutByIdTransition(StructureID*, StructureID*, size_t cachedOffset, StructureIDChain*, void* returnAddress);
     331
     332        void* privateCompileArrayLengthTrampoline();
     333        void* privateCompileStringLengthTrampoline();
     334        void privateCompilePatchGetArrayLength(void* returnAddress);
    303335
    304336        enum CompileOpCallType { OpCallNormal, OpCallEval, OpConstruct };
     
    320352        unsigned getDeTaggedConstantImmediate(JSValue* imm);
    321353
     354        void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex);
    322355        void emitJumpSlowCaseIfNotImm(X86Assembler::RegisterID, unsigned opcodeIndex);
    323356        void emitJumpSlowCaseIfNotImms(X86Assembler::RegisterID, X86Assembler::RegisterID, unsigned opcodeIndex);
     
    332365        void emitDebugExceptionCheck();
    333366
    334         void emitCall(unsigned opcodeIndex, CTIHelper_j);
    335         void emitCall(unsigned opcodeIndex, CTIHelper_p);
    336         void emitCall(unsigned opcodeIndex, CTIHelper_b);
    337         void emitCall(unsigned opcodeIndex, CTIHelper_v);
    338         void emitCall(unsigned opcodeIndex, CTIHelper_s);
     367        X86Assembler::JmpSrc emitCall(unsigned opcodeIndex, CTIHelper_j);
     368        X86Assembler::JmpSrc emitCall(unsigned opcodeIndex, CTIHelper_p);
     369        X86Assembler::JmpSrc emitCall(unsigned opcodeIndex, CTIHelper_b);
     370        X86Assembler::JmpSrc emitCall(unsigned opcodeIndex, CTIHelper_v);
     371        X86Assembler::JmpSrc emitCall(unsigned opcodeIndex, CTIHelper_s);
    339372       
    340373        void emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst);
     
    353386        Vector<CallRecord> m_calls;
    354387        Vector<X86Assembler::JmpDst> m_labels;
     388        Vector<StructureStubCompilationInfo> m_structureStubCompilationInfo;
    355389        Vector<JmpTable> m_jmpTable;
    356390
  • trunk/JavaScriptCore/VM/CodeBlock.cpp

    r36417 r36418  
    278278        size_t i = 0;
    279279        do {
    280              printStructureIDs(&instructions[structureIDInstructions[i]]);
     280             printStructureIDs(&instructions[structureIDInstructions[i].opcodeIndex]);
    281281             ++i;
    282282        } while (i < structureIDInstructions.size());
     
    876876{
    877877    size_t size = structureIDInstructions.size();
    878     for (size_t i = 0; i < size; ++i)
    879         derefStructureIDs(&instructions[structureIDInstructions[i]]);
    880 
    881     size = structureIDAccessStubs.size();
    882     for (size_t i = 0; i < size; ++i)
    883         fastFree(structureIDAccessStubs[i]);
    884 
     878    for (size_t i = 0; i < size; ++i) {
     879        derefStructureIDs(&instructions[structureIDInstructions[i].opcodeIndex]);
     880        if (structureIDInstructions[i].stubRoutine)
     881            fastFree(structureIDInstructions[i].stubRoutine);
     882    }
    885883#if ENABLE(CTI)
    886884    if (ctiCode)
  • trunk/JavaScriptCore/VM/CodeBlock.h

    r36267 r36418  
    7777    };
    7878
     79    struct StructureStubInfo {
     80        StructureStubInfo(unsigned opcodeIndex)
     81            : opcodeIndex(opcodeIndex)
     82            , stubRoutine(0)
     83            , callReturnLocation(0)
     84            , hotPathBegin(0)
     85        {
     86        }
     87   
     88        unsigned opcodeIndex;
     89        void* stubRoutine;
     90        void* callReturnLocation;
     91        void* hotPathBegin;
     92    };
     93
    7994    struct StringJumpTable {
    8095        typedef HashMap<RefPtr<UString::Rep>, OffsetLocation> StringOffsetTable;
     
    200215        void derefStructureIDs(Instruction* vPC) const;
    201216
     217        StructureStubInfo& getStubInfo(void* returnAddress)
     218        {
     219            // FIXME: would a binary chop be faster here?
     220            for (unsigned i = 0; i < structureIDInstructions.size(); ++i) {
     221                if (structureIDInstructions[i].callReturnLocation == returnAddress)
     222                    return structureIDInstructions[i];
     223            }
     224           
     225            ASSERT_NOT_REACHED();
     226            // keep the compiler happy.
     227            static StructureStubInfo duff(0);
     228            return duff;
     229        }
     230
    202231        ScopeNode* ownerNode;
    203232        JSGlobalData* globalData;
     
    219248
    220249        Vector<Instruction> instructions;
    221         Vector<size_t> structureIDInstructions;
    222         Vector<void*> structureIDAccessStubs;
     250        Vector<StructureStubInfo> structureIDInstructions;
    223251
    224252        // Constant pool
  • trunk/JavaScriptCore/VM/Machine.cpp

    r36417 r36418  
    38133813        vPC[7] = slot.cachedOffset();
    38143814        codeBlock->refStructureIDs(vPC);
    3815         ctiRepatchCallByReturnAddress(returnAddress, CTI::compilePutByIdTransition(this, exec, codeBlock, structureID->previousID(), structureID, slot.cachedOffset(), chain));
     3815        CTI::compilePutByIdTransition(this, exec, codeBlock, structureID->previousID(), structureID, slot.cachedOffset(), chain, returnAddress);
    38163816        return;
    38173817    }
     
    38223822    codeBlock->refStructureIDs(vPC);
    38233823
    3824     ctiRepatchCallByReturnAddress(returnAddress, CTI::compilePutByIdReplace(this, exec, codeBlock, structureID, slot.cachedOffset()));
     3824#if USE(CTI_REPATCH_PIC)
     3825    UNUSED_PARAM(exec);
     3826    CTI::patchPutByIdReplace(codeBlock, structureID, slot.cachedOffset(), returnAddress);
     3827#else
     3828    CTI::compilePutByIdReplace(this, exec, codeBlock, structureID, slot.cachedOffset(), returnAddress);
     3829#endif
    38253830}
    38263831
     
    38463851
    38473852    if (isJSArray(baseValue) && propertyName == exec->propertyNames().length) {
     3853#if USE(CTI_REPATCH_PIC)
     3854        CTI::compilePatchGetArrayLength(this, exec, codeBlock, returnAddress);
     3855#else
    38483856        ctiRepatchCallByReturnAddress(returnAddress, getCTIArrayLengthTrampoline(exec, codeBlock));
     3857#endif
    38493858        return;
    38503859    }
    38513860    if (isJSString(baseValue) && propertyName == exec->propertyNames().length) {
      3861        // The tradeoff of compiling a repatched inline string length access routine does not seem
     3862        // to pay off, so we currently only do this for arrays.
    38523863        ctiRepatchCallByReturnAddress(returnAddress, getCTIStringLengthTrampoline(exec, codeBlock));
    38533864        return;
     
    38893900        codeBlock->refStructureIDs(vPC);
    38903901       
    3891         ctiRepatchCallByReturnAddress(returnAddress, CTI::compileGetByIdSelf(this, exec, codeBlock, structureID, slot.cachedOffset()));
     3902#if USE(CTI_REPATCH_PIC)
     3903        CTI::patchGetByIdSelf(codeBlock, structureID, slot.cachedOffset(), returnAddress);
     3904#else
     3905        CTI::compileGetByIdSelf(this, exec, codeBlock, structureID, slot.cachedOffset(), returnAddress);
     3906#endif
    38923907        return;
    38933908    }
     
    39123927        codeBlock->refStructureIDs(vPC);
    39133928
    3914         ctiRepatchCallByReturnAddress(returnAddress, CTI::compileGetByIdProto(this, exec, codeBlock, structureID, slotBaseObject->structureID(), slot.cachedOffset()));
     3929        CTI::compileGetByIdProto(this, exec, codeBlock, structureID, slotBaseObject->structureID(), slot.cachedOffset(), returnAddress);
    39153930        return;
    39163931    }
     
    39543969    codeBlock->refStructureIDs(vPC);
    39553970
    3956     ctiRepatchCallByReturnAddress(returnAddress, CTI::compileGetByIdChain(this, exec, codeBlock, structureID, chain, count, slot.cachedOffset()));
     3971    CTI::compileGetByIdChain(this, exec, codeBlock, structureID, chain, count, slot.cachedOffset(), returnAddress);
    39573972}
    39583973
     
    46414656}
    46424657
     4658void Machine::cti_op_put_by_val_array(CTI_ARGS)
     4659{
     4660    ExecState* exec = ARG_exec;
     4661
     4662    JSValue* baseValue = ARG_src1;
     4663    int i = ARG_int2;
     4664    JSValue* value = ARG_src3;
     4665
     4666    ASSERT(exec->machine()->isJSArray(baseValue));
     4667
     4668    if (LIKELY(i >= 0))
     4669        static_cast<JSArray*>(baseValue)->JSArray::put(exec, i, value);
     4670    else {
     4671        Identifier property(exec, JSImmediate::from(i)->toString(exec));
     4672        // FIXME: can toString throw an exception here?
     4673        if (!exec->hadException()) { // Don't put to an object if toString threw an exception.
     4674            PutPropertySlot slot;
     4675            baseValue->put(exec, property, value, slot);
     4676        }
     4677    }
     4678
     4679    VM_CHECK_EXCEPTION_AT_END();
     4680}
     4681
    46434682JSValue* Machine::cti_op_lesseq(CTI_ARGS)
    46444683{
  • trunk/JavaScriptCore/VM/Machine.h

    r36412 r36418  
    170170        static JSValue* SFX_CALL cti_op_sub(CTI_ARGS);
    171171        static void SFX_CALL cti_op_put_by_val(CTI_ARGS);
     172        static void SFX_CALL cti_op_put_by_val_array(CTI_ARGS);
    172173        static JSValue* SFX_CALL cti_op_lesseq(CTI_ARGS);
    173174        static int SFX_CALL cti_op_loop_if_true(CTI_ARGS);
  • trunk/JavaScriptCore/masm/X86Assembler.h

    r36401 r36418  
    210210        OP2_JNE_rel32   = 0x85,
    211211        OP2_JBE_rel32   = 0x86,
     212        OP2_JA_rel32    = 0x87,
    212213        OP2_JL_rel32    = 0x8C,
    213214        OP2_JGE_rel32   = 0x8D,
     
    370371        emitModRm_opm(GROUP1_OP_CMP, addr);
    371372        m_buffer->putInt(imm);
     373    }
     374
     375    void cmpl_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
     376    {
     377        m_buffer->putByte(OP_GROUP1_EvIb);
     378        emitModRm_opmsib(GROUP1_OP_CMP, base, index, scale, offset);
     379        m_buffer->putByte(imm);
    372380    }
    373381
     
    742750    }
    743751   
     752    JmpSrc emitUnlinkedJa()
     753    {
     754        m_buffer->putByte(OP_2BYTE_ESCAPE);
     755        m_buffer->putByte(OP2_JA_rel32);
     756        m_buffer->putInt(0);
     757        return JmpSrc(m_buffer->getOffset());
     758    }
     759   
    744760    JmpSrc emitUnlinkedJae()
    745761    {
     
    786802    }
    787803   
    788     void* getRelocatedAddress(void* code, JmpSrc jump)
     804    static void* getRelocatedAddress(void* code, JmpSrc jump)
    789805    {
    790806        return reinterpret_cast<void*>((ptrdiff_t)code + jump.m_offset);
    791807    }
    792808   
    793     void* getRelocatedAddress(void* code, JmpDst jump)
     809    static void* getRelocatedAddress(void* code, JmpDst jump)
    794810    {
    795811        return reinterpret_cast<void*>((ptrdiff_t)code + jump.m_offset);
     812    }
     813   
     814    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
     815    {
     816        return dst.m_offset - src.m_offset;
     817    }
     818   
     819    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
     820    {
     821        return dst.m_offset - src.m_offset;
     822    }
     823   
     824    static void repatchImmediate(intptr_t where, int32_t value)
     825    {
     826        reinterpret_cast<int32_t*>(where)[-1] = value;
     827    }
     828   
     829    static void repatchDisplacement(intptr_t where, intptr_t value)
     830    {
     831        reinterpret_cast<intptr_t*>(where)[-1] = value;
     832    }
     833   
     834    static void repatchBranchOffset(intptr_t where, void* destination)
     835    {
     836        reinterpret_cast<intptr_t*>(where)[-1] = (reinterpret_cast<intptr_t>(destination) - where);
    796837    }
    797838   
     
    932973    }
    933974
     975    void emitModRm_opmsib(OpcodeID opcode, RegisterID base, RegisterID index, int scale, int offset)
     976    {
     977        emitModRm_rmsib(static_cast<RegisterID>(opcode), base, index, scale, offset);
     978    }
     979
    934980    JITCodeBuffer* m_buffer;
    935981};
Note: See TracChangeset for help on using the changeset viewer.