Timestamp:
Jul 30, 2009, 1:57:44 PM (16 years ago)
Author:
[email protected]
Message:

Merged nitro-extreme branch into trunk.

File:
1 edited

  • trunk/JavaScriptCore/jit/JITPropertyAccess.cpp

    r46438 r46598  
    11/*
    2  * Copyright (C) 2008 Apple Inc. All rights reserved.
     2 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
    33 *
    44 * Redistribution and use in source and binary forms, with or without
     
    4848namespace JSC {
    4949
     50#if USE(JSVALUE32_64)
     51
     52void JIT::emit_op_put_by_index(Instruction* currentInstruction)
     53{
     54    unsigned base = currentInstruction[1].u.operand;
     55    unsigned property = currentInstruction[2].u.operand;
     56    unsigned value = currentInstruction[3].u.operand;
     57
     58    JITStubCall stubCall(this, cti_op_put_by_index);
     59    stubCall.addArgument(base);
     60    stubCall.addArgument(Imm32(property));
     61    stubCall.addArgument(value);
     62    stubCall.call();
     63}
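
Everything in this USE(JSVALUE32_64) branch handles a value as a pair of 32-bit words, a tag and a payload, rather than as a single 64-bit immediate; that is why the emitters above and below move results around in register pairs (typically regT1 for the tag, regT0 for the payload). A minimal sketch of the layout this assumes (illustrative names, little-endian; not the actual JSC declaration):

    #include <cstdint>

    // Hedged sketch of the 32_64 value layout; names are illustrative, not
    // the actual JSC declarations.
    union Value32_64 {
        double asDouble;          // doubles occupy the full eight bytes
        struct Bits {
            int32_t payload;      // cell pointer, int32, or bool
            int32_t tag;          // discriminator such as CellTag or Int32Tag
        } asBits;                 // little-endian: payload at +0, tag at +4
    };

Keeping the payload in the low word is why so many loads and stores below address the payload at some offset and the tag at that offset + 4.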
     64
     65void JIT::emit_op_put_getter(Instruction* currentInstruction)
     66{
     67    unsigned base = currentInstruction[1].u.operand;
     68    unsigned property = currentInstruction[2].u.operand;
     69    unsigned function = currentInstruction[3].u.operand;
     70
     71    JITStubCall stubCall(this, cti_op_put_getter);
     72    stubCall.addArgument(base);
     73    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
     74    stubCall.addArgument(function);
     75    stubCall.call();
     76}
     77
     78void JIT::emit_op_put_setter(Instruction* currentInstruction)
     79{
     80    unsigned base = currentInstruction[1].u.operand;
     81    unsigned property = currentInstruction[2].u.operand;
     82    unsigned function = currentInstruction[3].u.operand;
     83
     84    JITStubCall stubCall(this, cti_op_put_setter);
     85    stubCall.addArgument(base);
     86    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
     87    stubCall.addArgument(function);
     88    stubCall.call();
     89}
     90
     91void JIT::emit_op_del_by_id(Instruction* currentInstruction)
     92{
     93    unsigned dst = currentInstruction[1].u.operand;
     94    unsigned base = currentInstruction[2].u.operand;
     95    unsigned property = currentInstruction[3].u.operand;
     96
     97    JITStubCall stubCall(this, cti_op_del_by_id);
     98    stubCall.addArgument(base);
     99    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
     100    stubCall.call(dst);
     101}
     102
     103
     104#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
     105
     106/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
     107
      108// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
     109void JIT::emit_op_method_check(Instruction*) {}
     110void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
     111#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
     112#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
     113#endif
     114
     115void JIT::emit_op_get_by_val(Instruction* currentInstruction)
     116{
     117    unsigned dst = currentInstruction[1].u.operand;
     118    unsigned base = currentInstruction[2].u.operand;
     119    unsigned property = currentInstruction[3].u.operand;
     120
     121    JITStubCall stubCall(this, cti_op_get_by_val);
     122    stubCall.addArgument(base);
     123    stubCall.addArgument(property);
     124    stubCall.call(dst);
     125}
     126
     127void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
     128{
     129    ASSERT_NOT_REACHED();
     130}
     131
     132void JIT::emit_op_put_by_val(Instruction* currentInstruction)
     133{
     134    unsigned base = currentInstruction[1].u.operand;
     135    unsigned property = currentInstruction[2].u.operand;
     136    unsigned value = currentInstruction[3].u.operand;
     137
     138    JITStubCall stubCall(this, cti_op_put_by_val);
     139    stubCall.addArgument(base);
     140    stubCall.addArgument(property);
     141    stubCall.addArgument(value);
     142    stubCall.call();
     143}
     144
     145void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
     146{
     147    ASSERT_NOT_REACHED();
     148}
     149
     150void JIT::emit_op_get_by_id(Instruction* currentInstruction)
     151{
     152    int dst = currentInstruction[1].u.operand;
     153    int base = currentInstruction[2].u.operand;
     154    int ident = currentInstruction[3].u.operand;
     155
     156    JITStubCall stubCall(this, cti_op_get_by_id_generic);
     157    stubCall.addArgument(base);
     158    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
     159    stubCall.call(dst);
     160
     161    m_propertyAccessInstructionIndex++;
     162}
     163
     164void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
     165{
     166    m_propertyAccessInstructionIndex++;
     167    ASSERT_NOT_REACHED();
     168}
     169
     170void JIT::emit_op_put_by_id(Instruction* currentInstruction)
     171{
     172    int base = currentInstruction[1].u.operand;
     173    int ident = currentInstruction[2].u.operand;
     174    int value = currentInstruction[3].u.operand;
     175
     176    JITStubCall stubCall(this, cti_op_put_by_id_generic);
     177    stubCall.addArgument(base);
     178    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
     179    stubCall.addArgument(value);
     180    stubCall.call();
     181
     182    m_propertyAccessInstructionIndex++;
     183}
     184
     185void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
     186{
     187    m_propertyAccessInstructionIndex++;
     188    ASSERT_NOT_REACHED();
     189}
     190
     191#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
     192
     193/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
     194
     195#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
     196
     197void JIT::emit_op_method_check(Instruction* currentInstruction)
     198{
     199    // Assert that the following instruction is a get_by_id.
     200    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
     201
     202    currentInstruction += OPCODE_LENGTH(op_method_check);
     203
     204    // Do the method check - check the object & its prototype's structure inline (this is the common case).
     205    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
     206    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
     207
     208    int dst = currentInstruction[1].u.operand;
     209    int base = currentInstruction[2].u.operand;
     210
     211    emitLoad(base, regT1, regT0);
     212    emitJumpSlowCaseIfNotJSCell(base, regT1);
     213
     214    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
     215    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
     216    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
     217
      218    // This will be relinked to materialize the function as an immediate, without doing a load.
     219    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
     220    move(Imm32(JSValue::CellTag), regT1);
     221    Jump match = jump();
     222
     223    ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
     224    ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
     225    ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
     226
     227    // Link the failure cases here.
     228    structureCheck.link(this);
     229    protoStructureCheck.link(this);
     230
      231    // Do a regular(ish) get_by_id (the slow case will be linked to
      232    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
     233    compileGetByIdHotPath();
     234
     235    match.link(this);
     236    emitStore(dst, regT1, regT0);
     237    map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
     238
     239    // We've already generated the following get_by_id, so make sure it's skipped over.
     240    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
     241}
     242
     243void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
     244{
     245    currentInstruction += OPCODE_LENGTH(op_method_check);
     246
     247    int dst = currentInstruction[1].u.operand;
     248    int base = currentInstruction[2].u.operand;
     249    int ident = currentInstruction[3].u.operand;
     250
     251    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
     252
     253    // We've already generated the following get_by_id, so make sure it's skipped over.
     254    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
     255}
     256
     257#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
     258
      259// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
     260void JIT::emit_op_method_check(Instruction*) {}
     261void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
     262
     263#endif
     264
     265void JIT::emit_op_get_by_val(Instruction* currentInstruction)
     266{
     267    unsigned dst = currentInstruction[1].u.operand;
     268    unsigned base = currentInstruction[2].u.operand;
     269    unsigned property = currentInstruction[3].u.operand;
     270   
     271    emitLoad2(base, regT1, regT0, property, regT3, regT2);
     272
     273    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
     274    emitJumpSlowCaseIfNotJSCell(base, regT1);
     275    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
     276    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
     277
     278    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
     279    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
     280    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
     281    emitStore(dst, regT1, regT0);
     282    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
     283}
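
The two load32 calls above pull the tag and payload out of the same eight-byte vector slot: with a TimesEight scale, element i lives at m_vector + 8*i, payload in the low word and tag in the high word. An illustrative C++ equivalent (vectorBase stands in for storage + OBJECT_OFFSETOF(ArrayStorage, m_vector[0]); little-endian layout assumed):

    #include <cstdint>

    // Illustrative equivalents of the two BaseIndex loads above.
    int32_t elementPayload(char* vectorBase, uint32_t i)
    {
        return *reinterpret_cast<int32_t*>(vectorBase + 8u * i);       // low word
    }

    int32_t elementTag(char* vectorBase, uint32_t i)
    {
        return *reinterpret_cast<int32_t*>(vectorBase + 8u * i + 4);   // high word
    }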
     284
     285void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
     286{
     287    unsigned dst = currentInstruction[1].u.operand;
     288    unsigned base = currentInstruction[2].u.operand;
     289    unsigned property = currentInstruction[3].u.operand;
     290
      291    // The slow case that handles accesses to arrays (below) may jump back up to here.
     292    Label callGetByValJITStub(this);
     293
     294    linkSlowCase(iter); // property int32 check
     295    linkSlowCaseIfNotJSCell(iter, base); // base cell check
     296    linkSlowCase(iter); // base array check
     297
     298    JITStubCall stubCall(this, cti_op_get_by_val);
     299    stubCall.addArgument(base);
     300    stubCall.addArgument(property);
     301    stubCall.call(dst);
     302
     303    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
     304
     305    linkSlowCase(iter); // array fast cut-off check
     306
     307    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
     308    branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), callGetByValJITStub);
     309
     310    // Missed the fast region, but it is still in the vector.
     311    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
     312    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
     313
     314    // FIXME: Maybe we can optimize this comparison to JSValue().
     315    Jump skip = branch32(NotEqual, regT0, Imm32(0));
     316    branch32(Equal, regT1, Imm32(JSValue::CellTag), callGetByValJITStub);
     317
     318    skip.link(this);
     319    emitStore(dst, regT1, regT0);
     320}
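
The branch pair just above is a hole check: this code assumes an empty slot (JSValue()) is stored with a zero payload and the cell tag, so the fast reload only succeeds when the payload is non-zero or the tag is something other than CellTag. As a sketch:

    #include <cstdint>

    // Sketch of the hole check above; cellTag stands in for JSValue::CellTag,
    // and the { CellTag, 0 } encoding of an empty JSValue() is an assumption here.
    bool isHole(int32_t tag, int32_t payload, int32_t cellTag)
    {
        return !payload && tag == cellTag; // empty slot: fall back to the stub call
    }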
     321
     322void JIT::emit_op_put_by_val(Instruction* currentInstruction)
     323{
     324    unsigned base = currentInstruction[1].u.operand;
     325    unsigned property = currentInstruction[2].u.operand;
     326    unsigned value = currentInstruction[3].u.operand;
     327
     328    emitLoad2(base, regT1, regT0, property, regT3, regT2);
     329
     330    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
     331    emitJumpSlowCaseIfNotJSCell(base, regT1);
     332    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
     333    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
     334
     335    Jump inFastVector = branch32(Below, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
     336
     337    // Check if the access is within the vector.
     338    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
     339
     340    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
      341    // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
     342    Jump skip = branch32(NotEqual, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::CellTag));
     343    addSlowCase(branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), Imm32(0)));
     344    skip.link(this);
     345
     346    inFastVector.link(this);
     347
     348    emitLoad(value, regT1, regT0);
     349    store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
     350    store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
     351}
     352
     353void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
     354{
     355    unsigned base = currentInstruction[1].u.operand;
     356    unsigned property = currentInstruction[2].u.operand;
     357    unsigned value = currentInstruction[3].u.operand;
     358
     359    linkSlowCase(iter); // property int32 check
     360    linkSlowCaseIfNotJSCell(iter, base); // base cell check
     361    linkSlowCase(iter); // base not array check
     362
     363    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
     364    stubPutByValCall.addArgument(base);
     365    stubPutByValCall.addArgument(property);
     366    stubPutByValCall.addArgument(value);
     367    stubPutByValCall.call();
     368
     369    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
     370
     371    // Slow cases for immediate int accesses to arrays.
     372    linkSlowCase(iter); // in vector check
     373    linkSlowCase(iter); // written to slot check
     374
     375    JITStubCall stubCall(this, cti_op_put_by_val_array);
     376    stubCall.addArgument(regT1, regT0);
     377    stubCall.addArgument(regT2);
     378    stubCall.addArgument(value);
     379    stubCall.call();
     380}
     381
     382void JIT::emit_op_get_by_id(Instruction* currentInstruction)
     383{
     384    int dst = currentInstruction[1].u.operand;
     385    int base = currentInstruction[2].u.operand;
     386   
     387    emitLoad(base, regT1, regT0);
     388    emitJumpSlowCaseIfNotJSCell(base, regT1);
     389    compileGetByIdHotPath();
     390    emitStore(dst, regT1, regT0);
     391    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
     392}
     393
     394void JIT::compileGetByIdHotPath()
     395{
      396    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
      397    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
      398    // to array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
      399    // to jump back to if one of these trampolines finds a match.
     400    Label hotPathBegin(this);
     401    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
     402    m_propertyAccessInstructionIndex++;
     403
     404    DataLabelPtr structureToCompare;
     405    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
     406    addSlowCase(structureCheck);
     407    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
     408    ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
     409
     410    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
     411    Label externalLoadComplete(this);
     412    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
     413    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
     414
     415    DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
     416    ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
     417    DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
     418    ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
     419
     420    Label putResult(this);
     421    ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
     422}
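
compileGetByIdHotPath deliberately plants bogus constants (patchGetByIdDefaultStructure, patchGetByIdDefaultOffset) at fixed distances from hotPathBegin; patchGetByIdSelf, below, rewrites them in place once a real Structure and offset are known. A hedged C++ mirror of what the patched code ends up computing (types here are illustrative stand-ins):

    #include <cstdint>

    struct CellSketch {
        void* m_structure;
        char* m_externalStorage;
    };

    // What the patched fast path computes; returning false models the
    // branch to the slow case.
    bool getByIdFastPath(CellSketch* cell, void* cachedStructure, int32_t cachedOffset,
                         int32_t& tag, int32_t& payload)
    {
        if (cell->m_structure != cachedStructure)  // patched structure check
            return false;
        char* storage = cell->m_externalStorage;   // repatched to a lea for inline storage
        payload = *reinterpret_cast<int32_t*>(storage + cachedOffset);     // displacementLabel1
        tag     = *reinterpret_cast<int32_t*>(storage + cachedOffset + 4); // displacementLabel2
        return true;
    }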
     423
     424void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
     425{
     426    int dst = currentInstruction[1].u.operand;
     427    int base = currentInstruction[2].u.operand;
     428    int ident = currentInstruction[3].u.operand;
     429
     430    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
     431}
     432
     433void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
     434{
     435    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
      436    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
      437    // of the call (which we can use to look up the patch information), but should an array-length or
     438    // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
     439    // the distance from the call to the head of the slow case.
     440    linkSlowCaseIfNotJSCell(iter, base);
     441    linkSlowCase(iter);
     442
     443    Label coldPathBegin(this);
     444
     445    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
     446    stubCall.addArgument(regT1, regT0);
     447    stubCall.addArgument(ImmPtr(ident));
     448    Call call = stubCall.call(dst);
     449
     450    ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
     451
     452    // Track the location of the call; this will be used to recover patch information.
     453    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
     454    m_propertyAccessInstructionIndex++;
     455}
     456
     457void JIT::emit_op_put_by_id(Instruction* currentInstruction)
     458{
      459    // In order to be able to patch both the Structure and the object offset, we store a single pointer,
      460    // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and we
      461    // generate code such that the Structure and offset are always at the same distance from it.
     462
     463    int base = currentInstruction[1].u.operand;
     464    int value = currentInstruction[3].u.operand;
     465
     466    emitLoad2(base, regT1, regT0, value, regT3, regT2);
     467
     468    emitJumpSlowCaseIfNotJSCell(base, regT1);
     469
     470    Label hotPathBegin(this);
     471    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
     472    m_propertyAccessInstructionIndex++;
     473
      474    // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
     475    DataLabelPtr structureToCompare;
     476    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
     477    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
     478
      479    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
     480    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
     481    Label externalLoadComplete(this);
     482    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
     483    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
     484
     485    DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
     486    DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
     487    ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
     488    ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
     489}
     490
     491void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
     492{
     493    int base = currentInstruction[1].u.operand;
     494    int ident = currentInstruction[2].u.operand;
     495
     496    linkSlowCaseIfNotJSCell(iter, base);
     497    linkSlowCase(iter);
     498
     499    JITStubCall stubCall(this, cti_op_put_by_id);
     500    stubCall.addArgument(regT1, regT0);
     501    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
     502    stubCall.addArgument(regT3, regT2);
     503    Call call = stubCall.call();
     504
     505    // Track the location of the call; this will be used to recover patch information.
     506    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
     507    m_propertyAccessInstructionIndex++;
     508}
     509
     510// Compile a store into an object's property storage.  May overwrite base.
     511void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
     512{
     513    int offset = cachedOffset;
     514    if (structure->isUsingInlineStorage())
      515        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
     516    else
     517        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
     518    emitStore(offset, valueTag, valuePayload, base);
     519}
     520
     521// Compile a load from an object's property storage.  May overwrite base.
     522void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
     523{
     524    int offset = cachedOffset;
     525    if (structure->isUsingInlineStorage())
     526        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
     527    else
     528        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
     529    emitLoad(offset, resultTag, resultPayload, base);
     530}
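
Both helpers above boil down to the same address arithmetic: a property slot is eight bytes (payload, then tag), stored either inline in the object or behind one extra indirection through m_externalStorage; for inline storage, the byte offset of m_inlineStorage is folded into the index in sizeof(Register) units. A sketch with illustrative field names:

    #include <cstddef>

    struct ObjectSketch {
        char* m_externalStorage;
        char  m_inlineStorage[8 * 4];  // illustrative inline capacity
    };

    // Address of property slot `cachedOffset` (payload at +0, tag at +4).
    char* propertySlot(ObjectSketch* object, bool usingInlineStorage, size_t cachedOffset)
    {
        char* storage = usingInlineStorage ? object->m_inlineStorage : object->m_externalStorage;
        return storage + cachedOffset * 8; // 8 == size of a (payload, tag) pair
    }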
     531
     532void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
     533{
     534    if (base->isUsingInlineStorage()) {
     535        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
     536        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
     537        return;
     538    }
     539
     540    size_t offset = cachedOffset * sizeof(JSValue);
     541
     542    PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
     543    loadPtr(static_cast<void*>(protoPropertyStorage), temp);
     544    load32(Address(temp, offset), resultPayload);
     545    load32(Address(temp, offset + 4), resultTag);
     546}
     547
     548void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
     549{
     550    // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag.  The value can be found on the stack.
     551
     552    JumpList failureCases;
     553    failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
     554
     555    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
     556    failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(oldStructure)));
     557
     558    // Verify that nothing in the prototype chain has a setter for this property.
     559    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
     560        loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
     561        loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
     562        failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(it->get())));
     563    }
     564
     565    // Reallocate property storage if needed.
     566    Call callTarget;
     567    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
     568    if (willNeedStorageRealloc) {
      569        // This trampoline was called like a JIT stub; before we can call again we need to
      570        // remove the return address from the stack, to prevent the stack from becoming misaligned.
     571        preserveReturnAddressAfterCall(regT3);
     572 
     573        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
     574        stubCall.skipArgument(); // base
     575        stubCall.skipArgument(); // ident
     576        stubCall.skipArgument(); // value
     577        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
     578        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
     579        stubCall.call(regT0);
     580
     581        restoreReturnAddressBeforeReturn(regT3);
     582    }
     583
     584    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
     585    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
     586    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
     587 
     588    load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
     589    load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);
     590
     591    // Write the value
     592    compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
     593
     594    ret();
     595   
     596    ASSERT(!failureCases.empty());
     597    failureCases.link(this);
     598    restoreArgumentReferenceForTrampoline();
     599    Call failureCall = tailRecursiveCall();
     600
     601    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
     602
     603    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
     604
     605    if (willNeedStorageRealloc) {
     606        ASSERT(m_calls.size() == 1);
     607        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
     608    }
     609   
     610    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
     611    stubInfo->stubRoutine = entryLabel;
     612    RepatchBuffer repatchBuffer(m_codeBlock);
     613    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
     614}
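
The sub32/add32/storePtr trio in the stub above commits the transition: it moves one reference count from the old Structure to the new one, then installs the new Structure on the cell. Restated as plain C++ (illustrative types; the real code emits these operations as machine instructions):

    struct StructureSketch { unsigned refCount; };
    struct CellSketch { StructureSketch* m_structure; };

    void commitTransition(CellSketch* cell, StructureSketch* oldStructure, StructureSketch* newStructure)
    {
        --oldStructure->refCount;          // sub32(Imm32(1), oldStructure->addressOfCount())
        ++newStructure->refCount;          // add32(Imm32(1), newStructure->addressOfCount())
        cell->m_structure = newStructure;  // storePtr(ImmPtr(newStructure), ...m_structure)
    }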
     615
     616void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
     617{
     618    RepatchBuffer repatchBuffer(codeBlock);
     619
     620    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
      621    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
     622    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
     623
     624    int offset = sizeof(JSValue) * cachedOffset;
     625
      626    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
      627    // and makes the subsequent load's offset automatically correct.
     628    if (structure->isUsingInlineStorage())
     629        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
     630
      631    // Patch the offset into the property map to load from, then patch the Structure to look for.
     632    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
     633    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
     634    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
     635}
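
In the 32_64 model sizeof(JSValue) is eight bytes, so the two displacements patched above are derived from the cached property index as follows (the +4 selects the tag word):

    #include <cstddef>

    int payloadDisplacement(size_t cachedOffset) { return int(8 * cachedOffset); }     // low word
    int tagDisplacement(size_t cachedOffset)     { return int(8 * cachedOffset) + 4; } // high word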
     636
     637void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto)
     638{
     639    RepatchBuffer repatchBuffer(codeBlock);
     640
     641    ASSERT(!methodCallLinkInfo.cachedStructure);
     642    methodCallLinkInfo.cachedStructure = structure;
     643    structure->ref();
     644
     645    Structure* prototypeStructure = proto->structure();
     646    ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
     647    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
     648    prototypeStructure->ref();
     649
     650    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
     651    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
     652    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
     653    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
     654}
     655
     656void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
     657{
     658    RepatchBuffer repatchBuffer(codeBlock);
     659
     660    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
     661    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
     662    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
     663
     664    int offset = sizeof(JSValue) * cachedOffset;
     665
      666    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
      667    // and makes the subsequent load's offset automatically correct.
     668    if (structure->isUsingInlineStorage())
     669        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
     670
      671    // Patch the offset into the property map to load from, then patch the Structure to look for.
     672    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
     673    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
     674    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
     675}
     676
     677void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
     678{
     679    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
     680   
     681    // regT0 holds a JSCell*
     682
     683    // Check for array
     684    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
     685
     686    // Checks out okay! - get the length from the storage
     687    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
     688    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
     689
     690    Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
     691    move(regT2, regT0);
     692    move(Imm32(JSValue::Int32Tag), regT1);
     693    Jump success = jump();
     694
     695    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
     696
     697    // Use the patch information to link the failure cases back to the original slow case routine.
     698    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
     699    patchBuffer.link(failureCases1, slowCaseBegin);
     700    patchBuffer.link(failureCases2, slowCaseBegin);
     701
      702    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
     703    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
     704
     705    // Track the stub we have created so that it will be deleted later.
     706    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
     707    stubInfo->stubRoutine = entryLabel;
     708
     709    // Finally patch the jump to slow case back in the hot path to jump here instead.
     710    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     711    RepatchBuffer repatchBuffer(m_codeBlock);
     712    repatchBuffer.relink(jumpLocation, entryLabel);
     713
      714    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
     715    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
     716}
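
The Above/INT_MAX guard above exists because ArrayStorage::m_length is an unsigned 32-bit count, while the result here is re-tagged as an Int32Tag value; any length that does not fit a signed int32 bails to the slow case. A sketch:

    #include <climits>
    #include <cstdint>

    // A uint32 length can only be returned with Int32Tag if it fits in int32.
    bool lengthFitsInt32(uint32_t length)
    {
        return length <= uint32_t(INT_MAX); // otherwise branch32(Above, ...) fires
    }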
     717
     718void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
     719{
     720    // regT0 holds a JSCell*
     721
      722    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
      723    // referencing the prototype object - let's speculatively load its table nice and early!)
     724    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
     725
     726    Jump failureCases1 = checkStructure(regT0, structure);
     727
      728    // Check that the prototype object's Structure has not changed.
     729    Structure** prototypeStructureAddress = &(protoObject->m_structure);
     730#if PLATFORM(X86_64)
     731    move(ImmPtr(prototypeStructure), regT3);
     732    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
     733#else
     734    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
     735#endif
     736
     737    // Checks out okay! - getDirectOffset
     738    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
     739
     740    Jump success = jump();
     741
     742    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
     743
     744    // Use the patch information to link the failure cases back to the original slow case routine.
     745    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
     746    patchBuffer.link(failureCases1, slowCaseBegin);
     747    patchBuffer.link(failureCases2, slowCaseBegin);
     748
      749    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
     750    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
     751
     752    // Track the stub we have created so that it will be deleted later.
     753    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
     754    stubInfo->stubRoutine = entryLabel;
     755
     756    // Finally patch the jump to slow case back in the hot path to jump here instead.
     757    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     758    RepatchBuffer repatchBuffer(m_codeBlock);
     759    repatchBuffer.relink(jumpLocation, entryLabel);
     760
      761    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
     762    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
     763}
     764
     765
     766void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
     767{
     768    // regT0 holds a JSCell*
     769   
     770    Jump failureCase = checkStructure(regT0, structure);
     771    compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
     772    Jump success = jump();
     773
     774    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
     775
     776    // Use the patch information to link the failure cases back to the original slow case routine.
     777    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
     778    if (!lastProtoBegin)
     779        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
     780
     781    patchBuffer.link(failureCase, lastProtoBegin);
     782
      783    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
     784    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
     785
     786    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
     787
     788    structure->ref();
     789    polymorphicStructures->list[currentIndex].set(entryLabel, structure);
     790
     791    // Finally patch the jump to slow case back in the hot path to jump here instead.
     792    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     793    RepatchBuffer repatchBuffer(m_codeBlock);
     794    repatchBuffer.relink(jumpLocation, entryLabel);
     795}
     796
     797void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
     798{
     799    // regT0 holds a JSCell*
     800   
      801    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
      802    // referencing the prototype object - let's speculatively load its table nice and early!)
     803    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
     804
      805    // Check that regT0 holds an object of the right Structure.
     806    Jump failureCases1 = checkStructure(regT0, structure);
     807
      808    // Check that the prototype object's Structure has not changed.
     809    Structure** prototypeStructureAddress = &(protoObject->m_structure);
     810#if PLATFORM(X86_64)
     811    move(ImmPtr(prototypeStructure), regT3);
     812    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
     813#else
     814    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
     815#endif
     816
     817    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
     818
     819    Jump success = jump();
     820
     821    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
     822
     823    // Use the patch information to link the failure cases back to the original slow case routine.
     824    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
     825    patchBuffer.link(failureCases1, lastProtoBegin);
     826    patchBuffer.link(failureCases2, lastProtoBegin);
     827
      828    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
     829    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
     830
     831    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
     832
     833    structure->ref();
     834    prototypeStructure->ref();
     835    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
     836
     837    // Finally patch the jump to slow case back in the hot path to jump here instead.
     838    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     839    RepatchBuffer repatchBuffer(m_codeBlock);
     840    repatchBuffer.relink(jumpLocation, entryLabel);
     841}
     842
     843void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
     844{
     845    // regT0 holds a JSCell*
     846   
     847    ASSERT(count);
     848   
     849    JumpList bucketsOfFail;
     850
      851    // Check that regT0 holds an object of the right Structure.
     852    bucketsOfFail.append(checkStructure(regT0, structure));
     853
     854    Structure* currStructure = structure;
     855    RefPtr<Structure>* chainEntries = chain->head();
     856    JSObject* protoObject = 0;
     857    for (unsigned i = 0; i < count; ++i) {
     858        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
     859        currStructure = chainEntries[i].get();
     860
      861        // Check that the prototype object's Structure has not changed.
     862        Structure** prototypeStructureAddress = &(protoObject->m_structure);
     863#if PLATFORM(X86_64)
     864        move(ImmPtr(currStructure), regT3);
     865        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
     866#else
     867        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
     868#endif
     869    }
     870    ASSERT(protoObject);
     871
     872    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
     873    Jump success = jump();
     874
     875    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
     876
     877    // Use the patch information to link the failure cases back to the original slow case routine.
     878    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
     879
     880    patchBuffer.link(bucketsOfFail, lastProtoBegin);
     881
      882    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
     883    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
     884
     885    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
     886
     887    // Track the stub we have created so that it will be deleted later.
     888    structure->ref();
     889    chain->ref();
     890    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
     891
     892    // Finally patch the jump to slow case back in the hot path to jump here instead.
     893    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     894    RepatchBuffer repatchBuffer(m_codeBlock);
     895    repatchBuffer.relink(jumpLocation, entryLabel);
     896}
     897
     898void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
     899{
     900    // regT0 holds a JSCell*
     901   
     902    ASSERT(count);
     903   
     904    JumpList bucketsOfFail;
     905
      906    // Check that regT0 holds an object of the right Structure.
     907    bucketsOfFail.append(checkStructure(regT0, structure));
     908
     909    Structure* currStructure = structure;
     910    RefPtr<Structure>* chainEntries = chain->head();
     911    JSObject* protoObject = 0;
     912    for (unsigned i = 0; i < count; ++i) {
     913        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
     914        currStructure = chainEntries[i].get();
     915
      916        // Check that the prototype object's Structure has not changed.
     917        Structure** prototypeStructureAddress = &(protoObject->m_structure);
     918#if PLATFORM(X86_64)
     919        move(ImmPtr(currStructure), regT3);
     920        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
     921#else
     922        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
     923#endif
     924    }
     925    ASSERT(protoObject);
     926
     927    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
     928    Jump success = jump();
     929
     930    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
     931
     932    // Use the patch information to link the failure cases back to the original slow case routine.
     933    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
     934
      935    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
     936    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
     937
     938    // Track the stub we have created so that it will be deleted later.
     939    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
     940    stubInfo->stubRoutine = entryLabel;
     941
     942    // Finally patch the jump to slow case back in the hot path to jump here instead.
     943    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     944    RepatchBuffer repatchBuffer(m_codeBlock);
     945    repatchBuffer.relink(jumpLocation, entryLabel);
     946
      947    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
     948    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
     949}
     950
     951/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
     952
     953#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
     954
     955#else // USE(JSVALUE32_64)
     956
    50957void JIT::emit_op_get_by_val(Instruction* currentInstruction)
    51958{
    52959    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    53960    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    54 #if USE(ALTERNATE_JSIMMEDIATE)
     961#if USE(JSVALUE64)
    55962    // This is technically incorrect - we're zero-extending an int32.  On the hot path this doesn't matter.
    56963    // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
     
    79986    emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
    80987    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    81 #if USE(ALTERNATE_JSIMMEDIATE)
     988#if USE(JSVALUE64)
    82989    // See comment in op_get_by_val.
    83990    zeroExtend32ToPtr(regT1, regT1);
     
    1061013void JIT::emit_op_put_by_index(Instruction* currentInstruction)
    1071014{
    108     JITStubCall stubCall(this, JITStubs::cti_op_put_by_index);
     1015    JITStubCall stubCall(this, cti_op_put_by_index);
    1091016    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    1101017    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
     
    1151022void JIT::emit_op_put_getter(Instruction* currentInstruction)
    1161023{
    117     JITStubCall stubCall(this, JITStubs::cti_op_put_getter);
     1024    JITStubCall stubCall(this, cti_op_put_getter);
    1181025    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    1191026    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
     
    1241031void JIT::emit_op_put_setter(Instruction* currentInstruction)
    1251032{
    126     JITStubCall stubCall(this, JITStubs::cti_op_put_setter);
     1033    JITStubCall stubCall(this, cti_op_put_setter);
    1271034    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    1281035    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
     
    1331040void JIT::emit_op_del_by_id(Instruction* currentInstruction)
    1341041{
    135     JITStubCall stubCall(this, JITStubs::cti_op_del_by_id);
     1042    JITStubCall stubCall(this, cti_op_del_by_id);
    1361043    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    1371044    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
     
    1581065
    1591066    emitGetVirtualRegister(baseVReg, regT0);
    160     JITStubCall stubCall(this, JITStubs::cti_op_get_by_id_generic);
     1067    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    1611068    stubCall.addArgument(regT0);
    1621069    stubCall.addArgument(ImmPtr(ident));
     
    1791086    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
    1801087
    181     JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_generic);
     1088    JITStubCall stubCall(this, cti_op_put_by_id_generic);
    1821089    stubCall.addArgument(regT0);
    1831090    stubCall.addArgument(ImmPtr(ident));
     
    2501157    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
    2511158
    252     compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, m_propertyAccessInstructionIndex++, true);
     1159    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
    2531160
    2541161    // We've already generated the following get_by_id, so make sure it's skipped over.
     
    3111218    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
    3121219
    313     compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, m_propertyAccessInstructionIndex++, false);
    314 }
    315 
    316 void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex, bool isMethodCheck)
     1220    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
     1221}
     1222
     1223void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
    3171224{
    3181225    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
     
    3281235    Label coldPathBegin(this);
    3291236#endif
    330     JITStubCall stubCall(this, isMethodCheck ? JITStubs::cti_op_get_by_id_method_check : JITStubs::cti_op_get_by_id);
     1237    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    3311238    stubCall.addArgument(regT0);
    3321239    stubCall.addArgument(ImmPtr(ident));
     
    3361243
    3371244    // Track the location of the call; this will be used to recover patch information.
    338     m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
     1245    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
     1246    m_propertyAccessInstructionIndex++;
    3391247}
    3401248
     
    3831291    linkSlowCase(iter);
    3841292
    385     JITStubCall stubCall(this, JITStubs::cti_op_put_by_id);
     1293    JITStubCall stubCall(this, cti_op_put_by_id);
    3861294    stubCall.addArgument(regT0);
    3871295    stubCall.addArgument(ImmPtr(ident));
     
    4661374        preserveReturnAddressAfterCall(regT3);
    4671375 
    468         JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_transition_realloc);
    469         stubCall.addArgument(regT0);
     1376        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
     1377        stubCall.skipArgument(); // base
     1378        stubCall.skipArgument(); // ident
     1379        stubCall.skipArgument(); // value
    4701380        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
    4711381        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
    472         stubCall.addArgument(regT1); // This argument is not used in the stub; we set it up on the stack so that it can be restored, below.
    4731382        stubCall.call(regT0);
    474         emitGetJITStubArg(4, regT1);
     1383        emitGetJITStubArg(3, regT1);
    4751384
    4761385        restoreReturnAddressBeforeReturn(regT3);
     
    4951404    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
    4961405
    497     patchBuffer.link(failureCall, FunctionPtr(JITStubs::cti_op_put_by_id_fail));
     1406    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
    4981407
    4991408    if (willNeedStorageRealloc) {
    5001409        ASSERT(m_calls.size() == 1);
    501         patchBuffer.link(m_calls[0].from, FunctionPtr(JITStubs::cti_op_put_by_id_transition_realloc));
     1410        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    5021411    }
    5031412   
     
    5131422
    5141423    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    515     // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    516     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_self_fail));
     1424    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
     1425    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
    5171426
    5181427    int offset = sizeof(JSValue) * cachedOffset;
     
    5521461
    5531462    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    554     // Should probably go to JITStubs::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    555     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
     1463    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
     1464    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
    5561465
    5571466    int offset = sizeof(JSValue) * cachedOffset;
     
    6031512
    6041513    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    605     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
     1514    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
    6061515}
    6071516
     
    6491558
    6501559    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    651     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_proto_list));
     1560    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
    6521561}
    6531562
     
    8281737
    8291738    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    830     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_proto_list));
     1739    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
    8311740}
    8321741
     
    8351744#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    8361745
     1746#endif // USE(JSVALUE32_64)
     1747
    8371748} // namespace JSC
    8381749