source: webkit/trunk/JavaScriptCore/jit/JITPropertyAccess.cpp@ 43896

Last change on this file since 43896 was 43849, checked in by [email protected], 16 years ago

2009-05-18 Maciej Stachowiak <[email protected]>

Reviewed by Geoff Garen.

  • Improve code generation for access to prototype properties


~0.4% speedup on SunSpider.


Based on a suggestion from Geoff Garen.

  • jit/JIT.h:
  • jit/JITPropertyAccess.cpp: (JSC::JIT::compileGetDirectOffset): (JSC::JIT::privateCompileGetByIdProto): (JSC::JIT::privateCompileGetByIdProtoList): (JSC::JIT::privateCompileGetByIdChainList): (JSC::JIT::privateCompileGetByIdChain):
File size: 34.0 KB
Line 
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "JIT.h"
28
29#if ENABLE(JIT)
30
31#include "CodeBlock.h"
32#include "JITInlineMethods.h"
33#include "JITStubCall.h"
34#include "JSArray.h"
35#include "JSFunction.h"
36#include "Interpreter.h"
37#include "ResultType.h"
38#include "SamplingTool.h"
39
40#ifndef NDEBUG
41#include <stdio.h>
42#endif
43
44using namespace std;
45
46namespace JSC {
47
48void JIT::emit_op_put_by_id(Instruction* currentInstruction)
49{
50 compilePutByIdHotPath(currentInstruction[1].u.operand, &(m_codeBlock->identifier(currentInstruction[2].u.operand)), currentInstruction[3].u.operand, m_propertyAccessInstructionIndex++);
51}
52
53void JIT::emit_op_get_by_id(Instruction* currentInstruction)
54{
55 compileGetByIdHotPath(currentInstruction[1].u.operand, currentInstruction[2].u.operand, &(m_codeBlock->identifier(currentInstruction[3].u.operand)), m_propertyAccessInstructionIndex++);
56}
57
// result = base[property], specialized for a JSArray base with an
// immediate-integer index inside the fast-access region; everything else
// bails to the slow case registered via addSlowCase.
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
    // number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    // The base must be a cell with the JSArray vtable pointer; anything else is a slow case.
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
    loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))));

    // Get the value from the vector
    loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
84
// base[property] = value, specialized for a JSArray base with an
// immediate-integer index; writes inside the fast cutoff store directly,
// writes within the vector but past the cutoff are allowed only when they
// overwrite an already-initialized slot, and everything else is a slow case.
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
    loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
    Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)));
    // No; oh well, check if the access is within the vector - if so, we may still be okay.
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength))));

    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
    // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
    addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))));

    // All good - put the value into the array.
    inFastVector.link(this);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])));
}
113
114void JIT::emit_op_put_by_index(Instruction* currentInstruction)
115{
116 JITStubCall stubCall(this, JITStubs::cti_op_put_by_index);
117 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
118 stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
119 stubCall.addArgument(currentInstruction[3].u.operand, regT2);
120 stubCall.call();
121}
122
123void JIT::emit_op_put_getter(Instruction* currentInstruction)
124{
125 JITStubCall stubCall(this, JITStubs::cti_op_put_getter);
126 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
127 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
128 stubCall.addArgument(currentInstruction[3].u.operand, regT2);
129 stubCall.call();
130}
131
132void JIT::emit_op_put_setter(Instruction* currentInstruction)
133{
134 JITStubCall stubCall(this, JITStubs::cti_op_put_setter);
135 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
136 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
137 stubCall.addArgument(currentInstruction[3].u.operand, regT2);
138 stubCall.call();
139}
140
141void JIT::emit_op_del_by_id(Instruction* currentInstruction)
142{
143 JITStubCall stubCall(this, JITStubs::cti_op_del_by_id);
144 stubCall.addArgument(currentInstruction[2].u.operand, regT2);
145 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
146 stubCall.call(currentInstruction[1].u.operand);
147}
148
149
150#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
151
152/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
153
154void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned)
155{
156 // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
157 // Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump
158 // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
159 // to jump back to if one of these trampolies finds a match.
160
161 emitGetVirtualRegister(baseVReg, regT0);
162
163 JITStubCall stubCall(this, JITStubs::cti_op_get_by_id_generic);
164 stubCall.addArgument(regT0);
165 stubCall.addArgument(ImmPtr(ident));
166 stubCall.call(resultVReg);
167}
168
void JIT::compileGetByIdSlowCase(int, int, Identifier*, Vector<SlowCaseEntry>::iterator&, unsigned)
{
    // The generic hot path above never registers a slow case, so this
    // function must never be invoked.
    ASSERT_NOT_REACHED();
}
173
174void JIT::compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg, unsigned)
175{
176 // In order to be able to patch both the Structure, and the object offset, we store one pointer,
177 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
178 // such that the Structure & offset are always at the same distance from this.
179
180 emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
181
182 JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_generic);
183 stubCall.addArgument(regT0);
184 stubCall.addArgument(ImmPtr(ident));
185 stubCall.addArgument(regT1);
186 stubCall.call();
187}
188
void JIT::compilePutByIdSlowCase(int, Identifier*, int, Vector<SlowCaseEntry>::iterator&, unsigned)
{
    // The generic hot path above never registers a slow case, so this
    // function must never be invoked.
    ASSERT_NOT_REACHED();
}
193
194#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
195
196/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
197
void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also record the property-map access offset as
    // a label to jump back to if one of these trampolines finds a match.

    emitGetVirtualRegister(baseVReg, regT0);

    // Non-cell (immediate) bases always go to the slow case.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    // All patchable instructions below are located at fixed byte offsets from
    // hotPathBegin, which is recorded so the patching code can find them.
    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // Structure check against a patchable pointer immediate; it starts out as
    // a bogus default Structure so the check always fails until it is patched.
    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
    ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);

    // Load the external property storage pointer. When the cached property
    // turns out to be in inline storage this load is patched to a lea
    // (see patchGetByIdSelf), making the subsequent offset load correct.
    Label externalLoad(this);
    loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);

    // Load from a patchable (initially bogus) offset within the property storage.
    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
    ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);

    // Trampolines jump back here on success to store the result for us.
    Label putResult(this);
    ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
    emitPutVirtualRegister(resultVReg);
}
231
232
void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

#ifndef NDEBUG
    // Only needed for the offset ASSERT below.
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, JITStubs::cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
257
void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsigned propertyAccessInstructionIndex)
{
    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);

    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    // When the property is in inline storage this load is patched to a lea (see patchPutByIdReplace).
    Label externalLoad(this);
    loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);

    // Store through a patchable (initially bogus) offset within the property storage.
    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
    ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
}
287
void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
{
    // Slow path for put_by_id: reached when the base is not a cell or the
    // Structure check in the hot path fails; falls back to the runtime stub.
    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, JITStubs::cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
302
303// Compile a store into an object's property storage. May overwrite the
304// value in objectReg.
305void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
306{
307 int offset = cachedOffset * sizeof(JSValue);
308 if (structure->isUsingInlineStorage())
309 offset += FIELD_OFFSET(JSObject, m_inlineStorage);
310 else
311 loadPtr(Address(base, FIELD_OFFSET(JSObject, m_externalStorage)), base);
312 storePtr(value, Address(base, offset));
313}
314
315// Compile a load from an object's property storage. May overwrite base.
316void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
317{
318 int offset = cachedOffset * sizeof(JSValue);
319 if (structure->isUsingInlineStorage())
320 offset += FIELD_OFFSET(JSObject, m_inlineStorage);
321 else
322 loadPtr(Address(base, FIELD_OFFSET(JSObject, m_externalStorage)), base);
323 loadPtr(Address(base, offset), result);
324}
325
326void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset)
327{
328 if (base->isUsingInlineStorage())
329 loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
330 else
331 loadPtr(static_cast<void*>(&base->m_externalStorage[cachedOffset]), result);
332}
333
334static JSObject* resizePropertyStorage(JSObject* baseObject, int32_t oldSize, int32_t newSize)
335{
336 baseObject->allocatePropertyStorage(oldSize, newSize);
337 return baseObject;
338}
339
// A transition needs the property backing store reallocated exactly when the
// two structures disagree on storage capacity.
static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Structure* newStructure)
{
    return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
}
344
// Compiles a stand-alone stub for a put_by_id that adds a new property,
// transitioning the object from oldStructure to newStructure. The stub
// verifies the base's Structure and the whole prototype chain, optionally
// calls out to reallocate property storage, swaps the structure refcounts,
// and performs the store. On any failure it tail-calls the slow-path stub.
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress)
{
    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure)));
    JumpList successCases;

    // ecx = baseObject
    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
    // proto(ecx) = baseObject->structure()->prototype()
    failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));

    loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);

    // Walk the prototype chain, verifying each prototype still has the
    // Structure recorded in the chain; reaching null means the chain checked out.
    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
        // null check the prototype
        successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNull()))));

        // Check the structure id
        failureCases.append(branchPtr(NotEqual, Address(regT2, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get())));

        loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
        failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
        loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
    }

    successCases.link(this);

    Call callTarget;

    // emit a call only if storage realloc is needed
    bool willNeedStorageRealloc = transitionWillNeedStorageRealloc(oldStructure, newStructure);
    if (willNeedStorageRealloc) {
        // The return address is popped around the call so the C helper sees a
        // clean stack, and restored afterwards.
        pop(X86::ebx);
#if PLATFORM(X86_64)
        // Setup arguments in edi, esi, edx. Since baseObject is in regT0,
        // regT0 had better not be any of these registers.
        ASSERT(regT0 != X86::edx);
        ASSERT(regT0 != X86::esi);
        ASSERT(regT0 != X86::edi);
        move(Imm32(newStructure->propertyStorageCapacity()), X86::edx);
        move(Imm32(oldStructure->propertyStorageCapacity()), X86::esi);
        move(regT0, X86::edi);
        callTarget = call();
#else
        // 32-bit: pass the three arguments on the stack, then clean them up.
        push(Imm32(newStructure->propertyStorageCapacity()));
        push(Imm32(oldStructure->propertyStorageCapacity()));
        push(regT0);
        callTarget = call();
        addPtr(Imm32(3 * sizeof(void*)), X86::esp);
#endif
        // Reload the value to store (the call may have clobbered regT1).
        emitGetJITStubArg(3, regT1);
        push(X86::ebx);
    }

    // Assumes m_refCount can be decremented easily, refcount decrement is safe as
    // codeblock should ensure oldStructure->m_refCount > 0
    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, FIELD_OFFSET(JSCell, m_structure)));

    // write the value
    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    patchBuffer.link(failureCall, JITStubs::cti_op_put_by_id_fail);

    if (willNeedStorageRealloc)
        patchBuffer.link(callTarget, resizePropertyStorage);

    stubInfo->stubRoutine = patchBuffer.entry();

    // Redirect the original call site to the freshly compiled stub.
    returnAddress.relinkCallerToFunction(code);
}
430
// Patches an existing get_by_id hot path in place to cache a direct ("self")
// property access: fills in the Structure to compare and the storage offset.
void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
    // We don't want to patch more than once - in future go to cti_op_get_by_id_self_fail.
    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_self_fail);

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct
    if (structure->isUsingInlineStorage())
        stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad + patchLengthGetByIdExternalLoadPrefix).patchLoadToLEA();

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure).repatch(structure);
    stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset).repatch(offset);
}
448
// Patches an existing put_by_id hot path in place to cache a replacing store
// (no structure transition): fills in the Structure and the storage offset.
void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to JITStubs::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    returnAddress.relinkCallerToFunction(JITStubs::cti_op_put_by_id_generic);

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct
    if (structure->isUsingInlineStorage())
        stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad + patchLengthPutByIdExternalLoadPrefix).patchLoadToLEA();

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure).repatch(structure);
    stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset).repatch(offset);
}
466
// Compiles a stand-alone trampoline for get_by_id of "length" on a JSArray
// and splices it into an existing get_by_id inline cache.
void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // We don't want to patch more than once - relink the caller so future misses
    // go to cti_op_get_by_id_array_fail.
    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_array_fail);

    // Check eax is an array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
    load32(Address(regT2, FIELD_OFFSET(ArrayStorage, m_length)), regT2);

    // Lengths that cannot be represented as an immediate int take the slow case.
    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.entry();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}
505
// Compiles a stand-alone stub caching a get_by_id that finds the property on
// the base's direct prototype, and splices it into the inline cache.
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
{
    // We don't want to compile this stub more than once - relink the caller so
    // subsequent misses build a polymorphic proto list instead.
    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list);

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
    // x86-64 has no 64-bit-immediate compare; materialize the pointer first.
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay! - getDirectOffset
    compileGetDirectOffset(protoObject, regT0, cachedOffset);

    Jump success = jump();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.entry();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}
551
// Adds one entry (a direct "self" property access for the given Structure) to
// a polymorphic get_by_id stub list; failed checks fall through to the
// previous entry, or to the slow case for the first entry.
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
    Jump success = jump();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    ASSERT(code);
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.entry();

    // The list entry keeps the Structure alive for the lifetime of the stub.
    structure->ref();
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}
581
// Adds one entry (a property access on the base's direct prototype) to a
// polymorphic get_by_id stub list; failed checks fall through to the
// previously compiled entry.
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
    // x86-64 has no 64-bit-immediate compare; materialize the pointer first.
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay! - getDirectOffset
    compileGetDirectOffset(protoObject, regT0, cachedOffset);

    Jump success = jump();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.entry();

    // The list entry keeps both Structures alive for the lifetime of the stub.
    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}
626
// Adds one entry (a property access found `count` hops up the prototype
// chain) to a polymorphic get_by_id stub list; the stub validates every
// Structure along the chain before loading directly from the final prototype.
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    // Walk the chain; each prototype object is known at compile time, so the
    // stub only needs to verify each one's Structure is still the cached one.
    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
        // x86-64 has no 64-bit-immediate compare; materialize the pointer first.
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    // Load straight out of the (compile-time-known) final prototype object.
    compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.entry();

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}
680
// Generates the first (monomorphic) stub for a get_by_id that reads a property
// through a prototype chain of length 'count'. Guards the base object's Structure
// and each prototype's Structure, loads the cached slot on success, installs the
// stub in 'stubInfo', and repatches the hot path's slow-case jump to enter it.
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
{
    // We don't want to patch more than once: redirect the slow-path call so any
    // subsequent miss goes to cti_op_get_by_id_proto_list, which builds a
    // polymorphic list of stubs instead of repatching this one.
    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list);

    // A zero-length chain would be a plain self/proto access, not a chain access.
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    // Walk the prototype chain at compile time, baking in a Structure check for
    // each prototype object. The chain entries hold the *expected* Structure of
    // each prototype, while the object pointer itself comes from the current
    // structure's prototype slot.
    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
        // On x86-64 a pointer-sized immediate cannot be used directly as a
        // memory-compare operand, so materialize it in a scratch register first.
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    // All guards passed: load the cached property directly from the (compile-time
    // known) terminal prototype object into the result register.
    compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();

    // Copy the generated code into executable memory so it can be linked/patched.
    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.entry();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}
731
732/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
733
734#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
735
736} // namespace JSC
737
738#endif // ENABLE(JIT)
Note: See TracBrowser for help on using the repository browser.