source: webkit/trunk/JavaScriptCore/jit/JITPropertyAccess.cpp@ 39266

Last change on this file since 39266 was 39266, checked in by [email protected], 16 years ago

2008-12-12 Gavin Barraclough <[email protected]>

Reviewed by Geoff Garen.

Remove loop counter 'i' from the JIT generation passes, replace with a member m_bytecodeIndex.

No impact on performance.

  • jit/JIT.cpp: (JSC::JIT::compileOpStrictEq): (JSC::JIT::emitSlowScriptCheck): (JSC::JIT::privateCompileMainPass): (JSC::JIT::privateCompileSlowCases): (JSC::JIT::privateCompile):
  • jit/JIT.h: (JSC::CallRecord::CallRecord): (JSC::JmpTable::JmpTable): (JSC::JIT::emitCTICall):
  • jit/JITArithmetic.cpp: (JSC::JIT::compileBinaryArithOp): (JSC::JIT::compileBinaryArithOpSlowCase):
  • jit/JITCall.cpp: (JSC::JIT::compileOpCall): (JSC::JIT::compileOpCallSlowCase):
  • jit/JITInlineMethods.h: (JSC::JIT::emitGetVirtualRegister): (JSC::JIT::emitGetVirtualRegisters): (JSC::JIT::emitNakedCall): (JSC::JIT::emitCTICall_internal): (JSC::JIT::emitJumpSlowCaseIfJSCell): (JSC::JIT::emitJumpSlowCaseIfNotJSCell): (JSC::JIT::emitJumpSlowCaseIfNotImmNum): (JSC::JIT::emitJumpSlowCaseIfNotImmNums): (JSC::JIT::emitFastArithIntToImmOrSlowCase): (JSC::JIT::addSlowCase): (JSC::JIT::addJump): (JSC::JIT::emitJumpSlowToHot):
  • jit/JITPropertyAccess.cpp: (JSC::JIT::compileGetByIdHotPath): (JSC::JIT::compileGetByIdSlowCase): (JSC::JIT::compilePutByIdHotPath): (JSC::JIT::compilePutByIdSlowCase):
File size: 31.3 KB
Line 
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "JIT.h"
28
29#if ENABLE(JIT)
30
31#include "CodeBlock.h"
32#include "JITInlineMethods.h"
33#include "JSArray.h"
34#include "JSFunction.h"
35#include "Interpreter.h"
36#include "ResultType.h"
37#include "SamplingTool.h"
38
39#ifndef NDEBUG
40#include <stdio.h>
41#endif
42
43using namespace std;
44
45namespace JSC {
46
47#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
48
void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned)
{
    // Non-optimizing (!JIT_OPTIMIZE_PROPERTY_ACCESS) implementation: no inline
    // cache is planted and nothing is ever repatched; every get_by_id simply
    // calls out to the generic C++ routine.  The final (unnamed) parameter is
    // the property-access instruction index, unused on this path.

    emitGetVirtualRegister(baseVReg, X86::eax);

    // Marshal the base object (eax) and the property name into the CTI
    // argument area, call out, and store the returned value.
    emitPutCTIArg(X86::eax, 0);
    emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
    emitCTICall(Interpreter::cti_op_get_by_id_generic);
    emitPutVirtualRegister(resultVReg);
}
63
64
void JIT::compileGetByIdSlowCase(int, int, Identifier*, Vector<SlowCaseEntry>::iterator&, unsigned)
{
    // The generic hot path above registers no slow cases, so this must never
    // be invoked on the non-optimizing build.
    ASSERT_NOT_REACHED();
}
69
void JIT::compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg, unsigned)
{
    // Non-optimizing (!JIT_OPTIMIZE_PROPERTY_ACCESS) implementation: no inline
    // cache is planted and nothing is ever repatched; every put_by_id simply
    // calls out to the generic C++ routine.  The final (unnamed) parameter is
    // the property-access instruction index, unused on this path.

    emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx);

    // Marshal the property name, base object (eax) and value (edx) into the
    // CTI argument area, then call out.
    emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
    emitPutCTIArg(X86::eax, 0);
    emitPutCTIArg(X86::edx, 8);
    emitCTICall(Interpreter::cti_op_put_by_id_generic);
}
83
void JIT::compilePutByIdSlowCase(int, Identifier*, int, Vector<SlowCaseEntry>::iterator&, unsigned)
{
    // The generic hot path above registers no slow cases, so this must never
    // be invoked on the non-optimizing build.
    ASSERT_NOT_REACHED();
}
88
89#else
90
void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be repatched.
    // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
    // to array-length / prototype access trampolines), and finally we also need the property-map access offset as a label
    // to jump back to if one of these trampolines finds a match.

    emitGetVirtualRegister(baseVReg, X86::eax);

    // Immediates (non-cells) always take the slow case.
    emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);

    // 'hotPathBegin' anchors all later repatching: the Structure immediate, the
    // branch to the slow case, and the property load must sit at the fixed
    // distances checked by the ASSERTs below.
    JmpDst hotPathBegin = __ label();
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that this comparison plants a 32-bit immediate, so the real Structure can be patched over it.
    __ cmpl_i32m(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdStructure);
    addSlowCase(__ jne());
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdBranchToSlowCase);

    // Load from a bogus offset in the object's property storage; the real
    // offset is patched in once the access has been cached.
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdPropertyMapOffset);
    emitPutVirtualRegister(resultVReg);
}
115
116
void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the repatch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

#ifndef NDEBUG
    // Only needed for the offset ASSERT below.
    JmpDst coldPathBegin = __ label();
#endif
    emitPutCTIArg(X86::eax, 0);
    emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
    JmpSrc call = emitCTICall(Interpreter::cti_op_get_by_id);
    // The call must sit at a fixed distance from the head of the slow case so
    // trampolines can compute slowCaseBegin from callReturnLocation.
    ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
    emitPutVirtualRegister(resultVReg);

    // Track the location of the call; this will be used to recover repatch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
140
void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsigned propertyAccessInstructionIndex)
{
    // In order to be able to repatch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);

    // 'hotPathBegin' anchors the repatching; the ASSERTs below pin the exact
    // byte offsets of the Structure immediate and the property store.
    JmpDst hotPathBegin = __ label();
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    __ cmpl_i32m(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdStructure);
    addSlowCase(__ jne());

    // Plant a store to a bogus offset in the object's property map; we will patch this later, if it is to be used.
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdPropertyMapOffset);
}
165
void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
{
    // Slow case for put_by_id: taken when the base is an immediate or the
    // Structure check in the hot path fails; calls out to the C++ routine.
    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    // Marshal the property name, base object (eax) and value (edx).
    emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
    emitPutCTIArg(X86::eax, 0);
    emitPutCTIArg(X86::edx, 8);
    JmpSrc call = emitCTICall(Interpreter::cti_op_put_by_id);

    // Track the location of the call; this will be used to recover repatch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
179
180#endif
181
// Out-of-line helper invoked from the machine code generated by
// privateCompilePutByIdTransition (via a raw cdecl call) when a Structure
// transition changes the property storage capacity.  Returns the base object
// so the generated code finds it back in eax after the call.
static JSObject* resizePropertyStorage(JSObject* baseObject, size_t oldSize, size_t newSize)
{
    baseObject->allocatePropertyStorageInline(oldSize, newSize);
    return baseObject;
}
187
188static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Structure* newStructure)
189{
190 return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
191}
192
193void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
194{
195 Vector<JmpSrc, 16> failureCases;
196 // Check eax is an object of the right Structure.
197 __ testl_i32r(JSImmediate::TagMask, X86::eax);
198 failureCases.append(__ jne());
199 __ cmpl_i32m(reinterpret_cast<uint32_t>(oldStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
200 failureCases.append(__ jne());
201 Vector<JmpSrc> successCases;
202
203 // ecx = baseObject
204 __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
205 // proto(ecx) = baseObject->structure()->prototype()
206 __ cmpl_i32m(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
207 failureCases.append(__ jne());
208 __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
209
210 // ecx = baseObject->m_structure
211 for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
212 // null check the prototype
213 __ cmpl_i32r(asInteger(jsNull()), X86::ecx);
214 successCases.append(__ je());
215
216 // Check the structure id
217 __ cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), FIELD_OFFSET(JSCell, m_structure), X86::ecx);
218 failureCases.append(__ jne());
219
220 __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::ecx, X86::ecx);
221 __ cmpl_i32m(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
222 failureCases.append(__ jne());
223 __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
224 }
225
226 failureCases.append(__ jne());
227 for (unsigned i = 0; i < successCases.size(); ++i)
228 __ link(successCases[i], __ label());
229
230 JmpSrc callTarget;
231
232 // emit a call only if storage realloc is needed
233 if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) {
234 __ pushl_r(X86::edx);
235 __ pushl_i32(newStructure->propertyStorageCapacity());
236 __ pushl_i32(oldStructure->propertyStorageCapacity());
237 __ pushl_r(X86::eax);
238 callTarget = __ call();
239 __ addl_i32r(3 * sizeof(void*), X86::esp);
240 __ popl_r(X86::edx);
241 }
242
243 // Assumes m_refCount can be decremented easily, refcount decrement is safe as
244 // codeblock should ensure oldStructure->m_refCount > 0
245 __ subl_i8m(1, reinterpret_cast<void*>(oldStructure));
246 __ addl_i8m(1, reinterpret_cast<void*>(newStructure));
247 __ movl_i32m(reinterpret_cast<uint32_t>(newStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
248
249 // write the value
250 __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
251 __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
252
253 __ ret();
254
255 JmpSrc failureJump;
256 if (failureCases.size()) {
257 for (unsigned i = 0; i < failureCases.size(); ++i)
258 __ link(failureCases[i], __ label());
259 failureJump = __ jmp();
260 }
261
262 void* code = __ executableCopy(m_codeBlock->executablePool());
263
264 if (failureCases.size())
265 X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
266
267 if (transitionWillNeedStorageRealloc(oldStructure, newStructure))
268 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(resizePropertyStorage));
269
270 stubInfo->stubRoutine = code;
271
272 ctiRepatchCallByReturnAddress(returnAddress, code);
273}
274
void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
{
    // Patch the hot path of a get_by_id in place for a self (own-property)
    // access: no stub is generated, only the inline Structure immediate and
    // property-load displacement are rewritten.

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to Interpreter::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));

    // Repatch the offset into the property map to load from, then repatch the Structure to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdStructure, reinterpret_cast<uint32_t>(structure));
}
285
void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
{
    // Patch the hot path of a put_by_id in place for a replace (no transition)
    // store: only the inline Structure immediate and store displacement change.

    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to Interpreter::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_generic));

    // Repatch the offset into the property map to store to, then repatch the Structure to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetPutByIdStructure, reinterpret_cast<uint32_t>(structure));
}
296
void JIT::privateCompilePatchGetArrayLength(void* returnAddress)
{
    // Generate a trampoline for get_by_id of "length" on a JSArray, and
    // repatch the hot path's slow-case branch to jump to it.
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));

    // Check eax is an array (compare the cell's vptr against the JSArray vptr).
    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), X86::eax);
    JmpSrc failureCases1 = __ jne();

    // Checks out okay! - get the length from the storage
    __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    __ movl_mr(FIELD_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);

    // Lengths too large to encode as an immediate number take the slow case.
    __ cmpl_i32r(JSImmediate::maxImmediateInt, X86::ecx);
    JmpSrc failureCases2 = __ ja();

    // eax = (length << 1) + 1 — presumably the JSImmediate integer tagging;
    // confirm against JSImmediate's encoding.
    __ addl_rr(X86::ecx, X86::ecx);
    __ addl_i8r(1, X86::ecx);
    __ movl_rr(X86::ecx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
338
void JIT::privateCompileGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
{
    // Generate a stand-alone stub for a get_by_id that hits an own property:
    // cell check, Structure check, direct load from the property storage.

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // Checks out okay! - getDirectOffset
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Failures fall back to the self_fail C++ routine.
    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));

    // Track the stub so it can be deleted later, and point the call site at it.
    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}
360
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
    // Generate a stub for a get_by_id that hits a property on the direct
    // prototype: check the base's Structure and the prototype's Structure,
    // then load straight out of the prototype's property storage.
#if USE(CTI_REPATCH_PIC)
    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.  (No immediate check here:
    // presumably the hot path has already established eax is a cell — confirm.)
    JmpSrc failureCases1 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases2 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    // Non-PIC variant: a self-contained stub entered via the call site, so it
    // must also perform the immediate (cell) check and return with ret().

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases3 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}
436
437#if USE(CTI_REPATCH_PIC)
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
    // Append one self-access case to a polymorphic get_by_id list: on a
    // Structure mismatch fall through to the previously generated case (or the
    // original slow case if this is the first), otherwise load the property.
    JmpSrc failureCase = checkStructure(X86::eax, structure);
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());
    ASSERT(code);

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;

    X86Assembler::link(code, failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // The list entry keeps the Structure alive for the lifetime of the stub.
    structure->ref();
    polymorphicStructures->list[currentIndex].set(code, structure);

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
466
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
{
    // Append one direct-prototype case to a polymorphic get_by_id list; a
    // failed check falls through to the previously generated case.

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    JmpSrc failureCases1 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases2 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    X86Assembler::link(code, failureCases1, lastProtoBegin);
    X86Assembler::link(code, failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // The list entry keeps both Structures alive for the lifetime of the stub.
    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(code, structure, prototypeStructure);

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
507
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    // Append one prototype-chain case (property found 'count' links up the
    // chain) to a polymorphic get_by_id list; a failed check falls through to
    // the previously generated case.
    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    // Walk the chain, checking at compile time that each prototype still has
    // the Structure recorded when the access was cached.
    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    // Load directly from the terminal prototype's property storage.
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(code, structure, chain);

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
557#endif
558
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
    // Generate a (monomorphic) stub for a get_by_id that finds the property
    // 'count' links up the prototype chain.
#if USE(CTI_REPATCH_PIC)
    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    // Walk the chain, checking each prototype still has the recorded Structure.
    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    // Load directly from the terminal prototype's property storage.
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    // Non-PIC variant: a self-contained stub entered via the call site, so it
    // must also perform the immediate (cell) check and return with ret().
    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    bucketsOfFail.append(__ jne());
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    // Walk the chain, checking each prototype still has the recorded Structure.
    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    // Load directly from the terminal prototype's property storage.
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}
648
void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
{
    // Generate a stub for a put_by_id that overwrites an existing property
    // (no Structure transition): cell check, Structure check, direct store of
    // the value (edx) into the property storage.

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // checks out okay! - putDirectOffset
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Failures fall back to the put_by_id fail routine.
    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));

    // Track the stub so it can be deleted later, and point the call site at it.
    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}
670
671} // namespace JSC
672
673#endif // ENABLE(JIT)
Note: See TracBrowser for help on using the repository browser.