/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JIT.h"

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

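// Compile op_put_by_id, e.g. the assignment "o.x = v".
// Operands: base, identifier index, value.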
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    compilePutByIdHotPath(currentInstruction[1].u.operand, &(m_codeBlock->identifier(currentInstruction[2].u.operand)), currentInstruction[3].u.operand, m_propertyAccessInstructionIndex++);
}

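// Compile op_get_by_id, e.g. the property read "o.x".
// Operands: result, base, identifier index.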
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    compileGetByIdHotPath(currentInstruction[1].u.operand, currentInstruction[2].u.operand, &(m_codeBlock->identifier(currentInstruction[3].u.operand)), m_propertyAccessInstructionIndex++);
}

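// Compile op_get_by_val, e.g. the indexed read "a[i]". The fast path only
// handles reads from a JSArray with an immediate-integer index below the
// array's fast access cutoff; anything else bails to the slow case.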
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
    // the number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
    // size is always less than 4GB). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff.
    loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))));

    // Get the value from the vector.
    loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

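// Compile op_put_by_val, e.g. the indexed write "a[i] = v". The fast path
// writes into the array's vector; an index past the fast access cutoff but
// still within the vector is only handled inline if the slot is already in use.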
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff.
    loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
    Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)));
    // No; oh well, check if the access is within the vector - if so, we may still be okay.
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength))));

    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
    // FIXME: should be able to handle an initial write to the array; increment the number of items in the array, and potentially update the fast access cutoff.
    addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))));

    // All good - put the value into the array.
    inFastVector.link(this);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])));
}

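// The remaining property ops are each compiled as a plain call to the
// corresponding runtime stub; none of them has a patched fast path.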
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, JITStubs::cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, JITStubs::cti_op_put_getter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, JITStubs::cti_op_put_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, JITStubs::cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}


#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned)
{
    // With property access optimization disabled there is nothing to patch;
    // get_by_id is always compiled as a call to the generic stub.

    emitGetVirtualRegister(baseVReg, regT0);

    JITStubCall stubCall(this, JITStubs::cti_op_get_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.call(resultVReg);
}

void JIT::compileGetByIdSlowCase(int, int, Identifier*, Vector<SlowCaseEntry>::iterator&, unsigned)
{
    // The generic path above never registers a slow case.
    ASSERT_NOT_REACHED();
}

void JIT::compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg, unsigned)
{
    // Likewise, put_by_id is always compiled as a call to the generic stub.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    stubCall.call();
}

void JIT::compilePutByIdSlowCase(int, Identifier*, int, Vector<SlowCaseEntry>::iterator&, unsigned)
{
    ASSERT_NOT_REACHED();
}

#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to the array-length / prototype access trampolines), and finally we also record the property-map access offset
    // as a label to jump back to if one of these trampolines finds a match.

    emitGetVirtualRegister(baseVReg, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
    ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);

    // Plant a load from the object's external storage; if the property turns out to be stored
    // inline this load will later be patched to a LEA, folding it away.
    Label externalLoad(this);
    loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);

    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
    ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);

    Label putResult(this);
    ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
    emitPutVirtualRegister(resultVReg);
}


void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

#ifndef NDEBUG
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, JITStubs::cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}

void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsigned propertyAccessInstructionIndex)
{
    // In order to be able to patch both the Structure and the object offset, we store one pointer,
    // 'hotPathBegin', to just after the arguments have been loaded into registers, and we generate
    // code such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);

    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    Label externalLoad(this);
    loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);

    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
    ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
}

void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
{
    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, JITStubs::cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}

// Compile a store into an object's property storage. May overwrite base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += FIELD_OFFSET(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, FIELD_OFFSET(JSObject, m_externalStorage)), base);
    storePtr(value, Address(base, offset));
}

// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += FIELD_OFFSET(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, FIELD_OFFSET(JSObject, m_externalStorage)), base);
    loadPtr(Address(base, offset), result);
}

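// As above, but for a known JSObject (used to read from prototype objects,
// whose pointers are known at stub-generation time), so the property storage
// can be addressed absolutely rather than via a base register.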
void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
{
    if (base->isUsingInlineStorage())
        loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
    else {
        PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
        loadPtr(static_cast<void*>(protoPropertyStorage), temp);
        loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
    }
}

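// Out-of-line helper, called from the transition stub generated below, for
// when a structure transition requires the property storage to be reallocated.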
static JSObject* resizePropertyStorage(JSObject* baseObject, int32_t oldSize, int32_t newSize)
{
    baseObject->allocatePropertyStorage(oldSize, newSize);
    return baseObject;
}

static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Structure* newStructure)
{
    return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
}

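// Generate a stub for a put_by_id that transitions the base object from
// oldStructure to newStructure: verify the base object's Structure and that
// each object on the prototype chain still matches, reallocate the property
// storage if the capacity changes, then install the new Structure and store
// the value.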
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress)
{
    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure)));
    JumpList successCases;

    // regT2 = baseObject->m_structure
    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
    // Check that the base is, in fact, an object.
    failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));

    // regT2 = baseObject->structure()->prototype()
    loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);

    // Walk the prototype chain, checking that each prototype's Structure is unchanged.
    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
        // Null check the prototype; reaching null means the whole chain checked out.
        successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNull()))));

        // Check the structure id.
        failureCases.append(branchPtr(NotEqual, Address(regT2, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get())));

        loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
        failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
        loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
    }

    successCases.link(this);

    Call callTarget;

    // Emit a call only if storage realloc is needed.
    bool willNeedStorageRealloc = transitionWillNeedStorageRealloc(oldStructure, newStructure);
    if (willNeedStorageRealloc) {
        // Preserve the return address (on top of the stack) across the call out to resizePropertyStorage.
        pop(X86::ebx);
#if PLATFORM(X86_64)
        // Setup arguments in edi, esi, edx. Since baseObject is in regT0,
        // regT0 had better not be any of these registers.
        ASSERT(regT0 != X86::edx);
        ASSERT(regT0 != X86::esi);
        ASSERT(regT0 != X86::edi);
        move(Imm32(newStructure->propertyStorageCapacity()), X86::edx);
        move(Imm32(oldStructure->propertyStorageCapacity()), X86::esi);
        move(regT0, X86::edi);
        callTarget = call();
#else
        push(Imm32(newStructure->propertyStorageCapacity()));
        push(Imm32(oldStructure->propertyStorageCapacity()));
        push(regT0);
        callTarget = call();
        addPtr(Imm32(3 * sizeof(void*)), X86::esp);
#endif
        // The call may have clobbered regT1; reload the value to store from the stub arguments.
        emitGetJITStubArg(3, regT1);
        push(X86::ebx);
    }

    // Assumes m_refCount can be decremented easily; the refcount decrement is safe as
    // the codeblock should ensure oldStructure->m_refCount > 0.
    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, FIELD_OFFSET(JSCell, m_structure)));

    // Write the value.
    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    patchBuffer.link(failureCall, JITStubs::cti_op_put_by_id_fail);

    if (willNeedStorageRealloc)
        patchBuffer.link(callTarget, resizePropertyStorage);

    stubInfo->stubRoutine = patchBuffer.entry();

    returnAddress.relinkCallerToFunction(code);
}

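// Patch a get_by_id hot path in place so that it performs this self access
// directly: repatch the Structure check and the property-map load offset
// (folding away the external storage load when the property is stored inline).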
void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_self_fail);

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
    if (structure->isUsingInlineStorage())
        stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad + patchLengthGetByIdExternalLoadPrefix).patchLoadToLEA();

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure).repatch(structure);
    stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset).repatch(offset);
}

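// As patchGetByIdSelf, but for the hot path of a put_by_id that overwrites an
// existing property, with no Structure transition required.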
void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to JITStubs::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    returnAddress.relinkCallerToFunction(JITStubs::cti_op_put_by_id_generic);

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
    if (structure->isUsingInlineStorage())
        stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad + patchLengthPutByIdExternalLoadPrefix).patchLoadToLEA();

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure).repatch(structure);
    stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset).repatch(offset);
}

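// Generate the array-length trampoline: a get_by_id of "length" on a JSArray
// loads the length from the array's storage, re-tags it as an immediate
// integer, and jumps back into the hot path to store the result.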
void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_array_fail);

    // Check eax is an array.
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage.
    loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
    load32(Address(regT2, FIELD_OFFSET(ArrayStorage, m_length)), regT2);

    // Lengths that cannot be represented as immediate integers must take the slow case.
    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.entry();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}

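// Generate a stub for a get_by_id that finds the property on the direct
// prototype: check the base object's Structure and the prototype's Structure,
// then load the property straight out of the prototype object.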
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
{
    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list);

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay! - getDirectOffset
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.entry();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}

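// Add a case to a polymorphic self-access list. On Structure mismatch the new
// stub falls through to the previously generated stub, or to the slow case for
// the first entry in the list.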
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
    Jump success = jump();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    ASSERT(code);
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.entry();

    structure->ref();
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}

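// As privateCompileGetByIdProto, but adds the (Structure, prototype Structure)
// case to a polymorphic list rather than installing a single stub.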
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay! - getDirectOffset
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.entry();

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}

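// As above, but for a property found further up the prototype chain: walk the
// chain checking each prototype's Structure, then add the case to the
// polymorphic list.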
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.entry();

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}

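// Monomorphic version of the chain lookup above: installs a single stub whose
// failure cases link straight back to the slow case.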
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
{
    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list);

    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
    PatchBuffer patchBuffer(code);

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.entry();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    jumpLocation.relink(entryLabel);
}

/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

} // namespace JSC

#endif // ENABLE(JIT)