/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if !USE(JSVALUE32_64)

#include "JIT.h"

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "GetterSetter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it were a uint32 against the m_vectorLength - which will always fail if the
    // number was signed, since m_vectorLength is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such, zero extending will have been correct (and extending the value
    // to 64 bits is necessary since it's used in the address calculation). We zero extend rather than sign
    // extend since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
    addSlowCase(branchTestPtr(Zero, regT0));

    emitPutVirtualRegister(dst);
}
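
// Worked example of the bounds-check trick above (an illustrative note): if 'property'
// holds the int32 -1 (bit pattern 0xFFFFFFFF), zeroExtend32ToPtr produces the 64-bit
// value 0x00000000FFFFFFFF. Compared as an unsigned quantity it is AboveOrEqual to any
// possible m_vectorLength (always < 2^32, since the allocation is under 4Gb), so every
// negative index reliably falls through to the slow case.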

void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
{
    ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
    ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));

    Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
    loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
    Jump finishedLoad = jump();
    notUsingInlineStorage.link(this);
    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
    finishedLoad.link(this);
}
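
// A minimal C++ sketch of the load sequence above. 'propertySlot' is a hypothetical
// helper for illustration only, assuming the JSObject layout used throughout this file
// (inline slots in the cell, or an out-of-line slot array); it is not compiled.
#if 0
static JSValue* propertySlot(JSObject* base, Structure* structure, size_t offset)
{
    // Small objects keep their property slots inline in the cell itself;
    // this capacity compare mirrors the branch32 above.
    if (structure->propertyStorageCapacity() == JSObject::inlineStorageCapacity)
        return &base->m_inlineStorage[offset];
    // Larger objects hold a pointer to a heap-allocated slot array.
    return &base->m_externalStorage[offset];
}
#endif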

void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(Imm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT0, regT0, regT2, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}
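
// Context note (informal): op_get_by_pname is the fast path for subscript reads whose
// key is the current for-in property name, e.g.
//
//     for (var k in o)
//         sum += o[k];
//
// The guards above verify that the key still matches the iterator's expected value, that
// the base's Structure matches the iterator's cached Structure, and that the index is
// within the cacheable slots; any failure drops to the generic cti_op_get_by_val path below.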

void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}

void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);

    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT0);
    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    Jump end = jump();

    empty.link(this);
    add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    move(regT1, regT0);
    add32(Imm32(1), regT0);
    store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    jump().linkTo(storeResult, this);

    end.link(this);
}
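
// A sketch of the control flow emitted above, as plain C++. 'putByValFastPath' is a
// hypothetical helper for illustration only (field names follow ArrayStorage as used
// in this file); it is not compiled.
#if 0
static void putByValFastPath(ArrayStorage* storage, uint32_t i, JSValue v)
{
    if (!storage->m_vector[i]) {            // the 'empty' branch: writing into a hole
        ++storage->m_numValuesInVector;
        if (i >= storage->m_length)
            storage->m_length = i + 1;      // index past the length: bump the length
    }
    storage->m_vector[i] = v;               // the shared store at 'storeResult'
}
#endif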

void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}


#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
#endif

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.call(resultVReg);

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned valueVReg = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    JITStubCall stubCall(this, cti_op_put_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    stubCall.call();

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    Jump notCell = emitJumpIfNotJSCell(regT0);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function directly, without doing a load.
    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump match = jump();

    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);

    match.link(this);
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
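
// Note on the patching protocol (an informal summary): the four patchable slots planted
// above - the base Structure compare, the prototype pointer, the prototype's Structure
// compare, and the cached function constant - start out never-matching. Once the slow
// path observes a monomorphic callee, patchMethodCallProto() (below) writes the real
// values in, so a later 'obj.method()' resolves the callee with two pointer compares
// and no property lookup at all.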

void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}

#else // !ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }

#endif

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
    emitPutVirtualRegister(resultVReg);
}

void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also record the property-map access offset as
    // a label to jump back to if one of these trampolines finds a match.

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);

    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetGetByIdExternalLoad);
    ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthGetByIdExternalLoad);

    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);

    Label putResult(this);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
}
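
// Note (an informal summary): as emitted, the Structure compare above is against
// patchGetByIdDefaultStructure, which never matches, so the first execution always takes
// the slow case. The slow path then repatches the compare immediate and the property-map
// displacement in place (see patchGetByIdSelf, below), after which a cache hit costs one
// compare and one or two loads.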

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
}

void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

#ifndef NDEBUG
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    // In order to be able to patch both the Structure and the object offset, we store one pointer,
    // 'hotPathBegin', to just after the arguments have been loaded into registers, and we generate code
    // such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);

    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetPutByIdExternalLoad);
    ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthPutByIdExternalLoad);

    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}

// Compile a store into an object's property storage. May overwrite base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    storePtr(value, Address(base, offset));
}

// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    loadPtr(Address(base, offset), result);
}

void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
{
    if (base->isUsingInlineStorage())
        loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
    else {
        PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
        loadPtr(static_cast<void*>(protoPropertyStorage), temp);
        loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
    }
}

void JIT::testPrototype(Structure* structure, JumpList& failureCases)
{
    if (structure->m_prototype.isNull())
        return;

    move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
    move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
    failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
}
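
// Illustrative scenario for the guard above (informal example):
//
//     var o = Object.create(proto);
//     o.x;          // a stub is compiled against proto's current Structure
//     proto.x = 1;  // proto transitions to a new Structure
//
// After the transition, the prototype cell's m_structure slot no longer equals the
// Structure pointer captured here at compile time, so the branch fires and the stub
// bails to the slow case.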

void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
{
    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
    testPrototype(oldStructure, failureCases);

    // Check the Structures along the prototype chain have not changed.
    for (RefPtr<Structure>* it = chain->head(); *it; ++it)
        testPrototype(it->get(), failureCases);

    Call callTarget;

    // Emit a call only if storage realloc is needed.
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    // The refcount decrement is safe: the CodeBlock ensures oldStructure->m_refCount > 0,
    // so decrementing it here cannot free the Structure.
    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));

    // Write the value.
    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}
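
// Example of the transition this stub caches (informal): repeatedly running
//
//     function init(o) { o.y = 1; }   // each o starts with Structure {x}
//
// on objects sharing the same starting Structure. The checks above confirm neither the
// base's Structure nor any prototype's has changed since the stub was compiled, storage
// is reallocated only when capacity changes, and the cell's Structure pointer is swapped
// to the {x, y} Structure before the value is written.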

void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
}

void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    methodCallLinkInfo.cachedStructure = structure;
    structure->ref();

    Structure* prototypeStructure = proto->structure();
    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
    prototypeStructure->ref();

    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);

    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent store's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));

    // Patch the offset into the property map to store to, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
}

void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check eax is an array.
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);

    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
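
// Context (an informal note): this trampoline serves 'a.length' on a JSArray, which the
// generic get_by_id cache cannot handle because the length lives in ArrayStorage rather
// than the property map. The hot path's slow-case jump is repatched to come here, where
// the length is loaded from m_storage and boxed, provided it fits in an immediate integer.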

void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, bool isGetter, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - so let's speculatively load its table nice and early!).
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay!
    if (isGetter) {
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();
    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    if (isGetter) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
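
// Cost note (an informal summary): a hit in this proto stub is one Structure compare on
// the base, one on the prototype, and a direct load from the prototype's storage - the
// shape of access you get for, e.g., a method cached from Object.prototype.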

void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, bool isGetter, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    if (isGetter) {
        if (!structure->isUsingInlineStorage()) {
            move(regT0, regT1);
            compileGetDirectOffset(regT1, regT1, structure, cachedOffset);
        } else
            compileGetDirectOffset(regT0, regT1, structure, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    if (isGetter) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, bool isGetter, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - so let's speculatively load its table nice and early!).
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay!
    if (isGetter) {
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    if (isGetter) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, bool isGetter, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);
    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    if (isGetter) {
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    if (isGetter) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, bool isGetter, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    if (isGetter) {
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    if (isGetter) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

} // namespace JSC

#endif // ENABLE(JIT)

#endif // !USE(JSVALUE32_64)