/*
 * Copyright (C) 2009-2022 Apple Inc. All rights reserved.
 * Copyright (C) 2010 Patrick Gansterer <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#if ENABLE(JIT)
#include "JIT.h"

#include "BaselineJITRegisters.h"
#include "BasicBlockLocation.h"
#include "BytecodeGenerator.h"
#include "Exception.h"
#include "JITInlines.h"
#include "JITThunks.h"
#include "JSCast.h"
#include "JSFunction.h"
#include "JSPropertyNameEnumerator.h"
#include "LinkBuffer.h"
#include "SuperSampler.h"
#include "ThunkGenerators.h"
#include "TypeLocation.h"
#include "TypeProfilerLog.h"
#include "VirtualRegister.h"

namespace JSC {

void JIT::emit_op_mov(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpMov>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister src = bytecode.m_src;

    if (src.isConstant()) {
        if (m_profiledCodeBlock->isConstantOwnedByUnlinkedCodeBlock(src)) {
            storeValue(m_unlinkedCodeBlock->getConstant(src), addressFor(dst), jsRegT10);
        } else {
            loadCodeBlockConstant(src, jsRegT10);
            storeValue(jsRegT10, addressFor(dst));
        }
        return;
    }

    loadValue(addressFor(src), jsRegT10);
    storeValue(jsRegT10, addressFor(dst));
}

void JIT::emit_op_end(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpEnd>();
    static_assert(noOverlap(returnValueJSR, callFrameRegister));
    emitGetVirtualRegister(bytecode.m_value, returnValueJSR);
    emitRestoreCalleeSaves();
    emitFunctionEpilogue();
    ret();
}

void JIT::emit_op_jmp(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJmp>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    addJump(jump(), target);
}

void JIT::emit_op_new_object(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNewObject>();

    RegisterID resultReg = regT0;
    RegisterID allocatorReg = regT1;
    RegisterID scratchReg = regT2;
    RegisterID structureReg = regT3;

    loadPtrFromMetadata(bytecode, OpNewObject::Metadata::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator(), allocatorReg);
    loadPtrFromMetadata(bytecode, OpNewObject::Metadata::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure(), structureReg);

    JumpList slowCases;
    auto butterfly = TrustedImmPtr(nullptr);
    emitAllocateJSObject(resultReg, JITAllocator::variable(), allocatorReg, structureReg, butterfly, scratchReg, slowCases);
    load8(Address(structureReg, Structure::inlineCapacityOffset()), scratchReg);
    emitInitializeInlineStorage(resultReg, scratchReg);
    mutatorFence(*m_vm);
    boxCell(resultReg, jsRegT10);
    emitPutVirtualRegister(bytecode.m_dst, jsRegT10);

    addSlowCase(slowCases);
}

void JIT::emitSlow_op_new_object(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    RegisterID structureReg = regT3;

    auto bytecode = currentInstruction->as<OpNewObject>();
    VirtualRegister dst = bytecode.m_dst;
    callOperationNoExceptionCheck(operationNewObject, TrustedImmPtr(&vm()), structureReg);
    boxCell(returnValueGPR, returnValueJSR);
    emitPutVirtualRegister(dst, returnValueJSR);
}

void JIT::emit_op_overrides_has_instance(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpOverridesHasInstance>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister constructor = bytecode.m_constructor;
    VirtualRegister hasInstanceValue = bytecode.m_hasInstanceValue;

    emitGetVirtualRegisterPayload(hasInstanceValue, regT2);

    // We don't jump if we know what Symbol.hasInstance would do.
    move(TrustedImm32(1), regT0);
    loadGlobalObject(regT1);
    Jump customHasInstanceValue = branchPtr(NotEqual, regT2, Address(regT1, JSGlobalObject::offsetOfFunctionProtoHasInstanceSymbolFunction()));
    // We know that constructor is an object from the way bytecode is emitted for instanceof expressions.
    emitGetVirtualRegisterPayload(constructor, regT2);
    // Check that constructor 'ImplementsDefaultHasInstance' i.e. the object is not a C-API user nor a bound function.
    test8(Zero, Address(regT2, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0);
    customHasInstanceValue.link(this);

    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

void JIT::emit_op_instanceof(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpInstanceof>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister value = bytecode.m_value;
    VirtualRegister proto = bytecode.m_prototype;

    using BaselineJITRegisters::Instanceof::resultJSR;
    using BaselineJITRegisters::Instanceof::valueJSR;
    using BaselineJITRegisters::Instanceof::protoJSR;
    using BaselineJITRegisters::Instanceof::FastPath::stubInfoGPR;

    emitGetVirtualRegister(value, valueJSR);
    emitGetVirtualRegister(proto, protoJSR);

    // Check that proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
    emitJumpSlowCaseIfNotJSCell(valueJSR, value);
    emitJumpSlowCaseIfNotJSCell(protoJSR, proto);

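    // instanceof goes through an inline cache: the structure stub generated below caches the outcome of the prototype-chain walk so repeated checks against the same structures stay on the fast path.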
    auto [ stubInfo, stubInfoIndex ] = addUnlinkedStructureStubInfo();
    JITInstanceOfGenerator gen(
        nullptr, stubInfo, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex),
        RegisterSet::stubUnavailableRegisters(),
        resultJSR.payloadGPR(),
        valueJSR.payloadGPR(),
        protoJSR.payloadGPR(),
        stubInfoGPR);
    gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;

    gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
#if USE(JSVALUE32_64)
    boxBoolean(resultJSR.payloadGPR(), resultJSR);
#endif
    addSlowCase();
    m_instanceOfs.append(gen);

    setFastPathResumePoint();
    emitPutVirtualRegister(dst, resultJSR);
}

void JIT::emitSlow_op_instanceof(const JSInstruction*, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITInstanceOfGenerator& gen = m_instanceOfs[m_instanceOfIndex++];

    Label coldPathBegin = label();

    using BaselineJITRegisters::Instanceof::valueJSR;
    using BaselineJITRegisters::Instanceof::protoJSR;
    using BaselineJITRegisters::Instanceof::SlowPath::globalObjectGPR;
    using BaselineJITRegisters::Instanceof::SlowPath::stubInfoGPR;

    loadGlobalObject(globalObjectGPR);
    loadConstant(gen.m_unlinkedStubInfoConstantIndex, stubInfoGPR);
    callOperation<decltype(operationInstanceOfOptimize)>(
        Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()),
        globalObjectGPR, stubInfoGPR, valueJSR, protoJSR);
    static_assert(BaselineJITRegisters::Instanceof::resultJSR == returnValueJSR);
    gen.reportSlowPathCall(coldPathBegin, Call());
}

void JIT::emit_op_is_empty(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsEmpty>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister value = bytecode.m_operand;

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, regT0);
#elif USE(JSVALUE32_64)
    emitGetVirtualRegisterTag(value, regT0);
#endif
    isEmpty(regT0, regT0);

    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

void JIT::emit_op_typeof_is_undefined(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpTypeofIsUndefined>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister value = bytecode.m_operand;

    emitGetVirtualRegister(value, jsRegT10);
    Jump isCell = branchIfCell(jsRegT10);

    isUndefined(jsRegT10, regT0);
    Jump done = jump();

    isCell.link(this);
    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(jsRegT10.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
    move(TrustedImm32(0), regT0);
    Jump notMasqueradesAsUndefined = jump();

    isMasqueradesAsUndefined.link(this);
    emitLoadStructure(vm(), jsRegT10.payloadGPR(), regT1);
    loadGlobalObject(regT0);
    loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
    comparePtr(Equal, regT0, regT1, regT0);

    notMasqueradesAsUndefined.link(this);
    done.link(this);
    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

void JIT::emit_op_is_undefined_or_null(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister value = bytecode.m_operand;

    emitGetVirtualRegister(value, jsRegT10);

    emitTurnUndefinedIntoNull(jsRegT10);
    isNull(jsRegT10, regT0);

    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

void JIT::emit_op_is_boolean(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsBoolean>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister value = bytecode.m_operand;

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, regT0);
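    // In the JSVALUE64 encoding, false ^ ValueFalse == 0 and true ^ ValueFalse == 1, so after the
    // xor the operand was a boolean exactly when every bit other than bit 0 is clear.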
    xor64(TrustedImm32(JSValue::ValueFalse), regT0);
    test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
#elif USE(JSVALUE32_64)
    emitGetVirtualRegisterTag(value, regT0);
    compare32(Equal, regT0, TrustedImm32(JSValue::BooleanTag), regT0);
#endif

    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

void JIT::emit_op_is_number(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsNumber>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister value = bytecode.m_operand;

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, regT0);
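    // In the JSVALUE64 encoding, a value is a number (int32 or double) exactly when it shares at
    // least one bit with the number tag held in numberTagRegister.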
    test64(NonZero, regT0, numberTagRegister, regT0);
#elif USE(JSVALUE32_64)
    emitGetVirtualRegisterTag(value, regT0);
    add32(TrustedImm32(1), regT0);
    compare32(Below, regT0, TrustedImm32(JSValue::LowestTag + 1), regT0);
#endif

    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

#if USE(BIGINT32)
void JIT::emit_op_is_big_int(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsBigInt>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister value = bytecode.m_operand;

    emitGetVirtualRegister(value, regT0);
    Jump isCell = branchIfCell(regT0);

    move(TrustedImm64(JSValue::BigInt32Mask), regT1);
    and64(regT1, regT0);
    compare64(Equal, regT0, TrustedImm32(JSValue::BigInt32Tag), regT0);
    boxBoolean(regT0, jsRegT10);
    Jump done = jump();

    isCell.link(this);
    compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(HeapBigIntType), regT0);
    boxBoolean(regT0, jsRegT10);

    done.link(this);
    emitPutVirtualRegister(dst, jsRegT10);
}
#else // if !USE(BIGINT32)
NO_RETURN void JIT::emit_op_is_big_int(const JSInstruction*)
{
    // If we only have HeapBigInts, then we emit isCellWithType instead of isBigInt.
    RELEASE_ASSERT_NOT_REACHED();
}
#endif

void JIT::emit_op_is_cell_with_type(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsCellWithType>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister value = bytecode.m_operand;
    int type = bytecode.m_type;

    emitGetVirtualRegister(value, jsRegT32);

    move(TrustedImm32(0), regT0);
    Jump isNotCell = branchIfNotCell(jsRegT32);
    compare8(Equal, Address(jsRegT32.payloadGPR(), JSCell::typeInfoTypeOffset()), TrustedImm32(type), regT0);
    isNotCell.link(this);

    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

void JIT::emit_op_is_object(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsObject>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister value = bytecode.m_operand;

    emitGetVirtualRegister(value, jsRegT32);

    move(TrustedImm32(0), regT0);
    Jump isNotCell = branchIfNotCell(jsRegT32);
    compare8(AboveOrEqual, Address(jsRegT32.payloadGPR(), JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
    isNotCell.link(this);

    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

void JIT::emit_op_to_primitive(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToPrimitive>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister src = bytecode.m_src;

    emitGetVirtualRegister(src, jsRegT10);

    Jump isImm = branchIfNotCell(jsRegT10);
    addSlowCase(branchIfObject(jsRegT10.payloadGPR()));
    isImm.link(this);

    if (dst != src)
        emitPutVirtualRegister(dst, jsRegT10);
}

void JIT::emit_op_to_property_key(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToPropertyKey>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister src = bytecode.m_src;

    emitGetVirtualRegister(src, jsRegT10);

    addSlowCase(branchIfNotCell(jsRegT10));
    Jump done = branchIfSymbol(jsRegT10.payloadGPR());
    addSlowCase(branchIfNotString(jsRegT10.payloadGPR()));

    done.link(this);
    if (src != dst)
        emitPutVirtualRegister(dst, jsRegT10);
}

void JIT::emit_op_set_function_name(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpSetFunctionName>();

    using SlowOperation = decltype(operationSetFunctionName);
    constexpr GPRReg globalObjectGPR = preferredArgumentGPR<SlowOperation, 0>();
    constexpr GPRReg functionGPR = preferredArgumentGPR<SlowOperation, 1>();
    constexpr JSValueRegs nameJSR = preferredArgumentJSR<SlowOperation, 2>();

    emitGetVirtualRegisterPayload(bytecode.m_function, functionGPR);
    emitGetVirtualRegister(bytecode.m_name, nameJSR);
    loadGlobalObject(globalObjectGPR);
    callOperation(operationSetFunctionName, globalObjectGPR, functionGPR, nameJSR);
}

void JIT::emit_op_not(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNot>();
    emitGetVirtualRegister(bytecode.m_operand, jsRegT10);

    addSlowCase(branchIfNotBoolean(jsRegT10, regT2));
    xorPtr(TrustedImm32(1), jsRegT10.payloadGPR());

    emitPutVirtualRegister(bytecode.m_dst, jsRegT10);
}

void JIT::emit_op_jfalse(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJfalse>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    using BaselineJITRegisters::JFalse::valueJSR;

    emitGetVirtualRegister(bytecode.m_condition, valueJSR);
    emitNakedNearCall(vm().getCTIStub(valueIsFalseyGenerator).retaggedCode<NoPtrTag>());
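    // The thunk leaves its result in regT0: non-zero when the condition value is falsey.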
    addJump(branchTest32(NonZero, regT0), target);
}

MacroAssemblerCodeRef<JITThunkPtrTag> JIT::valueIsFalseyGenerator(VM& vm)
{
    // The thunk generated by this function can only work with the LLInt / Baseline JIT because
    // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
    // DFG/FTL may inline functions belonging to other globalObjects, which may not match
    // CallFrame::codeBlock().
    CCallHelpers jit;

    using BaselineJITRegisters::JFalse::valueJSR; // Incoming
    constexpr GPRReg scratch1GPR = regT1;
    constexpr GPRReg scratch2GPR = regT5;
    constexpr GPRReg globalObjectGPR = regT4;
    static_assert(noOverlap(valueJSR, scratch1GPR, scratch2GPR, globalObjectGPR));

    constexpr bool shouldCheckMasqueradesAsUndefined = true;

    jit.tagReturnAddress();

    loadGlobalObject(jit, globalObjectGPR);
    jit.move(TrustedImm32(1), regT0);
    auto isFalsey = jit.branchIfFalsey(vm, valueJSR, scratch1GPR, scratch2GPR, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, globalObjectGPR);
    jit.move(TrustedImm32(0), regT0);
    isFalsey.link(&jit);
    jit.ret();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "Baseline: valueIsFalsey");
}

void JIT::emit_op_jeq_null(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJeqNull>();
    VirtualRegister src = bytecode.m_value;
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    emitGetVirtualRegister(src, jsRegT10);
    Jump isImmediate = branchIfNotCell(jsRegT10);

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(jsRegT10.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
    emitLoadStructure(vm(), jsRegT10.payloadGPR(), regT2);
    loadGlobalObject(regT0);
    addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
    Jump masqueradesGlobalObjectIsForeign = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);
    emitTurnUndefinedIntoNull(jsRegT10);
    addJump(branchIfNull(jsRegT10), target);

    isNotMasqueradesAsUndefined.link(this);
    masqueradesGlobalObjectIsForeign.link(this);
}

void JIT::emit_op_jneq_null(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJneqNull>();
    VirtualRegister src = bytecode.m_value;
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    emitGetVirtualRegister(src, jsRegT10);
    Jump isImmediate = branchIfNotCell(jsRegT10);

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    addJump(branchTest8(Zero, Address(jsRegT10.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
    emitLoadStructure(vm(), jsRegT10.payloadGPR(), regT2);
    loadGlobalObject(regT0);
    addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);
    emitTurnUndefinedIntoNull(jsRegT10);
    addJump(branchIfNotNull(jsRegT10), target);

    wasNotImmediate.link(this);
}

void JIT::emit_op_jundefined_or_null(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJundefinedOrNull>();
    VirtualRegister value = bytecode.m_value;
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, jsRegT10);
#elif USE(JSVALUE32_64)
    emitGetVirtualRegisterTag(value, jsRegT10.tagGPR());
#endif

    emitTurnUndefinedIntoNull(jsRegT10);
    addJump(branchIfNull(jsRegT10), target);
}

void JIT::emit_op_jnundefined_or_null(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJnundefinedOrNull>();
    VirtualRegister value = bytecode.m_value;
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, jsRegT10);
#elif USE(JSVALUE32_64)
    emitGetVirtualRegisterTag(value, jsRegT10.tagGPR());
#endif

    emitTurnUndefinedIntoNull(jsRegT10);
    addJump(branchIfNotNull(jsRegT10), target);
}

void JIT::emit_op_jeq_ptr(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJeqPtr>();
    VirtualRegister src = bytecode.m_value;
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    emitGetVirtualRegister(src, jsRegT10);
#if USE(JSVALUE32_64)
    // ON JSVALUE64 the pointer comparison below catches this case
    Jump notCell = branchIfNotCell(jsRegT10);
#endif
    loadCodeBlockConstantPayload(bytecode.m_specialPointer, regT2);
    addJump(branchPtr(Equal, jsRegT10.payloadGPR(), regT2), target);
#if USE(JSVALUE32_64)
    notCell.link(this);
#endif
}

void JIT::emit_op_jneq_ptr(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJneqPtr>();
    VirtualRegister src = bytecode.m_value;
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    emitGetVirtualRegister(src, jsRegT10);
#if USE(JSVALUE32_64)
    // ON JSVALUE64 the pointer comparison below catches this case
    Jump notCell = branchIfNotCell(jsRegT10);
#endif
    loadCodeBlockConstantPayload(bytecode.m_specialPointer, regT2);
    CCallHelpers::Jump equal = branchPtr(Equal, jsRegT10.payloadGPR(), regT2);
#if USE(JSVALUE32_64)
    notCell.link(this);
#endif
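    // Record in the metadata that this jneq_ptr has taken the jump at least once; upper tiers use
    // this when deciding whether they can assume the special-pointer comparison always succeeds.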
    store8ToMetadata(TrustedImm32(1), bytecode, OpJneqPtr::Metadata::offsetOfHasJumped());
    addJump(jump(), target);
    equal.link(this);
}

#if USE(JSVALUE64)

void JIT::emit_op_eq(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpEq>();
    emitGetVirtualRegister(bytecode.m_lhs, regT0);
    emitGetVirtualRegister(bytecode.m_rhs, regT1);
    emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
    compare32(Equal, regT1, regT0, regT0);
    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(bytecode.m_dst, jsRegT10);
}

void JIT::emit_op_jeq(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJeq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    emitGetVirtualRegister(bytecode.m_lhs, regT0);
    emitGetVirtualRegister(bytecode.m_rhs, regT1);
    emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
    addJump(branch32(Equal, regT0, regT1), target);
}

#endif

void JIT::emit_op_jtrue(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJtrue>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    using BaselineJITRegisters::JTrue::valueJSR;

    emitGetVirtualRegister(bytecode.m_condition, valueJSR);
    emitNakedNearCall(vm().getCTIStub(valueIsTruthyGenerator).retaggedCode<NoPtrTag>());
    addJump(branchTest32(NonZero, regT0), target);
}

MacroAssemblerCodeRef<JITThunkPtrTag> JIT::valueIsTruthyGenerator(VM& vm)
{
    // The thunk generated by this function can only work with the LLInt / Baseline JIT because
    // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
    // DFG/FTL may inline functions belonging to other globalObjects, which may not match
    // CallFrame::codeBlock().
    CCallHelpers jit;

    using BaselineJITRegisters::JTrue::valueJSR; // Incoming
    constexpr GPRReg scratch1GPR = regT1;
    constexpr GPRReg scratch2GPR = regT5;
    constexpr GPRReg globalObjectGPR = regT4;
    static_assert(noOverlap(valueJSR, scratch1GPR, scratch2GPR, globalObjectGPR));

    constexpr bool shouldCheckMasqueradesAsUndefined = true;

    jit.tagReturnAddress();

    loadGlobalObject(jit, globalObjectGPR);
    jit.move(TrustedImm32(1), regT0);
    auto isTruthy = jit.branchIfTruthy(vm, valueJSR, scratch1GPR, scratch2GPR, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, globalObjectGPR);
    jit.move(TrustedImm32(0), regT0);
    isTruthy.link(&jit);
    jit.ret();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "Baseline: valueIsTruthy");
}

#if USE(JSVALUE64)

void JIT::emit_op_neq(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNeq>();
    emitGetVirtualRegister(bytecode.m_lhs, regT0);
    emitGetVirtualRegister(bytecode.m_rhs, regT1);
    emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
    compare32(NotEqual, regT1, regT0, regT0);
    boxBoolean(regT0, jsRegT10);

    emitPutVirtualRegister(bytecode.m_dst, jsRegT10);
}

void JIT::emit_op_jneq(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJneq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    emitGetVirtualRegister(bytecode.m_lhs, regT0);
    emitGetVirtualRegister(bytecode.m_rhs, regT1);
    emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
    addJump(branch32(NotEqual, regT0, regT1), target);
}

#endif

void JIT::emit_op_throw(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpThrow>();
    uint32_t bytecodeOffset = m_bytecodeIndex.offset();

    using BaselineJITRegisters::Throw::thrownValueJSR;
    using BaselineJITRegisters::Throw::bytecodeOffsetGPR;

    emitGetVirtualRegister(bytecode.m_value, thrownValueJSR);
    move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
    emitNakedNearJump(vm().getCTIStub(op_throw_handlerGenerator).code());
}

MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_throw_handlerGenerator(VM& vm)
{
    CCallHelpers jit;

    using BaselineJITRegisters::Throw::globalObjectGPR;
    using BaselineJITRegisters::Throw::thrownValueJSR; // Incoming
    using BaselineJITRegisters::Throw::bytecodeOffsetGPR; // Incoming

#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    {
        constexpr GPRReg scratchGPR = globalObjectGPR;
        static_assert(noOverlap(scratchGPR, thrownValueJSR, bytecodeOffsetGPR), "Should not clobber incoming parameters");
        jit.loadPtr(&vm.topEntryFrame, scratchGPR);
        jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(scratchGPR);
    }
#endif

    // Call slow operation
    jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
    jit.prepareCallOperation(vm);
    loadGlobalObject(jit, globalObjectGPR);
    jit.setupArguments<decltype(operationThrow)>(globalObjectGPR, thrownValueJSR);
    Call operation = jit.call(OperationPtrTag);

    jit.jumpToExceptionHandler(vm);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    patchBuffer.link(operation, FunctionPtr<OperationPtrTag>(operationThrow));
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "Baseline: op_throw_handler");
}

#if USE(JSVALUE64)

template<typename Op>
void JIT::compileOpStrictEq(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<Op>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister src1 = bytecode.m_lhs;
    VirtualRegister src2 = bytecode.m_rhs;

    emitGetVirtualRegister(src1, regT0);
    emitGetVirtualRegister(src2, regT1);

#if USE(BIGINT32)
    /* At a high level we do (assuming 'type' to be StrictEq):
    If (left is Double || right is Double)
        goto slowPath;
    result = (left == right);
    if (result)
        goto done;
    if (left is Cell || right is Cell)
        goto slowPath;
    done:
    return result;
    */

    // This fragment implements (left is Double || right is Double), with a single branch instead of the 4 that would be naively required if we used branchIfInt32/branchIfNumber
    // The trick is that if a JSValue is an Int32, then adding 1<<49 to it will make it overflow, leaving all high bits at 0
    // If it is not a number at all, then 1<<49 will be its only high bit set
    // Leaving only doubles above or equal 1<<50.
    move(regT0, regT2);
    move(regT1, regT3);
    move(TrustedImm64(JSValue::LowestOfHighBits), regT5);
    add64(regT5, regT2);
    add64(regT5, regT3);
    lshift64(TrustedImm32(1), regT5);
    or64(regT2, regT3);
    addSlowCase(branch64(AboveOrEqual, regT3, regT5));

    compare64(Equal, regT0, regT1, regT5);
    Jump done = branchTest64(NonZero, regT5);

    move(regT0, regT2);
    // Jump slow if at least one is a cell (to cover strings and BigInts).
    and64(regT1, regT2);
    // FIXME: we could do something more precise: unless there is a BigInt32, we only need to do the slow path if both are strings
    addSlowCase(branchIfCell(regT2));

    done.link(this);
    if constexpr (std::is_same<Op, OpNstricteq>::value)
        xor64(TrustedImm64(1), regT5);
    boxBoolean(regT5, JSValueRegs { regT5 });
    emitPutVirtualRegister(dst, regT5);
#else // if !USE(BIGINT32)
    // Jump slow if both are cells (to cover strings).
    move(regT0, regT2);
    or64(regT1, regT2);
    addSlowCase(branchIfCell(regT2));

    // Jump slow if either is a double. First test if it's an integer, which is fine, and then test
    // if it's a double.
    Jump leftOK = branchIfInt32(regT0);
    addSlowCase(branchIfNumber(regT0));
    leftOK.link(this);
    Jump rightOK = branchIfInt32(regT1);
    addSlowCase(branchIfNumber(regT1));
    rightOK.link(this);

    if constexpr (std::is_same<Op, OpStricteq>::value)
        compare64(Equal, regT1, regT0, regT0);
    else
        compare64(NotEqual, regT1, regT0, regT0);
    boxBoolean(regT0, jsRegT10);

    emitPutVirtualRegister(dst, jsRegT10);
#endif
}

void JIT::emit_op_stricteq(const JSInstruction* currentInstruction)
{
    compileOpStrictEq<OpStricteq>(currentInstruction);
}

void JIT::emit_op_nstricteq(const JSInstruction* currentInstruction)
{
    compileOpStrictEq<OpNstricteq>(currentInstruction);
}

template<typename Op>
void JIT::compileOpStrictEqJump(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<Op>();
    int target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    VirtualRegister src1 = bytecode.m_lhs;
    VirtualRegister src2 = bytecode.m_rhs;

    emitGetVirtualRegister(src1, regT0);
    emitGetVirtualRegister(src2, regT1);

#if USE(BIGINT32)
    /* At a high level we do (assuming 'type' to be StrictEq):
    If (left is Double || right is Double)
        goto slowPath;
    if (left == right)
        goto taken;
    if (left is Cell || right is Cell)
        goto slowPath;
    goto notTaken;
    */

    // This fragment implements (left is Double || right is Double), with a single branch instead of the 4 that would be naively required if we used branchIfInt32/branchIfNumber
    // The trick is that if a JSValue is an Int32, then adding 1<<49 to it will make it overflow, leaving all high bits at 0
    // If it is not a number at all, then 1<<49 will be its only high bit set
    // Leaving only doubles above or equal 1<<50.
    move(regT0, regT2);
    move(regT1, regT3);
    move(TrustedImm64(JSValue::LowestOfHighBits), regT5);
    add64(regT5, regT2);
    add64(regT5, regT3);
    lshift64(TrustedImm32(1), regT5);
    or64(regT2, regT3);
    addSlowCase(branch64(AboveOrEqual, regT3, regT5));

    Jump areEqual = branch64(Equal, regT0, regT1);
    if constexpr (std::is_same<Op, OpJstricteq>::value)
        addJump(areEqual, target);

    move(regT0, regT2);
    // Jump slow if at least one is a cell (to cover strings and BigInts).
    and64(regT1, regT2);
    // FIXME: we could do something more precise: unless there is a BigInt32, we only need to do the slow path if both are strings
    addSlowCase(branchIfCell(regT2));

    if constexpr (std::is_same<Op, OpJnstricteq>::value) {
        addJump(jump(), target);
        areEqual.link(this);
    }
#else // if !USE(BIGINT32)
    // Jump slow if both are cells (to cover strings).
    move(regT0, regT2);
    or64(regT1, regT2);
    addSlowCase(branchIfCell(regT2));

    // Jump slow if either is a double. First test if it's an integer, which is fine, and then test
    // if it's a double.
    Jump leftOK = branchIfInt32(regT0);
    addSlowCase(branchIfNumber(regT0));
    leftOK.link(this);
    Jump rightOK = branchIfInt32(regT1);
    addSlowCase(branchIfNumber(regT1));
    rightOK.link(this);
    if constexpr (std::is_same<Op, OpJstricteq>::value)
        addJump(branch64(Equal, regT1, regT0), target);
    else
        addJump(branch64(NotEqual, regT1, regT0), target);
#endif
}

void JIT::emit_op_jstricteq(const JSInstruction* currentInstruction)
{
    compileOpStrictEqJump<OpJstricteq>(currentInstruction);
}

void JIT::emit_op_jnstricteq(const JSInstruction* currentInstruction)
{
    compileOpStrictEqJump<OpJnstricteq>(currentInstruction);
}

void JIT::emitSlow_op_jstricteq(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpJstricteq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    loadGlobalObject(regT2);
    callOperation(operationCompareStrictEq, regT2, regT0, regT1);
    emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
}

void JIT::emitSlow_op_jnstricteq(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpJnstricteq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    loadGlobalObject(regT2);
    callOperation(operationCompareStrictEq, regT2, regT0, regT1);
    emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target);
}

#endif

void JIT::emit_op_to_number(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToNumber>();
    VirtualRegister dstVReg = bytecode.m_dst;
    VirtualRegister srcVReg = bytecode.m_operand;

    emitGetVirtualRegister(srcVReg, jsRegT10);

    addSlowCase(branchIfNotNumber(jsRegT10, regT2));

    emitValueProfilingSite(bytecode, jsRegT10);
    if (srcVReg != dstVReg)
        emitPutVirtualRegister(dstVReg, jsRegT10);
}

void JIT::emit_op_to_numeric(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToNumeric>();
    VirtualRegister dstVReg = bytecode.m_dst;
    VirtualRegister srcVReg = bytecode.m_operand;

    emitGetVirtualRegister(srcVReg, jsRegT10);

    Jump isNotCell = branchIfNotCell(jsRegT10);
    addSlowCase(branchIfNotHeapBigInt(jsRegT10.payloadGPR()));
    Jump isBigInt = jump();

    isNotCell.link(this);
    addSlowCase(branchIfNotNumber(jsRegT10, regT2));
    isBigInt.link(this);

    emitValueProfilingSite(bytecode, jsRegT10);
    if (srcVReg != dstVReg)
        emitPutVirtualRegister(dstVReg, jsRegT10);
}

void JIT::emit_op_to_string(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToString>();
    VirtualRegister dstVReg = bytecode.m_dst;
    VirtualRegister srcVReg = bytecode.m_operand;

    emitGetVirtualRegister(srcVReg, jsRegT10);

    addSlowCase(branchIfNotCell(jsRegT10));
    addSlowCase(branchIfNotString(jsRegT10.payloadGPR()));

    if (srcVReg != dstVReg)
        emitPutVirtualRegister(dstVReg, jsRegT10);
}

void JIT::emit_op_to_object(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToObject>();
    VirtualRegister dstVReg = bytecode.m_dst;
    VirtualRegister srcVReg = bytecode.m_operand;

    emitGetVirtualRegister(srcVReg, jsRegT10);

    addSlowCase(branchIfNotCell(jsRegT10));
    addSlowCase(branchIfNotObject(jsRegT10.payloadGPR()));

    emitValueProfilingSite(bytecode, jsRegT10);
    if (srcVReg != dstVReg)
        emitPutVirtualRegister(dstVReg, jsRegT10);
}

void JIT::emit_op_catch(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpCatch>();

    restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm().topEntryFrame);

    move(TrustedImmPtr(m_vm), regT3);
    loadPtr(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
    storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));

    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);

    // When the LLInt throws an exception, there is a chance that we've already tiered up
    // the same CodeBlock to baseline, and we'll catch the exception in the baseline JIT (because
    // we updated the exception handlers to point here). Because the LLInt uses a different value
    // inside s_constantsGPR, the callee saves we restore above may not contain the correct register.
    // So we replenish it here.
    {
        loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
        loadPtr(Address(regT0, CodeBlock::offsetOfJITData()), s_constantsGPR);
    }

    callOperationNoExceptionCheck(operationRetrieveAndClearExceptionIfCatchable, TrustedImmPtr(&vm()));
    Jump isCatchableException = branchTest32(NonZero, returnValueGPR);
    jumpToExceptionHandler(vm());
    isCatchableException.link(this);

    boxCell(returnValueGPR, jsRegT10);
    emitPutVirtualRegister(bytecode.m_exception, jsRegT10);

    loadValue(Address(jsRegT10.payloadGPR(), Exception::valueOffset()), jsRegT10);
    emitPutVirtualRegister(bytecode.m_thrownValue, jsRegT10);

#if ENABLE(DFG_JIT)
    // FIXME: consider inline caching the process of doing OSR entry, including
    // argument type proofs, storing locals to the buffer, etc
    // https://bugs.webkit.org/show_bug.cgi?id=175598

    callOperationNoExceptionCheck(operationTryOSREnterAtCatchAndValueProfile, TrustedImmPtr(&vm()), m_bytecodeIndex.asBits());
    auto skipOSREntry = branchTestPtr(Zero, returnValueGPR);
    emitPutToCallFrameHeader(returnValueGPR2, CallFrameSlot::codeBlock);
    emitRestoreCalleeSaves();
    farJump(returnValueGPR, ExceptionHandlerPtrTag);
    skipOSREntry.link(this);
#endif // ENABLE(DFG_JIT)
}

void JIT::emit_op_identity_with_profile(const JSInstruction*)
{
    // We don't need to do anything here...
}

void JIT::emit_op_get_parent_scope(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetParentScope>();
    VirtualRegister currentScope = bytecode.m_scope;
    emitGetVirtualRegisterPayload(currentScope, regT0);
    loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
    boxCell(regT0, jsRegT10);
    emitPutVirtualRegister(bytecode.m_dst, jsRegT10);
}

void JIT::emit_op_switch_imm(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpSwitchImm>();
    size_t tableIndex = bytecode.m_tableIndex;
    unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset);
    VirtualRegister scrutinee = bytecode.m_scrutinee;

    // create jump table for switch destinations, track this switch statement.
    const UnlinkedSimpleJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedSwitchJumpTable(tableIndex);
    SimpleJumpTable& linkedTable = m_switchJumpTables[tableIndex];
    m_switches.append(SwitchRecord(tableIndex, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
    linkedTable.ensureCTITable(unlinkedTable);

    emitGetVirtualRegister(scrutinee, jsRegT10);
    auto notInt32 = branchIfNotInt32(jsRegT10);
    sub32(Imm32(unlinkedTable.m_min), jsRegT10.payloadGPR());

    addJump(branch32(AboveOrEqual, jsRegT10.payloadGPR(), Imm32(linkedTable.m_ctiOffsets.size())), defaultOffset);
    move(TrustedImmPtr(linkedTable.m_ctiOffsets.data()), regT2);
    loadPtr(BaseIndex(regT2, jsRegT10.payloadGPR(), ScalePtr), regT2);
    farJump(regT2, JSSwitchPtrTag);

    notInt32.link(this);
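    // Non-int32 scrutinees (for example doubles holding integral values) fall back to the slow
    // operation, which returns the address of the case to jump to.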
1077 | callOperationNoExceptionCheck(operationSwitchImmWithUnknownKeyType, TrustedImmPtr(&vm()), jsRegT10, tableIndex, unlinkedTable.m_min);
|
---|
1078 | farJump(returnValueGPR, JSSwitchPtrTag);
|
---|
1079 | }
|
---|
1080 |
|
---|
1081 | void JIT::emit_op_switch_char(const JSInstruction* currentInstruction)
|
---|
1082 | {
|
---|
1083 | // FIXME: We should have a fast path.
|
---|
1084 | // https://bugs.webkit.org/show_bug.cgi?id=224521
|
---|
1085 | auto bytecode = currentInstruction->as<OpSwitchChar>();
|
---|
1086 | size_t tableIndex = bytecode.m_tableIndex;
|
---|
1087 | unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset);
|
---|
1088 | VirtualRegister scrutinee = bytecode.m_scrutinee;
|
---|
1089 |
|
---|
1090 | // create jump table for switch destinations, track this switch statement.
|
---|
1091 | const UnlinkedSimpleJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedSwitchJumpTable(tableIndex);
|
---|
1092 | SimpleJumpTable& linkedTable = m_switchJumpTables[tableIndex];
|
---|
1093 | m_switches.append(SwitchRecord(tableIndex, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
|
---|
1094 | linkedTable.ensureCTITable(unlinkedTable);
|
---|
1095 |
|
---|
1096 | using SlowOperation = decltype(operationSwitchCharWithUnknownKeyType);
|
---|
1097 | constexpr GPRReg globalObjectGPR = preferredArgumentGPR<SlowOperation, 0>();
|
---|
1098 | constexpr JSValueRegs scrutineeJSR = preferredArgumentJSR<SlowOperation, 1>();
|
---|
1099 |
|
---|
1100 | emitGetVirtualRegister(scrutinee, scrutineeJSR);
|
---|
1101 | loadGlobalObject(globalObjectGPR);
|
---|
1102 | callOperation(operationSwitchCharWithUnknownKeyType, globalObjectGPR, scrutineeJSR, tableIndex, unlinkedTable.m_min);
|
---|
1103 | farJump(returnValueGPR, JSSwitchPtrTag);
|
---|
1104 | }
|
---|
1105 |
|
---|
1106 | void JIT::emit_op_switch_string(const JSInstruction* currentInstruction)
|
---|
1107 | {
|
---|
1108 | auto bytecode = currentInstruction->as<OpSwitchString>();
|
---|
1109 | size_t tableIndex = bytecode.m_tableIndex;
|
---|
1110 | unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset);
|
---|
1111 | VirtualRegister scrutinee = bytecode.m_scrutinee;
|
---|
1112 |
|
---|
1113 | // create jump table for switch destinations, track this switch statement.
|
---|
1114 | const UnlinkedStringJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedStringSwitchJumpTable(tableIndex);
|
---|
1115 | StringJumpTable& linkedTable = m_stringSwitchJumpTables[tableIndex];
|
---|
1116 | m_switches.append(SwitchRecord(tableIndex, m_bytecodeIndex, defaultOffset, SwitchRecord::String));
|
---|
1117 | linkedTable.ensureCTITable(unlinkedTable);
|
---|
1118 |
|
---|
1119 | using SlowOperation = decltype(operationSwitchStringWithUnknownKeyType);
|
---|
1120 | constexpr GPRReg globalObjectGPR = preferredArgumentGPR<SlowOperation, 0>();
|
---|
1121 | constexpr JSValueRegs scrutineeJSR = preferredArgumentJSR<SlowOperation, 1>();
|
---|
1122 |
|
---|
1123 | emitGetVirtualRegister(scrutinee, scrutineeJSR);
|
---|
1124 | loadGlobalObject(globalObjectGPR);
|
---|
1125 | callOperation(operationSwitchStringWithUnknownKeyType, globalObjectGPR, scrutineeJSR, tableIndex);
|
---|
1126 | farJump(returnValueGPR, JSSwitchPtrTag);
|
---|
1127 | }
|
---|
1128 |
|
---|
void JIT::emit_op_eq_null(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpEqNull>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister src1 = bytecode.m_operand;

    emitGetVirtualRegister(src1, jsRegT10);
    Jump isImmediate = branchIfNotCell(jsRegT10);

    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(jsRegT10.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
    move(TrustedImm32(0), regT0);
    Jump wasNotMasqueradesAsUndefined = jump();

    isMasqueradesAsUndefined.link(this);
    emitLoadStructure(vm(), jsRegT10.payloadGPR(), regT2);
    loadGlobalObject(regT0);
    loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
    comparePtr(Equal, regT0, regT2, regT0);
    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    emitTurnUndefinedIntoNull(jsRegT10);
    isNull(jsRegT10, regT0);

    wasNotImmediate.link(this);
    wasNotMasqueradesAsUndefined.link(this);

    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

void JIT::emit_op_neq_null(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNeqNull>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister src1 = bytecode.m_operand;

    emitGetVirtualRegister(src1, jsRegT10);
    Jump isImmediate = branchIfNotCell(jsRegT10);

    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(jsRegT10.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
    move(TrustedImm32(1), regT0);
    Jump wasNotMasqueradesAsUndefined = jump();

    isMasqueradesAsUndefined.link(this);
    emitLoadStructure(vm(), jsRegT10.payloadGPR(), regT2);
    loadGlobalObject(regT0);
    loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
    comparePtr(NotEqual, regT0, regT2, regT0);
    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    emitTurnUndefinedIntoNull(jsRegT10);
    isNotNull(jsRegT10, regT0);

    wasNotImmediate.link(this);
    wasNotMasqueradesAsUndefined.link(this);

    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

void JIT::emit_op_enter(const JSInstruction*)
{
    // Even though CTI doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.

    ASSERT(m_bytecodeIndex.offset() == 0);
    size_t count = m_unlinkedCodeBlock->numVars();
    uint32_t localsToInit = count - CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters();
    RELEASE_ASSERT(localsToInit < count);

    using BaselineJITRegisters::Enter::canBeOptimizedGPR;
    using BaselineJITRegisters::Enter::localsToInitGPR;

    move(TrustedImm32(canBeOptimized()), canBeOptimizedGPR);
    move(TrustedImm32(localsToInit), localsToInitGPR);
    emitNakedNearCall(vm().getCTIStub(op_enter_handlerGenerator).retaggedCode<NoPtrTag>());
}

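// Shared handler thunk for op_enter. It is parameterized through registers (localsToInitGPR,
// canBeOptimizedGPR) rather than baked-in constants, so the single copy cached on the VM can serve every
// CodeBlock: it zaps the requested number of locals with undefined, runs the CodeBlock write barrier,
// and, when the DFG is enabled, performs the tier-up counter check.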
MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_enter_handlerGenerator(VM& vm)
{
    CCallHelpers jit;

    jit.emitCTIThunkPrologue();

    using BaselineJITRegisters::Enter::canBeOptimizedGPR; // Incoming

    {
        using BaselineJITRegisters::Enter::localsToInitGPR; // Incoming
        constexpr GPRReg iteratorGPR = regT4;
        constexpr GPRReg endGPR = regT5;
        constexpr JSValueRegs undefinedJSR = jsRegT32;
        static_assert(noOverlap(localsToInitGPR, canBeOptimizedGPR, iteratorGPR, undefinedJSR));

        size_t startLocal = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters();
        int startOffset = virtualRegisterForLocal(startLocal).offset();
        ASSERT(startOffset <= 0);
        jit.subPtr(GPRInfo::callFrameRegister, TrustedImm32(-startOffset * sizeof(Register)), iteratorGPR);
        jit.mul32(TrustedImm32(sizeof(Register)), localsToInitGPR, localsToInitGPR);
        jit.subPtr(iteratorGPR, localsToInitGPR, endGPR);
        jit.moveTrustedValue(jsUndefined(), undefinedJSR);

        auto initLoop = jit.label();
        Jump initDone = jit.branch32(LessThanOrEqual, iteratorGPR, endGPR);
        {
            jit.storeValue(undefinedJSR, Address(iteratorGPR));
            jit.subPtr(TrustedImm32(sizeof(Register)), iteratorGPR);
            jit.jump(initLoop);
        }
        initDone.link(&jit);
    }

    // emitWriteBarrier(m_codeBlock).
    static_assert(noOverlap(canBeOptimizedGPR, argumentGPR1, argumentGPR2));
    jit.loadPtr(addressFor(CallFrameSlot::codeBlock), argumentGPR1);
    Jump ownerIsRememberedOrInEden = jit.barrierBranch(vm, argumentGPR1, argumentGPR2);

    // op_enter is always at bytecodeOffset 0.
    jit.store32(TrustedImm32(0), tagFor(CallFrameSlot::argumentCountIncludingThis));
    jit.prepareCallOperation(vm);

    // Save canBeOptimizedGPR (arguments to the call below are in registers on all platforms, so it is ok to stack this).
    // Note: we will do a call, so can't use pushToSave, as it does not maintain ABI stack alignment.
    jit.subPtr(TrustedImmPtr(16), stackPointerRegister);
    jit.storePtr(canBeOptimizedGPR, Address(stackPointerRegister));

    jit.setupArguments<decltype(operationWriteBarrierSlowPath)>(TrustedImmPtr(&vm), argumentGPR1);
    Call operationWriteBarrierCall = jit.call(OperationPtrTag);

    jit.loadPtr(Address(stackPointerRegister), canBeOptimizedGPR); // Restore canBeOptimizedGPR
    jit.addPtr(TrustedImmPtr(16), stackPointerRegister); // Restore stack pointer

    ownerIsRememberedOrInEden.link(&jit);

#if ENABLE(DFG_JIT)
    Call operationOptimizeCall;
    if (Options::useDFGJIT()) {
        // emitEnterOptimizationCheck().
        JumpList skipOptimize;

        skipOptimize.append(jit.branchTest32(Zero, canBeOptimizedGPR));

        jit.loadPtr(addressFor(CallFrameSlot::codeBlock), argumentGPR1);
        skipOptimize.append(jit.branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), Address(argumentGPR1, CodeBlock::offsetOfJITExecuteCounter())));

        jit.copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
        jit.prepareCallOperation(vm);

#if OS(WINDOWS) && CPU(X86_64)
        // On Windows, return values larger than 8 bytes are returned via an implicit pointer passed as
        // the first argument, and remaining arguments are shifted to the right. Make space for this.
        static_assert(sizeof(SlowPathReturnType) == 16, "Assumed by generated call site below");
        jit.subPtr(MacroAssembler::TrustedImm32(16), MacroAssembler::stackPointerRegister);
        jit.move(MacroAssembler::stackPointerRegister, JIT::argumentGPR0);
        constexpr GPRReg vmPointerArgGPR { GPRInfo::argumentGPR1 };
        constexpr GPRReg bytecodeIndexBitsArgGPR { GPRInfo::argumentGPR2 };
#else
        constexpr GPRReg vmPointerArgGPR { GPRInfo::argumentGPR0 };
        constexpr GPRReg bytecodeIndexBitsArgGPR { GPRInfo::argumentGPR1 };
#endif
        jit.move(TrustedImmPtr(&vm), vmPointerArgGPR);
        jit.move(TrustedImm32(0), bytecodeIndexBitsArgGPR);

        operationOptimizeCall = jit.call(OperationPtrTag);

#if OS(WINDOWS) && CPU(X86_64)
        jit.pop(GPRInfo::returnValueGPR); // targetPC
        jit.pop(GPRInfo::returnValueGPR2); // dataBuffer (unused, but needs popping to restore stack)
#endif

        skipOptimize.append(jit.branchTestPtr(Zero, returnValueGPR));
        jit.farJump(returnValueGPR, GPRInfo::callFrameRegister);

        skipOptimize.link(&jit);
    }
#endif // ENABLE(DFG_JIT)

    jit.emitCTIThunkEpilogue();
    jit.ret();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    patchBuffer.link(operationWriteBarrierCall, FunctionPtr<OperationPtrTag>(operationWriteBarrierSlowPath));
#if ENABLE(DFG_JIT)
    if (Options::useDFGJIT())
        patchBuffer.link(operationOptimizeCall, FunctionPtr<OperationPtrTag>(operationOptimize));
#endif
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "Baseline: op_enter_handler");
}

void JIT::emit_op_get_scope(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetScope>();
    VirtualRegister dst = bytecode.m_dst;
    emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0);
    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
    boxCell(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

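// op_to_this only has a fast path for final objects whose StructureID matches the one cached in the
// metadata; primitives, non-final objects, and structure mismatches all take the slow case.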
void JIT::emit_op_to_this(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToThis>();
    VirtualRegister srcDst = bytecode.m_srcDst;

    emitGetVirtualRegister(srcDst, jsRegT10);

    emitJumpSlowCaseIfNotJSCell(jsRegT10, srcDst);

    addSlowCase(branchIfNotType(jsRegT10.payloadGPR(), FinalObjectType));
    load32FromMetadata(bytecode, OpToThis::Metadata::offsetOfCachedStructureID(), regT2);
    addSlowCase(branch32(NotEqual, Address(jsRegT10.payloadGPR(), JSCell::structureIDOffset()), regT2));
}

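// op_create_this inline-allocates |this| from the callee's object allocation profile (the allocator and
// structure cached in the JSFunction's rare data), bailing to the slow case when the callee is not a
// JSFunction, has no rare data yet, or does not match the callee cached in the metadata.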
void JIT::emit_op_create_this(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpCreateThis>();
    VirtualRegister callee = bytecode.m_callee;
    RegisterID calleeReg = regT0;
    RegisterID rareDataReg = regT4;
    RegisterID resultReg = regT0;
    RegisterID allocatorReg = regT1;
    RegisterID structureReg = regT2;
    RegisterID cachedFunctionReg = regT4;
    RegisterID scratchReg = regT3;

    emitGetVirtualRegisterPayload(callee, calleeReg);
    addSlowCase(branchIfNotFunction(calleeReg));
    loadPtr(Address(calleeReg, JSFunction::offsetOfExecutableOrRareData()), rareDataReg);
    addSlowCase(branchTestPtr(Zero, rareDataReg, TrustedImm32(JSFunction::rareDataTag)));
    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfAllocator() - JSFunction::rareDataTag), allocatorReg);
    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfStructure() - JSFunction::rareDataTag), structureReg);

    loadPtrFromMetadata(bytecode, OpCreateThis::Metadata::offsetOfCachedCallee(), cachedFunctionReg);
    Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
    addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
    hasSeenMultipleCallees.link(this);

    JumpList slowCases;
    auto butterfly = TrustedImmPtr(nullptr);
    emitAllocateJSObject(resultReg, JITAllocator::variable(), allocatorReg, structureReg, butterfly, scratchReg, slowCases);
    load8(Address(structureReg, Structure::inlineCapacityOffset()), scratchReg);
    emitInitializeInlineStorage(resultReg, scratchReg);
    mutatorFence(*m_vm);
    addSlowCase(slowCases);
    boxCell(resultReg, jsRegT10);
    emitPutVirtualRegister(bytecode.m_dst, jsRegT10);
}

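// op_check_tdz guards reads of bindings that may still be in their temporal dead zone: such slots hold
// the empty JSValue, so the check is simply "branch to the slow case if the value (64-bit) or its tag
// (32-bit) is empty".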
void JIT::emit_op_check_tdz(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpCheckTdz>();
#if USE(JSVALUE64)
    emitGetVirtualRegister(bytecode.m_targetVirtualRegister, regT0);
#elif USE(JSVALUE32_64)
    emitGetVirtualRegisterTag(bytecode.m_targetVirtualRegister, regT0);
#endif
    addSlowCase(branchIfEmpty(regT0));
}

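// These equality slow paths are 64-bit only: on JSVALUE64 the fast paths leave their operands in
// regT0/regT1, so the slow case can feed them straight into operationCompareEq.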
#if USE(JSVALUE64)

// Slow cases

void JIT::emitSlow_op_eq(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpEq>();
    loadGlobalObject(regT2);
    callOperation(operationCompareEq, regT2, regT0, regT1);
    boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR });
    emitPutVirtualRegister(bytecode.m_dst, returnValueGPR);
}

void JIT::emitSlow_op_neq(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpNeq>();
    loadGlobalObject(regT2);
    callOperation(operationCompareEq, regT2, regT0, regT1);
    xor32(TrustedImm32(0x1), regT0);
    boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR });
    emitPutVirtualRegister(bytecode.m_dst, returnValueGPR);
}

void JIT::emitSlow_op_jeq(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpJeq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    loadGlobalObject(regT2);
    callOperation(operationCompareEq, regT2, regT0, regT1);
    emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
}

void JIT::emitSlow_op_jneq(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpJneq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    loadGlobalObject(regT2);
    callOperation(operationCompareEq, regT2, regT0, regT1);
    emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target);
}

#endif // USE(JSVALUE64)

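// op_debug is cheap when no debugger is attached: it loads the CodeBlock's debugger-requests count and
// only calls operationDebug when a debugger has actually asked to observe this hook.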
void JIT::emit_op_debug(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpDebug>();
    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
    load32(Address(regT0, CodeBlock::offsetOfDebuggerRequests()), regT0);
    Jump noDebuggerRequests = branchTest32(Zero, regT0);
    callOperation(operationDebug, TrustedImmPtr(&vm()), static_cast<int>(bytecode.m_debugHookType));
    noDebuggerRequests.link(this);
}

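// op_loop_hint does two things: an optional fuzzing-only early return out of apparently-infinite loops,
// and the execution-counter bump that, once it overflows, takes the slow path below to attempt OSR entry
// into optimized code.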
void JIT::emit_op_loop_hint(const JSInstruction* instruction)
{
    if (UNLIKELY(Options::returnEarlyFromInfiniteLoopsForFuzzing() && m_unlinkedCodeBlock->loopHintsAreEligibleForFuzzingEarlyReturn())) {
        uintptr_t* ptr = vm().getLoopHintExecutionCounter(instruction);
        loadPtr(ptr, regT0);
        auto skipEarlyReturn = branchPtr(Below, regT0, TrustedImmPtr(Options::earlyReturnFromInfiniteLoopsLimit()));

        loadGlobalObject(returnValueJSR.payloadGPR());
        boxCell(returnValueJSR.payloadGPR(), returnValueJSR);

        checkStackPointerAlignment();
        emitRestoreCalleeSaves();
        emitFunctionEpilogue();
        ret();

        skipEarlyReturn.link(this);
        addPtr(TrustedImm32(1), regT0);
        storePtr(regT0, ptr);
    }

    // Emit the JIT optimization check:
    if (canBeOptimized()) {
        loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
        addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
            Address(regT0, CodeBlock::offsetOfJITExecuteCounter())));
    }
}

void JIT::emitSlow_op_loop_hint(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if ENABLE(DFG_JIT)
    // Emit the slow path for the JIT optimization check:
    if (canBeOptimized()) {
        linkAllSlowCases(iter);

        copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);

        callOperationNoExceptionCheck(operationOptimize, TrustedImmPtr(&vm()), m_bytecodeIndex.asBits());
        Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
        if (ASSERT_ENABLED) {
            Jump ok = branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
            abortWithReason(JITUnreasonableLoopHintJumpTarget);
            ok.link(this);
        }
        farJump(returnValueGPR, GPRInfo::callFrameRegister);
        noOptimizedEntry.link(this);

        emitJumpSlowToHot(jump(), currentInstruction->size());
    }
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
#endif
}

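// op_check_traps polls the VM's trap bits (e.g. termination requests and debugger interrupts). The slow
// path materializes the bytecode offset and calls a shared thunk that invokes operationHandleTraps and
// then tail-calls the common exception-check thunk.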
void JIT::emit_op_check_traps(const JSInstruction*)
{
    addSlowCase(branchTest32(NonZero, AbsoluteAddress(m_vm->traps().trapBitsAddress()), TrustedImm32(VMTraps::AsyncEvents)));
}

void JIT::emit_op_nop(const JSInstruction*)
{
}

void JIT::emit_op_super_sampler_begin(const JSInstruction*)
{
    add32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount)));
}

void JIT::emit_op_super_sampler_end(const JSInstruction*)
{
    sub32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount)));
}

void JIT::emitSlow_op_check_traps(const JSInstruction*, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    uint32_t bytecodeOffset = m_bytecodeIndex.offset();

    using BaselineJITRegisters::CheckTraps::bytecodeOffsetGPR;

    move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
    emitNakedNearCall(vm().getCTIStub(op_check_traps_handlerGenerator).retaggedCode<NoPtrTag>());
}

MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_check_traps_handlerGenerator(VM& vm)
{
    CCallHelpers jit;

    using BaselineJITRegisters::CheckTraps::bytecodeOffsetGPR; // Incoming
    constexpr GPRReg globalObjectGPR = argumentGPR0;
    static_assert(noOverlap(bytecodeOffsetGPR, globalObjectGPR));

    jit.emitCTIThunkPrologue();

    // Call slow operation
    jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
    jit.prepareCallOperation(vm);
    loadGlobalObject(jit, globalObjectGPR);
    jit.setupArguments<decltype(operationHandleTraps)>(globalObjectGPR);
    CCallHelpers::Call operation = jit.call(OperationPtrTag);

    jit.emitCTIThunkEpilogue();

    // Tail call to exception check thunk
    Jump exceptionCheck = jit.jump();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    patchBuffer.link(operation, FunctionPtr<OperationPtrTag>(operationHandleTraps));
    patchBuffer.link(exceptionCheck, CodeLocationLabel(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>()));
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "Baseline: op_check_traps_handler");
}

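// op_new_regexp always calls out: the RegExp cell is an unlinked-CodeBlock constant, and the operation
// allocates the RegExpObject for the current global object.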
void JIT::emit_op_new_regexp(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNewRegexp>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister regexp = bytecode.m_regexp;
    GPRReg globalGPR = argumentGPR0;
    loadGlobalObject(globalGPR);
    callOperation(operationNewRegexp, globalGPR, TrustedImmPtr(jsCast<RegExp*>(m_unlinkedCodeBlock->getConstant(regexp))));
    boxCell(returnValueGPR, returnValueJSR);
    emitPutVirtualRegister(dst, returnValueJSR);
}

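// The op_new_*func opcodes share one emitter: the scope payload and the function's baseline constant
// pool entry are loaded into argument registers, and the opcode ID selects which operationNew*Function
// to call.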
template<typename Op>
void JIT::emitNewFuncCommon(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<Op>();
    VirtualRegister dst = bytecode.m_dst;

    emitGetVirtualRegisterPayload(bytecode.m_scope, argumentGPR1);
    auto constant = addToConstantPool(JITConstantPool::Type::FunctionDecl, bitwise_cast<void*>(static_cast<uintptr_t>(bytecode.m_functionDecl)));
    loadConstant(constant, argumentGPR2);

    OpcodeID opcodeID = Op::opcodeID;
    if (opcodeID == op_new_func)
        callOperation(operationNewFunction, dst, TrustedImmPtr(&vm()), argumentGPR1, argumentGPR2);
    else if (opcodeID == op_new_generator_func)
        callOperation(operationNewGeneratorFunction, dst, TrustedImmPtr(&vm()), argumentGPR1, argumentGPR2);
    else if (opcodeID == op_new_async_func)
        callOperation(operationNewAsyncFunction, dst, TrustedImmPtr(&vm()), argumentGPR1, argumentGPR2);
    else {
        ASSERT(opcodeID == op_new_async_generator_func);
        callOperation(operationNewAsyncGeneratorFunction, dst, TrustedImmPtr(&vm()), argumentGPR1, argumentGPR2);
    }
}

void JIT::emit_op_new_func(const JSInstruction* currentInstruction)
{
    emitNewFuncCommon<OpNewFunc>(currentInstruction);
}

void JIT::emit_op_new_generator_func(const JSInstruction* currentInstruction)
{
    emitNewFuncCommon<OpNewGeneratorFunc>(currentInstruction);
}

void JIT::emit_op_new_async_generator_func(const JSInstruction* currentInstruction)
{
    emitNewFuncCommon<OpNewAsyncGeneratorFunc>(currentInstruction);
}

void JIT::emit_op_new_async_func(const JSInstruction* currentInstruction)
{
    emitNewFuncCommon<OpNewAsyncFunc>(currentInstruction);
}

template<typename Op>
void JIT::emitNewFuncExprCommon(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<Op>();
    VirtualRegister dst = bytecode.m_dst;

    emitGetVirtualRegisterPayload(bytecode.m_scope, argumentGPR1);
    auto constant = addToConstantPool(JITConstantPool::Type::FunctionExpr, bitwise_cast<void*>(static_cast<uintptr_t>(bytecode.m_functionDecl)));
    loadConstant(constant, argumentGPR2);
    OpcodeID opcodeID = Op::opcodeID;

    if (opcodeID == op_new_func_exp)
        callOperation(operationNewFunction, dst, TrustedImmPtr(&vm()), argumentGPR1, argumentGPR2);
    else if (opcodeID == op_new_generator_func_exp)
        callOperation(operationNewGeneratorFunction, dst, TrustedImmPtr(&vm()), argumentGPR1, argumentGPR2);
    else if (opcodeID == op_new_async_func_exp)
        callOperation(operationNewAsyncFunction, dst, TrustedImmPtr(&vm()), argumentGPR1, argumentGPR2);
    else {
        ASSERT(opcodeID == op_new_async_generator_func_exp);
        callOperation(operationNewAsyncGeneratorFunction, dst, TrustedImmPtr(&vm()), argumentGPR1, argumentGPR2);
    }
}

void JIT::emit_op_new_func_exp(const JSInstruction* currentInstruction)
{
    emitNewFuncExprCommon<OpNewFuncExp>(currentInstruction);
}

void JIT::emit_op_new_generator_func_exp(const JSInstruction* currentInstruction)
{
    emitNewFuncExprCommon<OpNewGeneratorFuncExp>(currentInstruction);
}

void JIT::emit_op_new_async_func_exp(const JSInstruction* currentInstruction)
{
    emitNewFuncExprCommon<OpNewAsyncFuncExp>(currentInstruction);
}

void JIT::emit_op_new_async_generator_func_exp(const JSInstruction* currentInstruction)
{
    emitNewFuncExprCommon<OpNewAsyncGeneratorFuncExp>(currentInstruction);
}

void JIT::emit_op_new_array(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNewArray>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister valuesStart = bytecode.m_argv;
    int size = bytecode.m_argc;
    addPtr(TrustedImm32(valuesStart.offset() * sizeof(Register)), callFrameRegister, argumentGPR2);
    materializePointerIntoMetadata(bytecode, OpNewArray::Metadata::offsetOfArrayAllocationProfile(), argumentGPR1);
    loadGlobalObject(argumentGPR0);
    callOperation(operationNewArrayWithProfile, dst, argumentGPR0, argumentGPR1, argumentGPR2, size);
}

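// op_new_array_with_size uses preferredArgumentGPR/preferredArgumentJSR so the operands are materialized
// directly in the registers the C call wants, avoiding extra shuffling before callOperation.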
void JIT::emit_op_new_array_with_size(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNewArrayWithSize>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister sizeIndex = bytecode.m_length;

    using Operation = decltype(operationNewArrayWithSizeAndProfile);
    constexpr GPRReg globalObjectGPR = preferredArgumentGPR<Operation, 0>();
    constexpr GPRReg profileGPR = preferredArgumentGPR<Operation, 1>();
    constexpr JSValueRegs sizeJSR = preferredArgumentJSR<Operation, 2>();

    materializePointerIntoMetadata(bytecode, OpNewArrayWithSize::Metadata::offsetOfArrayAllocationProfile(), profileGPR);
    emitGetVirtualRegister(sizeIndex, sizeJSR);
    loadGlobalObject(globalObjectGPR);
    callOperation(operationNewArrayWithSizeAndProfile, dst, globalObjectGPR, profileGPR, sizeJSR);
}

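// op_profile_type writes a (value, structureID, TypeLocation) entry into the VM's TypeProfilerLog
// inline, and only calls out to process the log when the buffer fills up. It also marks the generated
// code as non-shareable, since pointers taken from this CodeBlock's metadata are baked into the code.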
void JIT::emit_op_profile_type(const JSInstruction* currentInstruction)
{
    m_isShareable = false;

    auto bytecode = currentInstruction->as<OpProfileType>();
    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
    TypeLocation* cachedTypeLocation = metadata.m_typeLocation;
    VirtualRegister valueToProfile = bytecode.m_targetVirtualRegister;

    emitGetVirtualRegister(valueToProfile, jsRegT10);

    JumpList jumpToEnd;

    jumpToEnd.append(branchIfEmpty(jsRegT10));

    // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
    // These typechecks are inlined to match those of the 64-bit JSValue type checks.
    if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
        jumpToEnd.append(branchIfUndefined(jsRegT10));
    else if (cachedTypeLocation->m_lastSeenType == TypeNull)
        jumpToEnd.append(branchIfNull(jsRegT10));
    else if (cachedTypeLocation->m_lastSeenType == TypeBoolean)
        jumpToEnd.append(branchIfBoolean(jsRegT10, regT2));
    else if (cachedTypeLocation->m_lastSeenType == TypeAnyInt)
        jumpToEnd.append(branchIfInt32(jsRegT10));
    else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
        jumpToEnd.append(branchIfNumber(jsRegT10, regT2));
    else if (cachedTypeLocation->m_lastSeenType == TypeString) {
        Jump isNotCell = branchIfNotCell(jsRegT10);
        jumpToEnd.append(branchIfString(jsRegT10.payloadGPR()));
        isNotCell.link(this);
    }

    // Load the type profiling log into T2.
    TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
    move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
    // Load the next log entry into T3.
    loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT3);

    // Store the JSValue onto the log entry.
    storeValue(jsRegT10, Address(regT3, TypeProfilerLog::LogEntry::valueOffset()));

    // Store the structureID of the cell if jsRegT10 is a cell, otherwise, store 0 on the log entry.
    Jump notCell = branchIfNotCell(jsRegT10);
    load32(Address(jsRegT10.payloadGPR(), JSCell::structureIDOffset()), regT0);
    store32(regT0, Address(regT3, TypeProfilerLog::LogEntry::structureIDOffset()));
    Jump skipIsCell = jump();
    notCell.link(this);
    store32(TrustedImm32(0), Address(regT3, TypeProfilerLog::LogEntry::structureIDOffset()));
    skipIsCell.link(this);

    // Store the typeLocation on the log entry.
    move(TrustedImmPtr(cachedTypeLocation), regT0);
    storePtr(regT0, Address(regT3, TypeProfilerLog::LogEntry::locationOffset()));

    // Increment the current log entry.
    addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT3);
    storePtr(regT3, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
    Jump skipClearLog = branchPtr(NotEqual, regT3, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
    // Clear the log if we're at the end of the log.
    callOperationNoExceptionCheck(operationProcessTypeProfilerLog, TrustedImmPtr(&vm()));
    skipClearLog.link(this);

    jumpToEnd.link(this);
}

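// ShadowChicken keeps a side log of prologue and tail-call packets so the debugger can reconstruct the
// "shadow" call stack across tail calls; these two opcodes grab a packet slot and fill it inline.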
void JIT::emit_op_log_shadow_chicken_prologue(const JSInstruction* currentInstruction)
{
    RELEASE_ASSERT(vm().shadowChicken());
    updateTopCallFrame();
    static_assert(noOverlap(regT0, nonArgGPR0, regT2), "we will have problems if this is true.");
    auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>();
    GPRReg shadowPacketReg = regT0;
    GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
    GPRReg scratch2Reg = regT2;
    ensureShadowChickenPacket(vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
    emitGetVirtualRegisterPayload(bytecode.m_scope, regT3);
    logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3);
}

void JIT::emit_op_log_shadow_chicken_tail(const JSInstruction* currentInstruction)
{
    RELEASE_ASSERT(vm().shadowChicken());
    updateTopCallFrame();
    static_assert(noOverlap(regT0, nonArgGPR0, regT2), "we will have problems if this is true.");
    static_assert(noOverlap(regT0, regT1, jsRegT32, regT4), "we will have problems if this is true.");
    auto bytecode = currentInstruction->as<OpLogShadowChickenTail>();
    GPRReg shadowPacketReg = regT0;
    {
        GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
        GPRReg scratch2Reg = regT2;
        ensureShadowChickenPacket(vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
    }
    emitGetVirtualRegister(bytecode.m_thisValue, jsRegT32);
    emitGetVirtualRegisterPayload(bytecode.m_scope, regT4);
    loadPtr(addressFor(CallFrameSlot::codeBlock), regT1);
    logShadowChickenTailPacket(shadowPacketReg, jsRegT32, regT4, regT1, CallSiteIndex(m_bytecodeIndex));
}

void JIT::emit_op_profile_control_flow(const JSInstruction* currentInstruction)
{
    m_isShareable = false;

    auto bytecode = currentInstruction->as<OpProfileControlFlow>();
    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
    BasicBlockLocation* basicBlockLocation = metadata.m_basicBlockLocation;
#if USE(JSVALUE64)
    basicBlockLocation->emitExecuteCode(*this);
#else
    basicBlockLocation->emitExecuteCode(*this, regT0);
#endif
}

void JIT::emit_op_argument_count(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpArgumentCount>();
    VirtualRegister dst = bytecode.m_dst;
    load32(payloadFor(CallFrameSlot::argumentCountIncludingThis), regT0);
    sub32(TrustedImm32(1), regT0);
    JSValueRegs result = JSValueRegs::withTwoAvailableRegs(regT0, regT1);
    boxInt32(regT0, result);
    emitPutVirtualRegister(dst, result);
}

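// Rest length is max(0, (argumentCountIncludingThis - 1) - numParametersToSkip); the zero case is
// handled on a separate branch so it can just materialize the boxed constant 0.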
void JIT::emit_op_get_rest_length(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetRestLength>();
    VirtualRegister dst = bytecode.m_dst;
    unsigned numParamsToSkip = bytecode.m_numParametersToSkip;

    load32(payloadFor(CallFrameSlot::argumentCountIncludingThis), regT0);
    sub32(TrustedImm32(1), regT0);
    Jump zeroLength = branch32(LessThanOrEqual, regT0, Imm32(numParamsToSkip));
    sub32(Imm32(numParamsToSkip), regT0);
    boxInt32(regT0, jsRegT10);
    Jump done = jump();

    zeroLength.link(this);
    moveTrustedValue(jsNumber(0), jsRegT10);

    done.link(this);
    emitPutVirtualRegister(dst, jsRegT10);
}

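// op_get_argument reads the index'th argument if the caller actually passed it (checked against
// argumentCountIncludingThis) and produces undefined otherwise; the result is value-profiled.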
void JIT::emit_op_get_argument(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetArgument>();
    VirtualRegister dst = bytecode.m_dst;
    int index = bytecode.m_index;

    load32(payloadFor(CallFrameSlot::argumentCountIncludingThis), regT2);
    Jump argumentOutOfBounds = branch32(LessThanOrEqual, regT2, TrustedImm32(index));
    loadValue(addressFor(VirtualRegister(CallFrameSlot::thisArgument + index)), jsRegT10);
    Jump done = jump();

    argumentOutOfBounds.link(this);
    moveValue(jsUndefined(), jsRegT10);

    done.link(this);
    emitValueProfilingSite(bytecode, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

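// op_get_prototype_of has a fast path only for object operands whose prototype emitLoadPrototype can
// read directly from the structure; anything else (non-cells, non-objects, or structures that need the
// generic path) falls through to the slow case.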
void JIT::emit_op_get_prototype_of(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetPrototypeOf>();

    emitGetVirtualRegister(bytecode.m_value, jsRegT10);

    JumpList slowCases;
    slowCases.append(branchIfNotCell(jsRegT10));
    slowCases.append(branchIfNotObject(jsRegT10.payloadGPR()));

    emitLoadPrototype(vm(), jsRegT10.payloadGPR(), jsRegT32, slowCases);
    addSlowCase(slowCases);

    emitValueProfilingSite(bytecode, jsRegT32);
    emitPutVirtualRegister(bytecode.m_dst, jsRegT32);
}

} // namespace JSC

#endif // ENABLE(JIT)