/*
 * Copyright (C) 2011-2022 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "AssemblyHelpers.h"

#if ENABLE(JIT)

#include "AccessCase.h"
#include "AssemblyHelpersSpoolers.h"
#include "JITOperations.h"
#include "JSArrayBufferView.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "SuperSampler.h"
#include "ThunkGenerators.h"

#if ENABLE(WEBASSEMBLY)
#include "WasmContextInlines.h"
#include "WasmMemoryInformation.h"
#endif

namespace JSC {

AssemblyHelpers::Jump AssemblyHelpers::branchIfFastTypedArray(GPRReg baseGPR)
{
    return branch32(
        Equal,
        Address(baseGPR, JSArrayBufferView::offsetOfMode()),
        TrustedImm32(FastTypedArray));
}

AssemblyHelpers::Jump AssemblyHelpers::branchIfNotFastTypedArray(GPRReg baseGPR)
{
    return branch32(
        NotEqual,
        Address(baseGPR, JSArrayBufferView::offsetOfMode()),
        TrustedImm32(FastTypedArray));
}

void AssemblyHelpers::incrementSuperSamplerCount()
{
    add32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<const void*>(&g_superSamplerCount)));
}

void AssemblyHelpers::decrementSuperSamplerCount()
{
    sub32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<const void*>(&g_superSamplerCount)));
}

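// Note: purifyNaN canonicalizes any NaN into the pure NaN bit pattern (PNaN).
// With a 64-bit boxed value representation, arbitrary NaN payloads could be
// confused with tagged JSValues, so doubles headed for the heap must carry a
// single canonical NaN.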
void AssemblyHelpers::purifyNaN(FPRReg fpr)
{
    MacroAssembler::Jump notNaN = branchIfNotNaN(fpr);
    static const double NaN = PNaN;
    loadDouble(TrustedImmPtr(&NaN), fpr);
    notNaN.link(this);
}

#if ENABLE(SAMPLING_FLAGS)
void AssemblyHelpers::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

void AssemblyHelpers::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif

#if ASSERT_ENABLED
#if USE(JSVALUE64)
void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
{
#if CPU(X86_64) || CPU(ARM64)
    Jump checkInt32 = branch64(BelowOrEqual, gpr, TrustedImm64(static_cast<uintptr_t>(0xFFFFFFFFu)));
    abortWithReason(AHIsNotInt32);
    checkInt32.link(this);
#else
    UNUSED_PARAM(gpr);
#endif
}

void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
{
    Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::numberTagRegister);
    abortWithReason(AHIsNotJSInt32);
    checkJSInt32.link(this);
}

void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
{
    Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::numberTagRegister);
    abortWithReason(AHIsNotJSNumber);
    checkJSNumber.link(this);
}

void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
{
    Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::numberTagRegister);
    Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::numberTagRegister);
    checkJSInt32.link(this);
    abortWithReason(AHIsNotJSDouble);
    checkJSNumber.link(this);
}

void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
{
    Jump checkCell = branchTest64(MacroAssembler::Zero, gpr, GPRInfo::notCellMaskRegister);
    abortWithReason(AHIsNotCell);
    checkCell.link(this);
}

void AssemblyHelpers::jitAssertTagsInPlace()
{
    Jump ok = branch64(Equal, GPRInfo::numberTagRegister, TrustedImm64(JSValue::NumberTag));
    abortWithReason(AHNumberTagNotInPlace);
    breakpoint();
    ok.link(this);

    ok = branch64(Equal, GPRInfo::notCellMaskRegister, TrustedImm64(JSValue::NotCellMask));
    abortWithReason(AHNotCellMaskNotInPlace);
    ok.link(this);
}
#elif USE(JSVALUE32_64)
void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
{
    UNUSED_PARAM(gpr);
}

void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
{
    Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
    abortWithReason(AHIsNotJSInt32);
    checkJSInt32.link(this);
}

void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
{
    Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
    Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
    abortWithReason(AHIsNotJSNumber);
    checkJSInt32.link(this);
    checkJSDouble.link(this);
}

void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
{
    Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
    abortWithReason(AHIsNotJSDouble);
    checkJSDouble.link(this);
}

void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
{
    Jump checkCell = branchIfCell(gpr);
    abortWithReason(AHIsNotCell);
    checkCell.link(this);
}

void AssemblyHelpers::jitAssertTagsInPlace()
{
}
#endif // USE(JSVALUE32_64)

void AssemblyHelpers::jitAssertHasValidCallFrame()
{
    Jump checkCFR = branchTestPtr(Zero, GPRInfo::callFrameRegister, TrustedImm32(7));
    abortWithReason(AHCallFrameMisaligned);
    checkCFR.link(this);
}

void AssemblyHelpers::jitAssertIsNull(GPRReg gpr)
{
    Jump checkNull = branchTestPtr(Zero, gpr);
    abortWithReason(AHIsNotNull);
    checkNull.link(this);
}

void AssemblyHelpers::jitAssertArgumentCountSane()
{
    Jump ok = branch32(Below, payloadFor(CallFrameSlot::argumentCountIncludingThis), TrustedImm32(10000000));
    abortWithReason(AHInsaneArgumentCount);
    ok.link(this);
}

void AssemblyHelpers::jitAssertCodeBlockOnCallFrameWithType(GPRReg scratchGPR, JITType type)
{
    emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, scratchGPR);
    loadPtr(Address(scratchGPR, CodeBlock::jitCodeOffset()), scratchGPR);
    load8(Address(scratchGPR, JITCode::offsetOfJITType()), scratchGPR);
    Jump ok = branch32(Equal, scratchGPR, TrustedImm32(static_cast<unsigned>(type)));
    abortWithReason(AHInvalidCodeBlock);
    ok.link(this);
}

void AssemblyHelpers::jitAssertCodeBlockOnCallFrameIsOptimizingJIT(GPRReg scratchGPR)
{
    emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, scratchGPR);
    loadPtr(Address(scratchGPR, CodeBlock::jitCodeOffset()), scratchGPR);
    load8(Address(scratchGPR, JITCode::offsetOfJITType()), scratchGPR);
    JumpList ok;
    ok.append(branch32(Equal, scratchGPR, TrustedImm32(static_cast<unsigned>(JITType::DFGJIT))));
    ok.append(branch32(Equal, scratchGPR, TrustedImm32(static_cast<unsigned>(JITType::FTLJIT))));
    abortWithReason(AHInvalidCodeBlock);
    ok.link(this);
}

#endif // ASSERT_ENABLED

void AssemblyHelpers::jitReleaseAssertNoException(VM& vm)
{
    Jump noException;
#if USE(JSVALUE64)
    noException = branchTest64(Zero, AbsoluteAddress(vm.addressOfException()));
#elif USE(JSVALUE32_64)
    noException = branch32(Equal, AbsoluteAddress(vm.addressOfException()), TrustedImm32(0));
#endif
    abortWithReason(JITUncaughtExceptionAfterCall);
    noException.link(this);
}

void AssemblyHelpers::callExceptionFuzz(VM& vm)
{
    RELEASE_ASSERT(Options::useExceptionFuzz());

    EncodedJSValue* buffer = vm.exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters));

    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        store64(GPRInfo::toRegister(i), buffer + i);
#else
        store32(GPRInfo::toRegister(i), buffer + i);
#endif
    }
    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        storeDouble(FPRInfo::toRegister(i), Address(GPRInfo::regT0));
    }

    // Set up one argument.
    move(TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
    move(TrustedImmPtr(tagCFunction<OperationPtrTag>(operationExceptionFuzzWithCallFrame)), GPRInfo::nonPreservedNonReturnGPR);
    prepareCallOperation(vm);
    call(GPRInfo::nonPreservedNonReturnGPR, OperationPtrTag);

    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        loadDouble(Address(GPRInfo::regT0), FPRInfo::toRegister(i));
    }
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        load64(buffer + i, GPRInfo::toRegister(i));
#else
        load32(buffer + i, GPRInfo::toRegister(i));
#endif
    }
}

AssemblyHelpers::Jump AssemblyHelpers::emitJumpIfException(VM& vm)
{
    return emitExceptionCheck(vm, NormalExceptionCheck);
}

AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(VM& vm, ExceptionCheckKind kind, ExceptionJumpWidth width)
{
    if (UNLIKELY(Options::useExceptionFuzz()))
        callExceptionFuzz(vm);

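    // For a far jump, invert the sense of the check: the short branch below then
    // skips over a patchable far jump, and that far jump becomes the result.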
    if (width == FarJumpWidth)
        kind = (kind == NormalExceptionCheck ? InvertedExceptionCheck : NormalExceptionCheck);

    Jump result;
#if USE(JSVALUE64)
    result = branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm.addressOfException()));
#elif USE(JSVALUE32_64)
    result = branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(vm.addressOfException()), TrustedImm32(0));
#endif

    if (width == NormalJumpWidth)
        return result;

    PatchableJump realJump = patchableJump();
    result.link(this);

    return realJump.m_jump;
}

AssemblyHelpers::Jump AssemblyHelpers::emitNonPatchableExceptionCheck(VM& vm)
{
    if (UNLIKELY(Options::useExceptionFuzz()))
        callExceptionFuzz(vm);

    Jump result;
#if USE(JSVALUE64)
    result = branchTest64(NonZero, AbsoluteAddress(vm.addressOfException()));
#elif USE(JSVALUE32_64)
    result = branch32(NotEqual, AbsoluteAddress(vm.addressOfException()), TrustedImm32(0));
#endif

    return result;
}

void AssemblyHelpers::emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest)
{
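    // Initializing the cell header takes two 32-bit stores: the structure ID, and
    // the blob covering the indexing type, JSType, and type-info flags (verified
    // piecewise by the assertions below).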
    const Structure* structurePtr = reinterpret_cast<const Structure*>(structure.m_value);
#if USE(JSVALUE64)
    jit.store32(TrustedImm32(structurePtr->id().bits()), MacroAssembler::Address(dest, JSCell::structureIDOffset()));
    jit.store32(TrustedImm32(structurePtr->typeInfoBlob()), MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset()));
    if (ASSERT_ENABLED) {
        Jump correctStructure = jit.branch32(Equal, MacroAssembler::Address(dest, JSCell::structureIDOffset()), TrustedImm32(structurePtr->id().bits()));
        jit.abortWithReason(AHStructureIDIsValid);
        correctStructure.link(&jit);

        Jump correctIndexingType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset()), TrustedImm32(structurePtr->indexingModeIncludingHistory()));
        jit.abortWithReason(AHIndexingTypeIsValid);
        correctIndexingType.link(&jit);

        Jump correctType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoTypeOffset()), TrustedImm32(structurePtr->typeInfo().type()));
        jit.abortWithReason(AHTypeInfoIsValid);
        correctType.link(&jit);

        Jump correctFlags = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoFlagsOffset()), TrustedImm32(structurePtr->typeInfo().inlineTypeFlags()));
        jit.abortWithReason(AHTypeInfoInlineTypeFlagsAreValid);
        correctFlags.link(&jit);
    }
#else
    // Do a 32-bit wide store to initialize the cell's fields.
    jit.store32(TrustedImm32(structurePtr->typeInfoBlob()), MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset()));
    jit.storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#endif
}

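// Property storage layout: properties live either inline in the JSObject cell or
// out-of-line in the butterfly, where out-of-line slots sit at negative indices
// from the butterfly pointer. Negating the offset (out-of-line case) or rebasing
// onto the object itself (inline case) lets one BaseIndex expression address both.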
void AssemblyHelpers::loadProperty(GPRReg object, GPRReg offset, JSValueRegs result)
{
    Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));

    loadPtr(Address(object, JSObject::butterflyOffset()), result.payloadGPR());
    neg32(offset);
    signExtend32ToPtr(offset, offset);
    Jump ready = jump();

    isInline.link(this);
    addPtr(
        TrustedImm32(
            static_cast<int32_t>(sizeof(JSObject)) -
            (static_cast<int32_t>(firstOutOfLineOffset) - 2) * static_cast<int32_t>(sizeof(EncodedJSValue))),
        object, result.payloadGPR());

    ready.link(this);

    loadValue(
        BaseIndex(
            result.payloadGPR(), offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)),
        result);
}

void AssemblyHelpers::storeProperty(JSValueRegs value, GPRReg object, GPRReg offset, GPRReg scratch)
{
    Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));

    loadPtr(Address(object, JSObject::butterflyOffset()), scratch);
    neg32(offset);
    signExtend32ToPtr(offset, offset);
    Jump ready = jump();

    isInline.link(this);
    addPtr(
        TrustedImm32(
            static_cast<int32_t>(sizeof(JSObject)) -
            (static_cast<int32_t>(firstOutOfLineOffset) - 2) * static_cast<int32_t>(sizeof(EncodedJSValue))),
        object, scratch);

    ready.link(this);

    storeValue(value,
        BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
}

void AssemblyHelpers::emitNonNullDecodeZeroExtendedStructureID(RegisterID source, RegisterID dest)
{
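    // A StructureID is a compressed encoding of a Structure*. With
    // STRUCTURE_ID_WITH_SHIFT the pointer is recovered by shifting left; otherwise
    // on 64-bit platforms the ID is an offset from g_jscConfig.startOfStructureHeap.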
#if ENABLE(STRUCTURE_ID_WITH_SHIFT)
    lshift64(source, TrustedImm32(StructureID::encodeShiftAmount), dest);
#elif CPU(ADDRESS64)
    // This could use BFI on arm64, but that only helps if the start of the structure
    // heap is encodable as a mov rather than as an immediate in the add, so it's
    // probably not super important.
    if constexpr (structureHeapAddressSize >= 4 * GB) {
        ASSERT(structureHeapAddressSize == 4 * GB);
        move(source, dest);
        add64(TrustedImm64(g_jscConfig.startOfStructureHeap), dest);
    } else {
        and32(TrustedImm32(StructureID::structureIDMask), source, dest);
        add64(TrustedImm64(g_jscConfig.startOfStructureHeap), dest);
    }
#else // not CPU(ADDRESS64)
    move(source, dest);
#endif
}

void AssemblyHelpers::emitLoadStructure(VM&, RegisterID source, RegisterID dest)
{
    load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
    emitNonNullDecodeZeroExtendedStructureID(dest, dest);
}

void AssemblyHelpers::emitEncodeStructureID(RegisterID source, RegisterID dest)
{
#if ENABLE(STRUCTURE_ID_WITH_SHIFT)
    urshift64(source, TrustedImm32(StructureID::encodeShiftAmount), dest);
#elif CPU(ADDRESS64)
    move(source, dest);
    static_assert(StructureID::structureIDMask <= UINT32_MAX);
    and64(TrustedImm32(static_cast<uint32_t>(StructureID::structureIDMask)), dest);
#else
    move(source, dest);
#endif
}

void AssemblyHelpers::emitLoadPrototype(VM& vm, GPRReg objectGPR, JSValueRegs resultRegs, JumpList& slowPath)
{
    ASSERT(resultRegs.payloadGPR() != objectGPR);

    emitLoadStructure(vm, objectGPR, resultRegs.payloadGPR());

    auto overridesGetPrototype = branchTest32(MacroAssembler::NonZero, MacroAssembler::Address(resultRegs.payloadGPR(), Structure::outOfLineTypeFlagsOffset()), TrustedImm32(OverridesGetPrototypeOutOfLine));
    slowPath.append(overridesGetPrototype);

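    // A structure with a mono prototype stores it directly; the empty value marks
    // poly proto, where the prototype instead lives in the object's known inline
    // property slot (knownPolyProtoOffset).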
    loadValue(MacroAssembler::Address(resultRegs.payloadGPR(), Structure::prototypeOffset()), resultRegs);
    auto hasMonoProto = branchIfNotEmpty(resultRegs);
    loadValue(MacroAssembler::Address(objectGPR, offsetRelativeToBase(knownPolyProtoOffset)), resultRegs);
    hasMonoProto.link(this);
}

void AssemblyHelpers::makeSpaceOnStackForCCall()
{
    unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
    if (stackOffset)
        subPtr(TrustedImm32(stackOffset), stackPointerRegister);
}

void AssemblyHelpers::reclaimSpaceOnStackForCCall()
{
    unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
    if (stackOffset)
        addPtr(TrustedImm32(stackOffset), stackPointerRegister);
}

#if USE(JSVALUE64)
template<typename LoadFromHigh, typename StoreToHigh, typename LoadFromLow, typename StoreToLow>
void emitRandomThunkImpl(AssemblyHelpers& jit, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result, const LoadFromHigh& loadFromHigh, const StoreToHigh& storeToHigh, const LoadFromLow& loadFromLow, const StoreToLow& storeToLow)
{
    // Inlined WeakRandom::advance().
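    // The generator is xorshift128+: mix the two 64-bit state words with shifts
    // and xors, then return the sum of the two updated state words.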
    // uint64_t x = m_low;
    loadFromLow(scratch0);
    // uint64_t y = m_high;
    loadFromHigh(scratch1);
    // m_low = y;
    storeToLow(scratch1);

    // x ^= x << 23;
    jit.move(scratch0, scratch2);
    jit.lshift64(AssemblyHelpers::TrustedImm32(23), scratch2);
    jit.xor64(scratch2, scratch0);

    // x ^= x >> 17;
    jit.move(scratch0, scratch2);
    jit.rshift64(AssemblyHelpers::TrustedImm32(17), scratch2);
    jit.xor64(scratch2, scratch0);

    // x ^= y ^ (y >> 26);
    jit.move(scratch1, scratch2);
    jit.rshift64(AssemblyHelpers::TrustedImm32(26), scratch2);
    jit.xor64(scratch1, scratch2);
    jit.xor64(scratch2, scratch0);

    // m_high = x;
    storeToHigh(scratch0);

    // return x + y;
    jit.add64(scratch1, scratch0);

    // Extract a random 53-bit value: integers of up to 53 bits are exactly representable as doubles.
    jit.move(AssemblyHelpers::TrustedImm64((1ULL << 53) - 1), scratch1);
    jit.and64(scratch1, scratch0);
    // Now, scratch0 is always in range of int64_t. Safe to convert it to double with cvtsi2sdq.
    jit.convertInt64ToDouble(scratch0, result);

    // Convert `(53-bit double integer value) / (1 << 53)` to `(53-bit double integer value) * (1.0 / (1 << 53))`.
    // In the latter case, `1.0 / (1 << 53)` is the double with mantissa = 0 and biased exponent = 970, i.e. 2^-53.
    static constexpr double scale = 1.0 / (1ULL << 53);

    // Multiplying by 2^-53 leaves the mantissa of the 53-bit double integer unchanged;
    // it only reduces the exponent part. (Except for 0.0, which is handled specially:
    // its exponent simply stays 0.) The result is a random double with 53-bit
    // precision in [0, 1).
    jit.move(AssemblyHelpers::TrustedImmPtr(&scale), scratch1);
    jit.mulDouble(AssemblyHelpers::Address(scratch1), result);
}

void AssemblyHelpers::emitRandomThunk(JSGlobalObject* globalObject, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result)
{
    void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
    void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();

    auto loadFromHigh = [&](GPRReg high) {
        load64(highAddress, high);
    };
    auto storeToHigh = [&](GPRReg high) {
        store64(high, highAddress);
    };
    auto loadFromLow = [&](GPRReg low) {
        load64(lowAddress, low);
    };
    auto storeToLow = [&](GPRReg low) {
        store64(low, lowAddress);
    };

    emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow);
}

void AssemblyHelpers::emitRandomThunk(VM& vm, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result)
{
    emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, scratch3);
    emitLoadStructure(vm, scratch3, scratch3);
    loadPtr(Address(scratch3, Structure::globalObjectOffset()), scratch3);
    // Now, scratch3 holds JSGlobalObject*.

    auto loadFromHigh = [&](GPRReg high) {
        load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()), high);
    };
    auto storeToHigh = [&](GPRReg high) {
        store64(high, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()));
    };
    auto loadFromLow = [&](GPRReg low) {
        load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()), low);
    };
    auto storeToLow = [&](GPRReg low) {
        store64(low, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()));
    };

    emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow);
}
#endif

void AssemblyHelpers::emitAllocateWithNonNullAllocator(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
{
    if (Options::forceGCSlowPaths()) {
        slowPath.append(jump());
        return;
    }

    // NOTE, some invariants of this function:
    // - When going to the slow path, we must leave resultGPR with zero in it.
    // - We *can not* use RegisterSet::macroScratchRegisters on x86.
    // - We *can* use RegisterSet::macroScratchRegisters on ARM.

    Jump popPath;
    Jump done;

    if (allocator.isConstant())
        move(TrustedImmPtr(allocator.allocator().localAllocator()), allocatorGPR);

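    // Fast path: bump allocation. The free list's "remaining" byte count counts
    // down toward zero, so the fresh cell lives at payloadEnd - remaining; compute
    // that address and store back the decremented count.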
    load32(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfRemaining()), resultGPR);
    popPath = branchTest32(Zero, resultGPR);
    if (allocator.isConstant())
        add32(TrustedImm32(-allocator.allocator().cellSize()), resultGPR, scratchGPR);
    else {
        move(resultGPR, scratchGPR);
        sub32(Address(allocatorGPR, LocalAllocator::offsetOfCellSize()), scratchGPR);
    }
    negPtr(resultGPR);
    store32(scratchGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfRemaining()));
    Address payloadEndAddr = Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfPayloadEnd());
    addPtr(payloadEndAddr, resultGPR);

    done = jump();

    popPath.link(this);

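    // Bump allocation is exhausted; pop the free list instead. The head pointer is
    // stored scrambled (xor'ed with a per-free-list secret), so a corrupted heap
    // value cannot be used directly as an allocation result.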
    loadPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()), resultGPR);
    xorPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), resultGPR);
    slowPath.append(branchTestPtr(Zero, resultGPR));

    // The object is half-allocated: we have what we know is a fresh object, but
    // it's still on the GC's free list.
    loadPtr(Address(resultGPR, FreeCell::offsetOfScrambledNext()), scratchGPR);
    storePtr(scratchGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()));

    done.link(this);
}

void AssemblyHelpers::emitAllocate(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
{
    if (allocator.isConstant()) {
        if (!allocator.allocator()) {
            slowPath.append(jump());
            return;
        }
    } else
        slowPath.append(branchTestPtr(Zero, allocatorGPR));
    emitAllocateWithNonNullAllocator(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath);
}

void AssemblyHelpers::emitAllocateVariableSized(GPRReg resultGPR, CompleteSubspace& subspace, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
{
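    // Round the request up to a multiple of MarkedSpace::sizeStep, convert it to a
    // size-class index, and look up the per-size-class allocator in the subspace's
    // table; sizes past largeCutoff go to the slow path.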
    static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");

    unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);

    add32(TrustedImm32(MarkedSpace::sizeStep - 1), allocationSize, scratchGPR1);
    urshift32(TrustedImm32(stepShift), scratchGPR1);
    slowPath.append(branch32(Above, scratchGPR1, TrustedImm32(MarkedSpace::largeCutoff >> stepShift)));
    move(TrustedImmPtr(subspace.allocatorForSizeStep()), scratchGPR2);
    loadPtr(BaseIndex(scratchGPR2, scratchGPR1, ScalePtr), scratchGPR1);

    emitAllocate(resultGPR, JITAllocator::variable(), scratchGPR1, scratchGPR2, slowPath);
}

void AssemblyHelpers::restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(EntryFrame*& topEntryFrame)
{
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
    unsigned registerCount = allCalleeSaves->registerCount();

    GPRReg scratch = InvalidGPRReg;
    unsigned scratchGPREntryIndex = 0;
#if CPU(ARM64)
    // We don't need a second scratch GPR, but we'll also defer restoring this
    // GPR (in the next slot after the scratch) so that we can restore them together
    // later using a loadPair64.
    GPRReg unusedNextSlotGPR = InvalidGPRReg;
#endif

    // Use the first GPR entry's register as our baseGPR.
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontRestoreRegisters.contains(entry.reg()))
            continue;
        if (entry.reg().isGPR()) {
#if CPU(ARM64)
            if (i + 1 < registerCount) {
                RegisterAtOffset entry2 = allCalleeSaves->at(i + 1);
                if (!dontRestoreRegisters.contains(entry2.reg())
                    && entry2.reg().isGPR()
                    && entry2.offset() == entry.offset() + static_cast<ptrdiff_t>(sizeof(CPURegister))) {
                    scratchGPREntryIndex = i;
                    scratch = entry.reg().gpr();
                    unusedNextSlotGPR = entry2.reg().gpr();
                    break;
                }
            }
#else
            scratchGPREntryIndex = i;
            scratch = entry.reg().gpr();
            break;
#endif
        }
    }
    ASSERT(scratch != InvalidGPRReg);

    RegisterSet skipList;
    skipList.set(dontRestoreRegisters);

    // Skip the scratch register(s). We'll restore them later.
    skipList.add(scratch);
#if CPU(ARM64)
    RELEASE_ASSERT(unusedNextSlotGPR != InvalidGPRReg);
    skipList.add(unusedNextSlotGPR);
#endif

    loadPtr(&topEntryFrame, scratch);
    restoreCalleeSavesFromVMEntryFrameCalleeSavesBufferImpl(scratch, skipList);

    // Restore the callee save value of the scratch.
    RegisterAtOffset entry = allCalleeSaves->at(scratchGPREntryIndex);
    ASSERT(!dontRestoreRegisters.get(entry.reg()));
    ASSERT(entry.reg().isGPR());
    ASSERT(scratch == entry.reg().gpr());
#if CPU(ARM64)
    RegisterAtOffset entry2 = allCalleeSaves->at(scratchGPREntryIndex + 1);
    ASSERT_UNUSED(entry2, !dontRestoreRegisters.get(entry2.reg()));
    ASSERT(entry2.reg().isGPR());
    ASSERT(unusedNextSlotGPR == entry2.reg().gpr());
    loadPair64(scratch, TrustedImm32(entry.offset()), scratch, unusedNextSlotGPR);
#else
    loadPtr(Address(scratch, entry.offset()), scratch);
#endif

#else
    UNUSED_PARAM(topEntryFrame);
#endif // NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
}

void AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(GPRReg vmGPR, GPRReg scratchGPR)
{
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    loadPtr(Address(vmGPR, VM::topEntryFrameOffset()), scratchGPR);
    restoreCalleeSavesFromVMEntryFrameCalleeSavesBufferImpl(scratchGPR, RegisterSet::stackRegisters());
#else
    UNUSED_PARAM(vmGPR);
#endif // NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
}

void AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBufferImpl(GPRReg entryFrameGPR, const RegisterSet& skipList)
{
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    addPtr(TrustedImm32(EntryFrame::calleeSaveRegistersBufferOffset()), entryFrameGPR);

    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    unsigned registerCount = allCalleeSaves->registerCount();

    LoadRegSpooler spooler(*this, entryFrameGPR);

    // Restore all callee saves except for the scratch.
    unsigned i = 0;
    for (; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (skipList.contains(entry.reg()))
            continue;
        if (!entry.reg().isGPR())
            break;
        spooler.loadGPR(entry);
    }
    spooler.finalizeGPR();
    for (; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (skipList.contains(entry.reg()))
            continue;
        ASSERT(!entry.reg().isGPR());
        spooler.loadFPR(entry);
    }
    spooler.finalizeFPR();

#else
    UNUSED_PARAM(entryFrameGPR);
    UNUSED_PARAM(skipList);
#endif // NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
}

void AssemblyHelpers::emitVirtualCall(VM& vm, JSGlobalObject* globalObject, CallLinkInfo* info)
{
    move(TrustedImmPtr(globalObject), GPRInfo::regT3);
    move(TrustedImmPtr(info), GPRInfo::regT2);
    emitVirtualCallWithoutMovingGlobalObject(vm, GPRInfo::regT2, info->callMode());
}

void AssemblyHelpers::emitVirtualCallWithoutMovingGlobalObject(VM& vm, GPRReg callLinkInfoGPR, CallMode callMode)
{
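    // Emit an unlinked near call now; once the LinkBuffer is finalized, patch it
    // on the main thread to point at the VM's virtual call thunk.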
    move(callLinkInfoGPR, GPRInfo::regT2);
    Call call = nearCall();
    addLinkTask([=, &vm] (LinkBuffer& linkBuffer) {
        auto callLocation = linkBuffer.locationOfNearCall<JITCompilationPtrTag>(call);
        linkBuffer.addMainThreadFinalizationTask([=, &vm] () {
            MacroAssemblerCodeRef<JITStubRoutinePtrTag> virtualThunk = vm.getCTIVirtualCall(callMode);
            MacroAssembler::repatchNearCall(callLocation, CodeLocationLabel<JITStubRoutinePtrTag>(virtualThunk.code()));
        });
    });
}

#if USE(JSVALUE64)
void AssemblyHelpers::wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch)
{
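    // Thomas Wang's 64-bit integer hash, computed in place: inputAndResult is
    // mixed down to a 32-bit value, with scratch holding the shifted copies.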
    GPRReg input = inputAndResult;
    // key += ~(key << 32);
    move(input, scratch);
    lshift64(TrustedImm32(32), scratch);
    not64(scratch);
    add64(scratch, input);
    // key ^= (key >> 22);
    move(input, scratch);
    urshift64(TrustedImm32(22), scratch);
    xor64(scratch, input);
    // key += ~(key << 13);
    move(input, scratch);
    lshift64(TrustedImm32(13), scratch);
    not64(scratch);
    add64(scratch, input);
    // key ^= (key >> 8);
    move(input, scratch);
    urshift64(TrustedImm32(8), scratch);
    xor64(scratch, input);
    // key += (key << 3);
    move(input, scratch);
    lshift64(TrustedImm32(3), scratch);
    add64(scratch, input);
    // key ^= (key >> 15);
    move(input, scratch);
    urshift64(TrustedImm32(15), scratch);
    xor64(scratch, input);
    // key += ~(key << 27);
    move(input, scratch);
    lshift64(TrustedImm32(27), scratch);
    not64(scratch);
    add64(scratch, input);
    // key ^= (key >> 31);
    move(input, scratch);
    urshift64(TrustedImm32(31), scratch);
    xor64(scratch, input);

    // return static_cast<unsigned>(result)
    void* mask = bitwise_cast<void*>(static_cast<uintptr_t>(UINT_MAX));
    and64(TrustedImmPtr(mask), inputAndResult);
}
#endif // USE(JSVALUE64)

void AssemblyHelpers::emitConvertValueToBoolean(VM& vm, JSValueRegs value, GPRReg result, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg valueAsFPR, FPRReg tempFPR, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject* globalObject, bool invert)
{
    // Implements the following control flow structure:
    // if (value is cell) {
    //     if (value is string or value is HeapBigInt)
    //         result = !!value->length
    //     else {
    //         do evil things for masquerades-as-undefined
    //         result = true
    //     }
    // } else if (value is int32) {
    //     result = !!unboxInt32(value)
    // } else if (value is number) {
    //     result = !!unboxDouble(value)
    // } else if (value is BigInt32) {
    //     result = !!unboxBigInt32(value)
    // } else {
    //     result = value == jsTrue
    // }

    JumpList done;

    auto notCell = branchIfNotCell(value);
    auto isString = branchIfString(value.payloadGPR());
    auto isHeapBigInt = branchIfHeapBigInt(value.payloadGPR());

    if (shouldCheckMasqueradesAsUndefined) {
        ASSERT(scratchIfShouldCheckMasqueradesAsUndefined != InvalidGPRReg);
        JumpList isNotMasqueradesAsUndefined;
        isNotMasqueradesAsUndefined.append(branchTest8(Zero, Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)));
        emitLoadStructure(vm, value.payloadGPR(), result);
        move(TrustedImmPtr(globalObject), scratchIfShouldCheckMasqueradesAsUndefined);
        isNotMasqueradesAsUndefined.append(branchPtr(NotEqual, Address(result, Structure::globalObjectOffset()), scratchIfShouldCheckMasqueradesAsUndefined));

        // We act like we are "undefined" here.
        move(invert ? TrustedImm32(1) : TrustedImm32(0), result);
        done.append(jump());
        isNotMasqueradesAsUndefined.link(this);
    }
    move(invert ? TrustedImm32(0) : TrustedImm32(1), result);
    done.append(jump());

    isString.link(this);
    move(TrustedImmPtr(jsEmptyString(vm)), result);
    comparePtr(invert ? Equal : NotEqual, value.payloadGPR(), result, result);
    done.append(jump());

    isHeapBigInt.link(this);
    load32(Address(value.payloadGPR(), JSBigInt::offsetOfLength()), result);
    compare32(invert ? Equal : NotEqual, result, TrustedImm32(0), result);
    done.append(jump());

    notCell.link(this);
    auto notInt32 = branchIfNotInt32(value);
    compare32(invert ? Equal : NotEqual, value.payloadGPR(), TrustedImm32(0), result);
    done.append(jump());

    notInt32.link(this);
    auto notDouble = branchIfNotDoubleKnownNotInt32(value);
#if USE(JSVALUE64)
    unboxDouble(value.gpr(), result, valueAsFPR);
#else
    unboxDouble(value, valueAsFPR);
#endif
    move(invert ? TrustedImm32(1) : TrustedImm32(0), result);
    done.append(branchDoubleZeroOrNaN(valueAsFPR, tempFPR));
    move(invert ? TrustedImm32(0) : TrustedImm32(1), result);
    done.append(jump());

    notDouble.link(this);
#if USE(BIGINT32)
    auto isNotBigInt32 = branchIfNotBigInt32(value.gpr(), result);
    move(value.gpr(), result);
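    // A BigInt32's 32-bit payload sits above its 16-bit tag, so shifting right by
    // 16 leaves just the value to test against zero.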
    urshift64(TrustedImm32(16), result);
    compare32(invert ? Equal : NotEqual, result, TrustedImm32(0), result);
    done.append(jump());

    isNotBigInt32.link(this);
#endif // USE(BIGINT32)
#if USE(JSVALUE64)
    compare64(invert ? NotEqual : Equal, value.gpr(), TrustedImm32(JSValue::ValueTrue), result);
#else
    move(invert ? TrustedImm32(1) : TrustedImm32(0), result);
    done.append(branchIfNotBoolean(value, InvalidGPRReg));
    compare32(invert ? Equal : NotEqual, value.payloadGPR(), TrustedImm32(0), result);
#endif

    done.link(this);
}

AssemblyHelpers::JumpList AssemblyHelpers::branchIfValue(VM& vm, JSValueRegs value, GPRReg scratch, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg valueAsFPR, FPRReg tempFPR, bool shouldCheckMasqueradesAsUndefined, std::variant<JSGlobalObject*, GPRReg> globalObject, bool invert)
{
    // Implements the following control flow structure:
    // if (value is cell) {
    //     if (value is string or value is HeapBigInt)
    //         result = !!value->length
    //     else {
    //         do evil things for masquerades-as-undefined
    //         result = true
    //     }
    // } else if (value is int32) {
    //     result = !!unboxInt32(value)
    // } else if (value is number) {
    //     result = !!unboxDouble(value)
    // } else if (value is BigInt32) {
    //     result = !!unboxBigInt32(value)
    // } else {
    //     result = value == jsTrue
    // }

    JumpList done;
    JumpList truthy;

    auto notCell = branchIfNotCell(value);
    auto isString = branchIfString(value.payloadGPR());
    auto isHeapBigInt = branchIfHeapBigInt(value.payloadGPR());

    if (shouldCheckMasqueradesAsUndefined) {
        ASSERT(scratchIfShouldCheckMasqueradesAsUndefined != InvalidGPRReg);
        JumpList isNotMasqueradesAsUndefined;
        isNotMasqueradesAsUndefined.append(branchTest8(Zero, Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)));
        emitLoadStructure(vm, value.payloadGPR(), scratch);
        if (std::holds_alternative<JSGlobalObject*>(globalObject))
            move(TrustedImmPtr(std::get<JSGlobalObject*>(globalObject)), scratchIfShouldCheckMasqueradesAsUndefined);
        else
            move(std::get<GPRReg>(globalObject), scratchIfShouldCheckMasqueradesAsUndefined);
        isNotMasqueradesAsUndefined.append(branchPtr(NotEqual, Address(scratch, Structure::globalObjectOffset()), scratchIfShouldCheckMasqueradesAsUndefined));

        // We act like we are "undefined" here.
        if (invert)
            truthy.append(jump());
        else
            done.append(jump());

        if (invert)
            done.append(isNotMasqueradesAsUndefined);
        else
            truthy.append(isNotMasqueradesAsUndefined);
    } else {
        if (invert)
            done.append(jump());
        else
            truthy.append(jump());
    }

    isString.link(this);
    truthy.append(branchPtr(invert ? Equal : NotEqual, value.payloadGPR(), TrustedImmPtr(jsEmptyString(vm))));
    done.append(jump());

    isHeapBigInt.link(this);
    truthy.append(branchTest32(invert ? Zero : NonZero, Address(value.payloadGPR(), JSBigInt::offsetOfLength())));
    done.append(jump());

    notCell.link(this);
    auto notInt32 = branchIfNotInt32(value);
    truthy.append(branchTest32(invert ? Zero : NonZero, value.payloadGPR()));
    done.append(jump());

    notInt32.link(this);
    auto notDouble = branchIfNotDoubleKnownNotInt32(value);
#if USE(JSVALUE64)
    unboxDouble(value.gpr(), scratch, valueAsFPR);
#else
    unboxDouble(value, valueAsFPR);
#endif
    if (invert) {
        truthy.append(branchDoubleZeroOrNaN(valueAsFPR, tempFPR));
        done.append(jump());
    } else {
        done.append(branchDoubleZeroOrNaN(valueAsFPR, tempFPR));
        truthy.append(jump());
    }

    notDouble.link(this);
#if USE(BIGINT32)
    auto isNotBigInt32 = branchIfNotBigInt32(value.gpr(), scratch);
    move(value.gpr(), scratch);
    urshift64(TrustedImm32(16), scratch);
    truthy.append(branchTest32(invert ? Zero : NonZero, scratch));
    done.append(jump());

    isNotBigInt32.link(this);
#endif // USE(BIGINT32)
#if USE(JSVALUE64)
    truthy.append(branch64(invert ? NotEqual : Equal, value.gpr(), TrustedImm64(JSValue::encode(jsBoolean(true)))));
#else
    auto notBoolean = branchIfNotBoolean(value, InvalidGPRReg);
    if (invert)
        truthy.append(notBoolean);
    else
        done.append(notBoolean);
    truthy.append(branch32(invert ? Equal : NotEqual, value.payloadGPR(), TrustedImm32(0)));
#endif

    done.link(this);

    return truthy;
}

#if ENABLE(WEBASSEMBLY)
void AssemblyHelpers::loadWasmContextInstance(GPRReg dst)
{
#if ENABLE(FAST_TLS_JIT)
    if (Wasm::Context::useFastTLS()) {
        loadFromTLSPtr(fastTLSOffsetForKey(WTF_WASM_CONTEXT_KEY), dst);
        return;
    }
#endif
    move(Wasm::PinnedRegisterInfo::get().wasmContextInstancePointer, dst);
}

void AssemblyHelpers::storeWasmContextInstance(GPRReg src)
{
#if ENABLE(FAST_TLS_JIT)
    if (Wasm::Context::useFastTLS()) {
        storeToTLSPtr(src, fastTLSOffsetForKey(WTF_WASM_CONTEXT_KEY));
        return;
    }
#endif
    move(src, Wasm::PinnedRegisterInfo::get().wasmContextInstancePointer);
}

bool AssemblyHelpers::loadWasmContextInstanceNeedsMacroScratchRegister()
{
#if ENABLE(FAST_TLS_JIT)
    if (Wasm::Context::useFastTLS())
        return loadFromTLSPtrNeedsMacroScratchRegister();
#endif
    return false;
}

bool AssemblyHelpers::storeWasmContextInstanceNeedsMacroScratchRegister()
{
#if ENABLE(FAST_TLS_JIT)
    if (Wasm::Context::useFastTLS())
        return storeToTLSPtrNeedsMacroScratchRegister();
#endif
    return false;
}

#endif // ENABLE(WEBASSEMBLY)

void AssemblyHelpers::debugCall(VM& vm, V_DebugOperation_EPP function, void* argument)
{
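    // Spill every GPR and FPR to a VM scratch buffer, make the call, then restore
    // them all, so a debug call can be inserted at an arbitrary point in generated
    // code without disturbing register state.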
    size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
    ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(scratchSize);
    EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        store64(GPRInfo::toRegister(i), buffer + i);
#else
        store32(GPRInfo::toRegister(i), buffer + i);
#endif
    }

    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        storeDouble(FPRInfo::toRegister(i), Address(GPRInfo::regT0));
    }

#if CPU(X86_64) || CPU(ARM_THUMB2) || CPU(ARM64) || CPU(MIPS) || CPU(RISCV64)
    move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
    move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
    GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
#else
#error "JIT not supported on this platform."
#endif
    prepareCallOperation(vm);
    move(TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(function)), scratch);
    call(scratch, OperationPtrTag);

    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        loadDouble(Address(GPRInfo::regT0), FPRInfo::toRegister(i));
    }
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        load64(buffer + i, GPRInfo::toRegister(i));
#else
        load32(buffer + i, GPRInfo::toRegister(i));
#endif
    }
}

void AssemblyHelpers::copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(GPRReg calleeSavesBuffer)
{
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    addPtr(TrustedImm32(EntryFrame::calleeSaveRegistersBufferOffset()), calleeSavesBuffer);

    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
    unsigned registerCount = allCalleeSaves->registerCount();

    StoreRegSpooler spooler(*this, calleeSavesBuffer);

    unsigned i = 0;
    for (; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontCopyRegisters.contains(entry.reg()))
            continue;
        if (!entry.reg().isGPR())
            break;
        spooler.storeGPR(entry);
    }
    spooler.finalizeGPR();
    for (; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontCopyRegisters.contains(entry.reg()))
            continue;
        spooler.storeFPR(entry);
    }
    spooler.finalizeFPR();

#else
    UNUSED_PARAM(calleeSavesBuffer);
#endif
}

void AssemblyHelpers::cageWithoutUntagging(Gigacage::Kind kind, GPRReg storage)
{
#if GIGACAGE_ENABLED
    if (!Gigacage::isEnabled(kind))
        return;

#if CPU(ARM64E)
    RegisterID tempReg = InvalidGPRReg;
    Jump skip;
    if (kind == Gigacage::Primitive) {
        skip = branchPtr(Equal, storage, TrustedImmPtr(JSArrayBufferView::nullVectorPtr()));
        tempReg = getCachedMemoryTempRegisterIDAndInvalidate();
        move(storage, tempReg);
        // Flip the registers since bitFieldInsert only inserts into the low bits.
        std::swap(storage, tempReg);
    }
#endif
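    // Cage the pointer: mask off the high bits and rebase onto the cage's base so
    // that even a corrupted pointer cannot address memory outside the gigacage.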
1170 | andPtr(TrustedImmPtr(Gigacage::mask(kind)), storage);
|
---|
1171 | addPtr(TrustedImmPtr(Gigacage::basePtr(kind)), storage);
|
---|
1172 | #if CPU(ARM64E)
|
---|
1173 | if (kind == Gigacage::Primitive)
|
---|
1174 | insertBitField64(storage, TrustedImm32(0), TrustedImm32(64 - maxNumberOfAllowedPACBits), tempReg);
|
---|
1175 | if (skip.isSet())
|
---|
1176 | skip.link(this);
|
---|
1177 | #endif
|
---|
1178 |
|
---|
1179 | #else
|
---|
1180 | UNUSED_PARAM(kind);
|
---|
1181 | UNUSED_PARAM(storage);
|
---|
1182 | #endif
|
---|
1183 | }
|
---|
1184 |
|
---|
// length may be the same register as scratch.
void AssemblyHelpers::cageConditionallyAndUntag(Gigacage::Kind kind, GPRReg storage, GPRReg length, GPRReg scratch, bool validateAuth)
{
#if GIGACAGE_ENABLED
    if (Gigacage::isEnabled(kind)) {
        if (kind != Gigacage::Primitive || Gigacage::disablingPrimitiveGigacageIsForbidden())
            cageWithoutUntagging(kind, storage);
        else {
#if CPU(ARM64E)
            if (length == scratch)
                scratch = getCachedMemoryTempRegisterIDAndInvalidate();
#endif
            JumpList done;
#if CPU(ARM64E)
            done.append(branchPtr(Equal, storage, TrustedImmPtr(JSArrayBufferView::nullVectorPtr())));
#endif
            done.append(branchTest8(NonZero, AbsoluteAddress(&Gigacage::disablePrimitiveGigacageRequested)));

            loadPtr(Gigacage::addressOfBasePtr(kind), scratch);
            done.append(branchTest64(Zero, scratch));
#if CPU(ARM64E)
            GPRReg tempReg = getCachedDataTempRegisterIDAndInvalidate();
            move(storage, tempReg);
            ASSERT(LogicalImmediate::create64(Gigacage::mask(kind)).isValid());
            andPtr(TrustedImmPtr(Gigacage::mask(kind)), tempReg);
            addPtr(scratch, tempReg);
            insertBitField64(tempReg, TrustedImm32(0), TrustedImm32(64 - maxNumberOfAllowedPACBits), storage);
#else
            andPtr(TrustedImmPtr(Gigacage::mask(kind)), storage);
            addPtr(scratch, storage);
#endif // CPU(ARM64E)
            done.link(this);
        }
    }
#endif

#if CPU(ARM64E)
    if (kind == Gigacage::Primitive)
        untagArrayPtr(length, storage, validateAuth, scratch);
#endif
    UNUSED_PARAM(validateAuth);
    UNUSED_PARAM(kind);
    UNUSED_PARAM(storage);
    UNUSED_PARAM(length);
    UNUSED_PARAM(scratch);
}

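// Stores every register in `list` at its recorded offset from the frame
// pointer. The two loops below rely on RegisterAtOffsetList ordering all GPRs
// before all FPRs.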
void AssemblyHelpers::emitSave(const RegisterAtOffsetList& list)
{
    StoreRegSpooler spooler(*this, framePointerRegister);

    size_t registerCount = list.registerCount();
    size_t i = 0;
    for (; i < registerCount; i++) {
        auto entry = list.at(i);
        if (!entry.reg().isGPR())
            break;
        spooler.storeGPR(entry);
    }
    spooler.finalizeGPR();

    for (; i < registerCount; i++)
        spooler.storeFPR(list.at(i));
    spooler.finalizeFPR();
}

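// The mirror image of emitSave(): reloads each register in `list` from its
// offset off the frame pointer, again assuming GPRs precede FPRs in the list.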
void AssemblyHelpers::emitRestore(const RegisterAtOffsetList& list)
{
    LoadRegSpooler spooler(*this, framePointerRegister);

    size_t registerCount = list.registerCount();
    size_t i = 0;
    for (; i < registerCount; i++) {
        auto entry = list.at(i);
        if (!entry.reg().isGPR())
            break;
        spooler.loadGPR(entry);
    }
    spooler.finalizeGPR();

    for (; i < registerCount; i++)
        spooler.loadFPR(list.at(i));
    spooler.finalizeFPR();
}

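// Saves the given callee-save registers into their frame slots. Stack
// registers are excluded, as elsewhere in this file.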
void AssemblyHelpers::emitSaveCalleeSavesFor(const RegisterAtOffsetList* calleeSaves)
{
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters());
    unsigned registerCount = calleeSaves->registerCount();

    StoreRegSpooler spooler(*this, framePointerRegister);

    unsigned i = 0;
    for (; i < registerCount; i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (entry.reg().isFPR())
            break;
        if (dontSaveRegisters.contains(entry.reg()))
            continue;
        spooler.storeGPR(entry);
    }
    spooler.finalizeGPR();
    for (; i < registerCount; i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (dontSaveRegisters.contains(entry.reg()))
            continue;
        spooler.storeFPR(entry);
    }
    spooler.finalizeFPR();
}

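// Restores the given callee-save registers from their frame slots, again
// excluding stack registers.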
void AssemblyHelpers::emitRestoreCalleeSavesFor(const RegisterAtOffsetList* calleeSaves)
{
    RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters());
    unsigned registerCount = calleeSaves->registerCount();

    LoadRegSpooler spooler(*this, framePointerRegister);

    unsigned i = 0;
    for (; i < registerCount; i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (entry.reg().isFPR())
            break;
        if (dontRestoreRegisters.get(entry.reg()))
            continue;
        spooler.loadGPR(entry);
    }
    spooler.finalizeGPR();
    for (; i < registerCount; i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (dontRestoreRegisters.get(entry.reg()))
            continue;
        spooler.loadFPR(entry);
    }
    spooler.finalizeFPR();
}

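// Walks the full VM callee-save set: a register that the LLInt/Baseline frame
// spilled is copied from its frame slot; one it did not spill is copied
// straight from the register itself.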
void AssemblyHelpers::copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(EntryFrame*& topEntryFrame, const RegisterSet& usedRegisters)
{
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    // Copy callee-saves that were saved to the stack, or that are still live in registers, into the VM's callee-save buffer.
    ScratchRegisterAllocator allocator(usedRegisters);
    GPRReg destBufferGPR = allocator.allocateScratchGPR();
    GPRReg temp1 = allocator.allocateScratchGPR();
    FPRReg fpTemp1 = allocator.allocateScratchFPR();
    GPRReg temp2 = allocator.allocateScratchGPR();
    FPRReg fpTemp2 = allocator.allocateScratchFPR();
    RELEASE_ASSERT(!allocator.didReuseRegisters());

    loadPtr(&topEntryFrame, destBufferGPR);
    addPtr(TrustedImm32(EntryFrame::calleeSaveRegistersBufferOffset()), destBufferGPR);

    CopySpooler spooler(*this, framePointerRegister, destBufferGPR, temp1, temp2, fpTemp1, fpTemp2);

    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    const RegisterAtOffsetList* currentCalleeSaves = &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();
    RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
    unsigned registerCount = allCalleeSaves->registerCount();

    unsigned i = 0;
    for (; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontCopyRegisters.contains(entry.reg()))
            continue;
        RegisterAtOffset* currentFrameEntry = currentCalleeSaves->find(entry.reg());

        if (!entry.reg().isGPR())
            break;
        if (currentFrameEntry)
            spooler.loadGPR(currentFrameEntry->offset());
        else
            spooler.copyGPR(entry.reg().gpr());
        spooler.storeGPR(entry.offset());
    }
    spooler.finalizeGPR();

    for (; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontCopyRegisters.get(entry.reg()))
            continue;
        RegisterAtOffset* currentFrameEntry = currentCalleeSaves->find(entry.reg());

        RELEASE_ASSERT(entry.reg().isFPR());
        if (currentFrameEntry)
            spooler.loadFPR(currentFrameEntry->offset());
        else
            spooler.copyFPR(entry.reg().fpr());
        spooler.storeFPR(entry.offset());
    }
    spooler.finalizeFPR();

#else
    UNUSED_PARAM(topEntryFrame);
    UNUSED_PARAM(usedRegisters);
#endif
}

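// Writes the LLInt/Baseline callee-saves into the frame at
// offsetVirtualRegister. On 64-bit, CopyBaselineCalleeSavedRegistersFromBaseFrame
// reads the values from the base frame's slots rather than from the live
// registers.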
void AssemblyHelpers::emitSaveOrCopyLLIntBaselineCalleeSavesFor(CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, RestoreTagRegisterMode tagRegisterMode, GPRReg temp1, GPRReg temp2, GPRReg temp3)
{
    ASSERT_UNUSED(codeBlock, codeBlock);
    ASSERT(JITCode::isBaselineCode(codeBlock->jitType()));
    ASSERT(codeBlock->jitCode()->calleeSaveRegisters() == &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());

    const RegisterAtOffsetList* calleeSaves = &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters());
    unsigned registerCount = calleeSaves->registerCount();

    GPRReg dstBufferGPR = temp1;
    addPtr(TrustedImm32(offsetVirtualRegister.offsetInBytes()), framePointerRegister, dstBufferGPR);

    CopySpooler spooler(*this, framePointerRegister, dstBufferGPR, temp2, temp3);

    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (dontSaveRegisters.get(entry.reg()))
            continue;
        RELEASE_ASSERT(entry.reg().isGPR());

#if USE(JSVALUE32_64)
        UNUSED_PARAM(tagRegisterMode);
#else
        if (tagRegisterMode == CopyBaselineCalleeSavedRegistersFromBaseFrame)
            spooler.loadGPR(entry.offset());
        else
#endif
            spooler.copyGPR(entry.reg().gpr());
        spooler.storeGPR(entry.offset());
    }
    spooler.finalizeGPR();
}

} // namespace JSC

#endif // ENABLE(JIT)