source: webkit/trunk/Source/JavaScriptCore/jit/AssemblyHelpersSpoolers.h

Last change on this file was 290907, checked in by Angelos Oikonomopoulos, 3 years ago

AssemblyHelpersSpoolers: use load/store pair on ARMv7
https://bugs.webkit.org/show_bug.cgi?id=235112

Reviewed by Žan Doberšek.

Save ~1% code size for baseline JIT on JS2 by emitting load/store pair
instructions on 32-bit ARM.
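
For illustration only (a hypothetical before/after sketch, not code taken from
this patch; GPRInfo::regT0/regT1, base, and the offsets are placeholders):
where the baseline JIT previously emitted two adjacent 32-bit callee-save
stores such as

    jit.store32(GPRInfo::regT0, Address(base, 0));
    jit.store32(GPRInfo::regT1, Address(base, 4));

the spoolers can now coalesce them into a single paired store on ARMv7 as
well:

    jit.storePair32(GPRInfo::regT0, GPRInfo::regT1, base, TrustedImm32(0));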

  • assembler/ARMv7Assembler.h:

(JSC::ARMv7Assembler::vldmia):
(JSC::ARMv7Assembler::vstmia):

  • assembler/CPU.h:

(JSC::isARM):

  • assembler/MacroAssemblerARMv7.h:

(JSC::MacroAssemblerARMv7::loadPair64):
(JSC::MacroAssemblerARMv7::storePair64):

  • jit/AssemblyHelpers.cpp:

(JSC::AssemblyHelpers::copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer):

  • jit/AssemblyHelpersSpoolers.h:

(JSC::AssemblyHelpers::LoadRegSpooler::executePair):
(JSC::AssemblyHelpers::StoreRegSpooler::executePair):
(JSC::AssemblyHelpers::CopySpooler::CopySpooler):
(JSC::AssemblyHelpers::CopySpooler::loadPair):
(JSC::AssemblyHelpers::CopySpooler::storePair):

/*
 * Copyright (C) 2021 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(JIT)

#include "AssemblyHelpers.h"

namespace JSC {

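// RegDispatch is a small traits helper that lets the spoolers below treat
// GPRs and FPRs uniformly: it extracts the concrete register from a Reg,
// picks the matching temp registers and scratch slot out of a spooler, and
// reports the register size and (on ARM64) whether an offset is encodable
// as a load/store pair immediate.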
template<typename RegType>
struct RegDispatch {
    static bool hasSameType(Reg);
    static RegType get(Reg);
    template<typename Spooler> static RegType temp1(const Spooler*);
    template<typename Spooler> static RegType temp2(const Spooler*);
    template<typename Spooler> static RegType& regToStore(Spooler*);
    static constexpr RegType invalid();
    static constexpr size_t regSize();
    static bool isValidLoadPairImm(int);
    static bool isValidStorePairImm(int);
};

template<>
struct RegDispatch<GPRReg> {
    static bool hasSameType(Reg reg) { return reg.isGPR(); }
    static GPRReg get(Reg reg) { return reg.gpr(); }
    template<typename Spooler> static GPRReg temp1(const Spooler* spooler) { return spooler->m_temp1GPR; }
    template<typename Spooler> static GPRReg temp2(const Spooler* spooler) { return spooler->m_temp2GPR; }
    template<typename Spooler> static GPRReg& regToStore(Spooler* spooler) { return spooler->m_gprToStore; }
    static constexpr GPRReg invalid() { return InvalidGPRReg; }
    static constexpr size_t regSize() { return sizeof(CPURegister); }
#if CPU(ARM64)
    static bool isValidLoadPairImm(int offset) { return ARM64Assembler::isValidLDPImm<64>(offset); }
    static bool isValidStorePairImm(int offset) { return ARM64Assembler::isValidSTPImm<64>(offset); }
#else
    static bool isValidLoadPairImm(int) { return false; }
    static bool isValidStorePairImm(int) { return false; }
#endif
};

template<>
struct RegDispatch<FPRReg> {
    static bool hasSameType(Reg reg) { return reg.isFPR(); }
    static FPRReg get(Reg reg) { return reg.fpr(); }
    template<typename Spooler> static FPRReg temp1(const Spooler* spooler) { return spooler->m_temp1FPR; }
    template<typename Spooler> static FPRReg temp2(const Spooler* spooler) { return spooler->m_temp2FPR; }
    template<typename Spooler> static FPRReg& regToStore(Spooler* spooler) { return spooler->m_fprToStore; }
    static constexpr FPRReg invalid() { return InvalidFPRReg; }
    static constexpr size_t regSize() { return sizeof(double); }
#if CPU(ARM64)
    static bool isValidLoadPairImm(int offset) { return ARM64Assembler::isValidLDPFPImm<64>(offset); }
    static bool isValidStorePairImm(int offset) { return ARM64Assembler::isValidSTPFPImm<64>(offset); }
#else
    static bool isValidLoadPairImm(int) { return false; }
    static bool isValidStorePairImm(int) { return false; }
#endif
};

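// Spooler is a CRTP base for LoadRegSpooler and StoreRegSpooler. It buffers
// one RegisterAtOffset at a time; when the next entry is exactly one register
// size away from the buffered one, the two are emitted as a single paired
// operation (executePair), otherwise the buffered entry is flushed as a
// single operation. Clients must call finalize() per register bank to flush
// any leftover buffered entry. Pairing is only enabled on ARM and ARM64
// (hasPairOp).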
template<typename Op>
class AssemblyHelpers::Spooler {
public:
    using JIT = AssemblyHelpers;

    Spooler(JIT& jit, GPRReg baseGPR)
        : m_jit(jit)
        , m_baseGPR(baseGPR)
    { }

    template<typename RegType>
    void execute(const RegisterAtOffset& entry)
    {
        RELEASE_ASSERT(RegDispatch<RegType>::hasSameType(entry.reg()));
        if constexpr (!hasPairOp)
            return op().executeSingle(entry.offset(), RegDispatch<RegType>::get(entry.reg()));

        if (!m_bufferedEntry.reg().isSet()) {
            m_bufferedEntry = entry;
            return;
        }

        constexpr ptrdiff_t regSize = RegDispatch<RegType>::regSize();
        RegType bufferedEntryReg = RegDispatch<RegType>::get(m_bufferedEntry.reg());
        RegType entryReg = RegDispatch<RegType>::get(entry.reg());

        if (entry.offset() == m_bufferedEntry.offset() + regSize) {
            op().executePair(m_bufferedEntry.offset(), bufferedEntryReg, entryReg);
            m_bufferedEntry = { };
            return;
        }
        if (m_bufferedEntry.offset() == entry.offset() + regSize) {
            op().executePair(entry.offset(), entryReg, bufferedEntryReg);
            m_bufferedEntry = { };
            return;
        }

        // We don't have a pair of operations that we can execute as a pair.
        // Execute the previous one as a single (finalize will do that), and then
        // buffer the current entry to potentially be paired with the next entry.
        finalize<RegType>();
        execute<RegType>(entry);
    }

    template<typename RegType>
    void finalize()
    {
        if constexpr (hasPairOp) {
            if (m_bufferedEntry.reg().isSet()) {
                op().executeSingle(m_bufferedEntry.offset(), RegDispatch<RegType>::get(m_bufferedEntry.reg()));
                m_bufferedEntry = { };
            }
        }
    }

private:
    static constexpr bool hasPairOp = isARM() || isARM64();

    Op& op() { return *reinterpret_cast<Op*>(this); }

protected:
    JIT& m_jit;
    GPRReg m_baseGPR;
    RegisterAtOffset m_bufferedEntry;
};

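// LoadRegSpooler restores registers from [m_baseGPR + offset] slots, using
// loadPair64/loadPair32 (depending on JSVALUE64) for adjacent GPR slots and
// loadPair64 for adjacent FPR slots where possible.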
class AssemblyHelpers::LoadRegSpooler : public AssemblyHelpers::Spooler<LoadRegSpooler> {
    using Base = Spooler<LoadRegSpooler>;
    using JIT = Base::JIT;
public:
    LoadRegSpooler(JIT& jit, GPRReg baseGPR)
        : Base(jit, baseGPR)
    { }

    ALWAYS_INLINE void loadGPR(const RegisterAtOffset& entry) { execute<GPRReg>(entry); }
    ALWAYS_INLINE void finalizeGPR() { finalize<GPRReg>(); }
    ALWAYS_INLINE void loadFPR(const RegisterAtOffset& entry) { execute<FPRReg>(entry); }
    ALWAYS_INLINE void finalizeFPR() { finalize<FPRReg>(); }

private:
#if CPU(ARM64) || CPU(ARM)
    ALWAYS_INLINE void executePair(ptrdiff_t offset, GPRReg reg1, GPRReg reg2)
    {
#if USE(JSVALUE64)
        m_jit.loadPair64(m_baseGPR, TrustedImm32(offset), reg1, reg2);
#else
        m_jit.loadPair32(m_baseGPR, TrustedImm32(offset), reg1, reg2);
#endif
    }
    ALWAYS_INLINE void executePair(ptrdiff_t offset, FPRReg reg1, FPRReg reg2)
    {
        m_jit.loadPair64(m_baseGPR, TrustedImm32(offset), reg1, reg2);
    }
#else
    template<typename RegType>
    ALWAYS_INLINE void executePair(ptrdiff_t, RegType, RegType) { }
#endif

    ALWAYS_INLINE void executeSingle(ptrdiff_t offset, GPRReg reg)
    {
#if USE(JSVALUE64)
        m_jit.load64(Address(m_baseGPR, offset), reg);
#else
        m_jit.load32(Address(m_baseGPR, offset), reg);
#endif
    }

    ALWAYS_INLINE void executeSingle(ptrdiff_t offset, FPRReg reg)
    {
        m_jit.loadDouble(Address(m_baseGPR, offset), reg);
    }

    friend class AssemblyHelpers::Spooler<LoadRegSpooler>;
};

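// StoreRegSpooler is the mirror image of LoadRegSpooler: it spills registers
// to [m_baseGPR + offset] slots, pairing adjacent slots where possible.
//
// A sketch of the intended calling pattern (illustrative only; the variable
// names and the RegisterAtOffsetList are assumptions, not copied from a real
// caller):
//
//     StoreRegSpooler spooler(jit, framePointerGPR);
//     for (const RegisterAtOffset& entry : calleeSavesToSpill) {
//         if (entry.reg().isGPR())
//             spooler.storeGPR(entry);
//     }
//     spooler.finalizeGPR();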
class AssemblyHelpers::StoreRegSpooler : public AssemblyHelpers::Spooler<StoreRegSpooler> {
    using Base = Spooler<StoreRegSpooler>;
    using JIT = typename Base::JIT;
public:
    StoreRegSpooler(JIT& jit, GPRReg baseGPR)
        : Base(jit, baseGPR)
    { }

    ALWAYS_INLINE void storeGPR(const RegisterAtOffset& entry) { execute<GPRReg>(entry); }
    ALWAYS_INLINE void finalizeGPR() { finalize<GPRReg>(); }
    ALWAYS_INLINE void storeFPR(const RegisterAtOffset& entry) { execute<FPRReg>(entry); }
    ALWAYS_INLINE void finalizeFPR() { finalize<FPRReg>(); }

private:
#if CPU(ARM64) || CPU(ARM)
    ALWAYS_INLINE void executePair(ptrdiff_t offset, GPRReg reg1, GPRReg reg2)
    {
#if USE(JSVALUE64)
        m_jit.storePair64(reg1, reg2, m_baseGPR, TrustedImm32(offset));
#else
        m_jit.storePair32(reg1, reg2, m_baseGPR, TrustedImm32(offset));
#endif
    }
    ALWAYS_INLINE void executePair(ptrdiff_t offset, FPRReg reg1, FPRReg reg2)
    {
        m_jit.storePair64(reg1, reg2, m_baseGPR, TrustedImm32(offset));
    }
#else
    template<typename RegType>
    ALWAYS_INLINE void executePair(ptrdiff_t, RegType, RegType) { }
#endif

    ALWAYS_INLINE void executeSingle(ptrdiff_t offset, GPRReg reg)
    {
#if USE(JSVALUE64)
        m_jit.store64(reg, Address(m_baseGPR, offset));
#else
        m_jit.store32(reg, Address(m_baseGPR, offset));
#endif
    }

    ALWAYS_INLINE void executeSingle(ptrdiff_t offset, FPRReg reg)
    {
        m_jit.storeDouble(reg, Address(m_baseGPR, offset));
    }

    friend class AssemblyHelpers::Spooler<StoreRegSpooler>;
};

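// CopySpooler copies values into a destination buffer. Each value to store
// comes from a source-buffer offset (loadGPR/loadFPR), an already-live
// register (copyGPR/copyFPR), or an EncodedJSValue constant (moveConstant).
// Like the register spoolers, it batches two sources at a time so that
// adjacent loads and stores can be emitted as pair instructions. With
// BufferRegs::AllowModification, it may also bump the source/destination base
// registers so that otherwise unencodable pair offsets fit in the immediate
// field; with NeedPreservation the base registers are left untouched and
// unpairable accesses fall back to single loads/stores.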
class AssemblyHelpers::CopySpooler {
public:
    using JIT = AssemblyHelpers;
    using Address = JIT::Address;
    using TrustedImm32 = JIT::TrustedImm32;

    struct Source {
        enum class Type { BufferOffset, Reg, EncodedJSValue } type;
        int offset;
        Reg reg;
        EncodedJSValue value;

        template<typename RegType> RegType getReg() { return RegDispatch<RegType>::get(reg); };
    };

    enum class BufferRegs {
        NeedPreservation,
        AllowModification
    };

    CopySpooler(BufferRegs attribute, JIT& jit, GPRReg srcBuffer, GPRReg destBuffer, GPRReg temp1, GPRReg temp2, FPRReg fpTemp1 = InvalidFPRReg, FPRReg fpTemp2 = InvalidFPRReg)
        : m_jit(jit)
        , m_srcBufferGPR(srcBuffer)
        , m_dstBufferGPR(destBuffer)
        , m_temp1GPR(temp1)
        , m_temp2GPR(temp2)
        , m_temp1FPR(fpTemp1)
        , m_temp2FPR(fpTemp2)
        , m_bufferRegsAttr(attribute)
    {
        if constexpr (hasPairOp && !(isARM() || isARM64()))
            RELEASE_ASSERT_NOT_REACHED(); // unsupported architecture.
    }

    CopySpooler(JIT& jit, GPRReg srcBuffer, GPRReg destBuffer, GPRReg temp1, GPRReg temp2, FPRReg fpTemp1 = InvalidFPRReg, FPRReg fpTemp2 = InvalidFPRReg)
        : CopySpooler(BufferRegs::NeedPreservation, jit, srcBuffer, destBuffer, temp1, temp2, fpTemp1, fpTemp2)
    { }

private:
    template<typename RegType> RegType temp1() const { return RegDispatch<RegType>::temp1(this); }
    template<typename RegType> RegType temp2() const { return RegDispatch<RegType>::temp2(this); }
    template<typename RegType> RegType& regToStore() { return RegDispatch<RegType>::regToStore(this); }

    template<typename RegType> static constexpr RegType invalid() { return RegDispatch<RegType>::invalid(); }
    template<typename RegType> static constexpr int regSize() { return RegDispatch<RegType>::regSize(); }

    template<typename RegType> static bool isValidLoadPairImm(int offset) { return RegDispatch<RegType>::isValidLoadPairImm(offset); }
    template<typename RegType> static bool isValidStorePairImm(int offset) { return RegDispatch<RegType>::isValidStorePairImm(offset); }

    template<typename RegType>
    void load(int offset)
    {
        if constexpr (!hasPairOp) {
            auto& regToStore = this->regToStore<RegType>();
            regToStore = temp1<RegType>();
            load(offset, regToStore);
            return;
        }

        auto& source = m_sources[m_currentSource++];
        source.type = Source::Type::BufferOffset;
        source.offset = offset;
    }

    void move(EncodedJSValue value)
    {
        if constexpr (!hasPairOp) {
            auto& regToStore = this->regToStore<GPRReg>();
            regToStore = temp1<GPRReg>();
            move(value, regToStore);
            return;
        }

        auto& source = m_sources[m_currentSource++];
        source.type = Source::Type::EncodedJSValue;
        source.value = value;
    }

    template<typename RegType>
    void copy(RegType reg)
    {
        if constexpr (!hasPairOp) {
            auto& regToStore = this->regToStore<RegType>();
            regToStore = reg;
            return;
        }

        auto& source = m_sources[m_currentSource++];
        source.type = Source::Type::Reg;
        source.reg = reg;
    }

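    // store() is where pairing happens. The first store of a pair only
    // records its destination offset (m_deferredStoreOffset); once a second
    // source is queued, both values are materialized (loaded, moved, or taken
    // from a register) and then written out, as a storePair when the two
    // destination offsets are adjacent and encodable, or as two single stores
    // otherwise.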
    template<typename RegType>
    void store(int storeOffset)
    {
        if constexpr (!hasPairOp) {
            auto regToStore = this->regToStore<RegType>();
            store(regToStore, storeOffset);
            return;
        }

        constexpr bool regTypeIsGPR = std::is_same<RegType, GPRReg>::value;

        if (m_currentSource < 2) {
            m_deferredStoreOffset = storeOffset;
            return;
        }

        RegType regToStore1 = invalid<RegType>();
        RegType regToStore2 = invalid<RegType>();
        auto& source1 = m_sources[0];
        auto& source2 = m_sources[1];
        auto srcOffset1 = m_sources[0].offset - m_srcOffsetAdjustment;
        auto srcOffset2 = m_sources[1].offset - m_srcOffsetAdjustment;
        constexpr int registerSize = regSize<RegType>();

        if (source1.type == Source::Type::BufferOffset && source2.type == Source::Type::BufferOffset) {
            regToStore1 = temp1<RegType>();
            regToStore2 = temp2<RegType>();

            int offsetDelta = abs(srcOffset1 - srcOffset2);
            int minOffset = std::min(srcOffset1, srcOffset2);
            bool isValidOffset = isValidLoadPairImm<RegType>(minOffset);

            if (offsetDelta != registerSize || (!isValidOffset && m_bufferRegsAttr != BufferRegs::AllowModification)) {
                load(srcOffset1, regToStore1);
                load(srcOffset2, regToStore2);
            } else {
                if (!isValidOffset) {
                    ASSERT(m_bufferRegsAttr == BufferRegs::AllowModification);
                    m_srcOffsetAdjustment += minOffset;
                    m_jit.addPtr(TrustedImm32(minOffset), m_srcBufferGPR);

                    srcOffset1 -= minOffset;
                    srcOffset2 -= minOffset;
                    ASSERT(isValidLoadPairImm<RegType>(std::min(srcOffset1, srcOffset2)));
                }
                if (srcOffset1 < srcOffset2)
                    loadPair(srcOffset1, regToStore1, regToStore2);
                else
                    loadPair(srcOffset2, regToStore2, regToStore1);
            }
        } else if (source1.type == Source::Type::BufferOffset) {
            regToStore1 = temp1<RegType>();
            load(srcOffset1, regToStore1);
            if (source2.type == Source::Type::EncodedJSValue) {
                if constexpr (regTypeIsGPR) {
                    regToStore2 = temp2<RegType>();
                    move(source2.value, regToStore2);
                } else
                    RELEASE_ASSERT_NOT_REACHED();
            } else
                regToStore2 = source2.getReg<RegType>();

        } else if (source2.type == Source::Type::BufferOffset) {
            if (source1.type == Source::Type::EncodedJSValue) {
                if constexpr (regTypeIsGPR) {
                    regToStore1 = temp1<RegType>();
                    move(source1.value, regToStore1);
                } else
                    RELEASE_ASSERT_NOT_REACHED();
            } else
                regToStore1 = source1.getReg<RegType>();
            regToStore2 = temp2<RegType>();
            load(srcOffset2, regToStore2);

        } else {
            if (source1.type == Source::Type::EncodedJSValue) {
                if constexpr (regTypeIsGPR) {
                    regToStore1 = temp1<RegType>();
                    move(source1.value, regToStore1);
                } else
                    RELEASE_ASSERT_NOT_REACHED();
            } else
                regToStore1 = source1.getReg<RegType>();

            if (source2.type == Source::Type::EncodedJSValue) {
                if constexpr (regTypeIsGPR) {
                    regToStore2 = temp2<RegType>();
                    move(source2.value, regToStore2);
                } else
                    RELEASE_ASSERT_NOT_REACHED();
            } else
                regToStore2 = source2.getReg<RegType>();
        }

        int dstOffset1 = m_deferredStoreOffset - m_dstOffsetAdjustment;
        int dstOffset2 = storeOffset - m_dstOffsetAdjustment;

        int offsetDelta = abs(dstOffset1 - dstOffset2);
        int minOffset = std::min(dstOffset1, dstOffset2);
        bool isValidOffset = isValidStorePairImm<RegType>(minOffset);

        if (offsetDelta != registerSize || (!isValidOffset && m_bufferRegsAttr != BufferRegs::AllowModification)) {
            store(regToStore1, dstOffset1);
            store(regToStore2, dstOffset2);
        } else {
            if (!isValidOffset) {
                ASSERT(m_bufferRegsAttr == BufferRegs::AllowModification);
                m_dstOffsetAdjustment += minOffset;
                m_jit.addPtr(TrustedImm32(minOffset), m_dstBufferGPR);

                dstOffset1 -= minOffset;
                dstOffset2 -= minOffset;
                ASSERT(isValidStorePairImm<RegType>(std::min(dstOffset1, dstOffset2)));
            }
            if (dstOffset1 < dstOffset2)
                storePair(regToStore1, regToStore2, dstOffset1);
            else
                storePair(regToStore2, regToStore1, dstOffset2);
        }

        m_currentSource = 0;
    }

    template<typename RegType>
    void finalize()
    {
        if constexpr (!hasPairOp)
            return;

        if (!m_currentSource)
            return; // Nothing to finalize.

        ASSERT(m_currentSource == 1);

        RegType regToStore = invalid<RegType>();
        auto& source = m_sources[0];
        auto& srcOffset = source.offset;
        constexpr bool regTypeIsGPR = std::is_same<RegType, GPRReg>::value;

        if (source.type == Source::Type::BufferOffset) {
            regToStore = temp1<RegType>();
            load(srcOffset - m_srcOffsetAdjustment, regToStore);
        } else if (source.type == Source::Type::Reg)
            regToStore = source.getReg<RegType>();
        else if constexpr (regTypeIsGPR) {
            regToStore = temp1<RegType>();
            move(source.value, regToStore);
        } else
            RELEASE_ASSERT_NOT_REACHED();

        store(regToStore, m_deferredStoreOffset - m_dstOffsetAdjustment);
        m_currentSource = 0;
    }

public:
    ALWAYS_INLINE void loadGPR(int srcOffset) { load<GPRReg>(srcOffset); }
    ALWAYS_INLINE void copyGPR(GPRReg gpr) { copy<GPRReg>(gpr); }
    ALWAYS_INLINE void moveConstant(EncodedJSValue value) { move(value); }
    ALWAYS_INLINE void storeGPR(int dstOffset) { store<GPRReg>(dstOffset); }
    ALWAYS_INLINE void finalizeGPR() { finalize<GPRReg>(); }

    ALWAYS_INLINE void loadFPR(int srcOffset) { load<FPRReg>(srcOffset); }
    ALWAYS_INLINE void copyFPR(FPRReg gpr) { copy<FPRReg>(gpr); }
    ALWAYS_INLINE void storeFPR(int dstOffset) { store<FPRReg>(dstOffset); }
    ALWAYS_INLINE void finalizeFPR() { finalize<FPRReg>(); }
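
    // Sketch of a typical use of the public interface (illustrative; the
    // register and variable names are assumptions, not taken from a real
    // caller): copy two adjacent pointer-sized slots from a source buffer to
    // a destination buffer, letting the spooler pair the accesses.
    //
    //     CopySpooler spooler(CopySpooler::BufferRegs::AllowModification,
    //         jit, srcBufferGPR, dstBufferGPR, scratch1GPR, scratch2GPR);
    //     spooler.loadGPR(0);
    //     spooler.storeGPR(0);
    //     spooler.loadGPR(sizeof(CPURegister));
    //     spooler.storeGPR(sizeof(CPURegister));
    //     spooler.finalizeGPR();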

protected:
#if USE(JSVALUE64)
    ALWAYS_INLINE void move(EncodedJSValue value, GPRReg dest)
    {
        m_jit.move(TrustedImm64(value), dest);
    }
#else
    NO_RETURN_DUE_TO_CRASH void move(EncodedJSValue, GPRReg) { RELEASE_ASSERT_NOT_REACHED(); }
#endif

    ALWAYS_INLINE void load(int offset, GPRReg dest)
    {
        m_jit.loadPtr(Address(m_srcBufferGPR, offset), dest);
    }

    ALWAYS_INLINE void store(GPRReg src, int offset)
    {
        m_jit.storePtr(src, Address(m_dstBufferGPR, offset));
    }

    ALWAYS_INLINE void load(int offset, FPRReg dest)
    {
        m_jit.loadDouble(Address(m_srcBufferGPR, offset), dest);
    }

    ALWAYS_INLINE void store(FPRReg src, int offset)
    {
        m_jit.storeDouble(src, Address(m_dstBufferGPR, offset));
    }

#if CPU(ARM64) || CPU(ARM)
    ALWAYS_INLINE void loadPair(int offset, GPRReg dest1, GPRReg dest2)
    {
#if USE(JSVALUE64)
        m_jit.loadPair64(m_srcBufferGPR, TrustedImm32(offset), dest1, dest2);
#else
        m_jit.loadPair32(m_srcBufferGPR, TrustedImm32(offset), dest1, dest2);
#endif
    }

    ALWAYS_INLINE void loadPair(int offset, FPRReg dest1, FPRReg dest2)
    {
        m_jit.loadPair64(m_srcBufferGPR, TrustedImm32(offset), dest1, dest2);
    }

    ALWAYS_INLINE void storePair(GPRReg src1, GPRReg src2, int offset)
    {
#if USE(JSVALUE64)
        m_jit.storePair64(src1, src2, m_dstBufferGPR, TrustedImm32(offset));
#else
        m_jit.storePair32(src1, src2, m_dstBufferGPR, TrustedImm32(offset));
#endif
    }

    ALWAYS_INLINE void storePair(FPRReg src1, FPRReg src2, int offset)
    {
        m_jit.storePair64(src1, src2, m_dstBufferGPR, TrustedImm32(offset));
    }

    static constexpr bool hasPairOp = true;
#else
    template<typename RegType> ALWAYS_INLINE void loadPair(int, RegType, RegType) { }
    template<typename RegType> ALWAYS_INLINE void storePair(RegType, RegType, int) { }

    static constexpr bool hasPairOp = false;
#endif

    JIT& m_jit;

    GPRReg m_srcBufferGPR;
    GPRReg m_dstBufferGPR;
    GPRReg m_temp1GPR;
    GPRReg m_temp2GPR;
    FPRReg m_temp1FPR;
    FPRReg m_temp2FPR;

private:
    static constexpr int gprSize = static_cast<int>(sizeof(CPURegister));
    static constexpr int fprSize = static_cast<int>(sizeof(double));

    // These point to which register to use.
    GPRReg m_gprToStore { InvalidGPRReg }; // Only used when !hasPairOp.
    FPRReg m_fprToStore { InvalidFPRReg }; // Only used when !hasPairOp.

    BufferRegs m_bufferRegsAttr;
    Source m_sources[2];
    unsigned m_currentSource { 0 };
    int m_srcOffsetAdjustment { 0 };
    int m_dstOffsetAdjustment { 0 };
    int m_deferredStoreOffset;

    template<typename RegType> friend struct RegDispatch;
};

} // namespace JSC

#endif // ENABLE(JIT)