source: webkit/trunk/JavaScriptCore/assembler/ARMAssembler.cpp@ 46247

Last change on this file since 46247 was 46247, checked in by [email protected], 16 years ago

2009-07-22 Gavin Barraclough <[email protected]>

Reviewed by Sam Weinig.

With ENABLE(ASSEMBLER_WX_EXCLUSIVE), only change permissions once per repatch event.
( https://bugs.webkit.org/show_bug.cgi?id=27564 )

Currently we change permissions forwards and backwards for each instruction modified;
instead, we should change permissions only once per complete repatching event.

This is a 2.5% progression running with ENABLE(ASSEMBLER_WX_EXCLUSIVE),
which recoups one third of the penalty of running in this mode.
(A minimal sketch of the batched-permissions pattern follows the file list below.)

  • assembler/ARMAssembler.cpp: (JSC::ARMAssembler::linkBranch):
    • Replace usage of makeWritable with cacheFlush.
  • assembler/ARMAssembler.h: (JSC::ARMAssembler::patchPointerInternal): (JSC::ARMAssembler::repatchLoadPtrToLEA):
    • Replace usage of makeWritable with cacheFlush.
  • assembler/ARMv7Assembler.h: (JSC::ARMv7Assembler::relinkJump): (JSC::ARMv7Assembler::relinkCall): (JSC::ARMv7Assembler::repatchInt32): (JSC::ARMv7Assembler::repatchPointer): (JSC::ARMv7Assembler::repatchLoadPtrToLEA): (JSC::ARMv7Assembler::setInt32):
    • Replace usage of makeWritable with cacheFlush.
  • assembler/LinkBuffer.h: (JSC::LinkBuffer::performFinalization):
    • Make explicit call to cacheFlush.
  • assembler/MacroAssemblerCodeRef.h: (JSC::MacroAssemblerCodeRef::MacroAssemblerCodeRef):
    • Make size always available.
  • assembler/RepatchBuffer.h: (JSC::RepatchBuffer::RepatchBuffer): (JSC::RepatchBuffer::~RepatchBuffer):
    • Add calls to makeWritable & makeExecutable.
  • assembler/X86Assembler.h: (JSC::X86Assembler::relinkJump): (JSC::X86Assembler::relinkCall): (JSC::X86Assembler::repatchInt32): (JSC::X86Assembler::repatchPointer): (JSC::X86Assembler::repatchLoadPtrToLEA):
    • Remove usage of makeWritable.
  • bytecode/CodeBlock.h: (JSC::CodeBlock::getJITCode):
    • Provide access to CodeBlock's JITCode.
  • jit/ExecutableAllocator.h: (JSC::ExecutableAllocator::makeExecutable): (JSC::ExecutableAllocator::cacheFlush):
    • Remove makeWritable, make cacheFlush public.
  • jit/JIT.cpp: (JSC::ctiPatchNearCallByReturnAddress): (JSC::ctiPatchCallByReturnAddress): (JSC::JIT::privateCompile): (JSC::JIT::unlinkCall): (JSC::JIT::linkCall):
    • Add CodeBlock argument to RepatchBuffer.
  • jit/JIT.h:
    • Pass CodeBlock argument for use by RepatchBuffer.
  • jit/JITCode.h: (JSC::JITCode::start): (JSC::JITCode::size):
    • Provide access to code start & size.
  • jit/JITPropertyAccess.cpp: (JSC::JIT::privateCompilePutByIdTransition): (JSC::JIT::patchGetByIdSelf): (JSC::JIT::patchMethodCallProto): (JSC::JIT::patchPutByIdReplace): (JSC::JIT::privateCompilePatchGetArrayLength): (JSC::JIT::privateCompileGetByIdProto): (JSC::JIT::privateCompileGetByIdSelfList): (JSC::JIT::privateCompileGetByIdProtoList): (JSC::JIT::privateCompileGetByIdChainList): (JSC::JIT::privateCompileGetByIdChain):
    • Add CodeBlock argument to RepatchBuffer.
  • jit/JITStubs.cpp: (JSC::JITThunks::tryCachePutByID): (JSC::JITThunks::tryCacheGetByID): (JSC::JITStubs::DEFINE_STUB_FUNCTION):
    • Pass CodeBlock argument for use by RepatchBuffer.
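
The batched-permissions pattern, as a minimal sketch (illustrative, not the verbatim
WebKit source; it assumes the getJITCode/start/size accessors and the
makeWritable/makeExecutable calls named in the file list above):

    // Hypothetical sketch: acquire write permission once for the whole code
    // region on construction and restore execute permission once on
    // destruction, so the individual repatches in between touch page
    // permissions zero times instead of twice per patched instruction.
    class RepatchBuffer {
    public:
        RepatchBuffer(CodeBlock* codeBlock)
        {
            JITCode& code = codeBlock->getJITCode();
            m_start = code.start();
            m_size = code.size();
            // Exact scoping of makeWritable in the real tree may differ.
            ExecutableAllocator::makeWritable(m_start, m_size);
        }

        ~RepatchBuffer()
        {
            // Also responsible for flushing the instruction cache.
            ExecutableAllocator::makeExecutable(m_start, m_size);
        }

    private:
        void* m_start;
        size_t m_size;
    };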
/*
 * Copyright (C) 2009 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(ASSEMBLER) && PLATFORM(ARM)

#include "ARMAssembler.h"

namespace JSC {

// Patching helpers

// Returns the address of the 32-bit slot that an "ldr ..., [pc, +/- imm]"
// loads from: an entry in the not-yet-flushed constant pool when constPool
// is supplied and the low bit of the offset field is set, otherwise the
// pc-relative literal (pc reads as the instruction address plus 8 bytes).
ARMWord* ARMAssembler::getLdrImmAddress(ARMWord* insn, uint32_t* constPool)
{
    // Must be an ldr ..., [pc +/- imm]
    ASSERT((*insn & 0x0f7f0000) == 0x051f0000);

    if (constPool && (*insn & 0x1))
        return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));

    ARMWord addr = reinterpret_cast<ARMWord>(insn) + 2 * sizeof(ARMWord);
    if (*insn & DT_UP)
        return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
    else
        return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
}
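
// Worked example (illustrative; not part of the original file): the encoding
// 0xe59f0008 is "ldr r0, [pc, #8]" with DT_UP set, so for an instruction at
// address A, getLdrImmAddress returns reinterpret_cast<ARMWord*>(A + 8 + 8):
// the pipeline-adjusted pc (A + 8) plus the positive 8-byte offset.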

// Points the branch at "from" to "to": a direct B instruction when the
// displacement fits in the 24-bit branch offset, otherwise a rewrite of the
// constant-pool slot holding the branch target. Note the single cacheFlush
// per patched word, replacing the old makeWritable usage.
void ARMAssembler::linkBranch(void* code, JmpSrc from, void* to)
{
    ARMWord* insn = reinterpret_cast<ARMWord*>(code) + (from.m_offset / sizeof(ARMWord));

    if (!from.m_latePatch) {
        int diff = reinterpret_cast<ARMWord*>(to) - reinterpret_cast<ARMWord*>(insn + 2);

        if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
            *insn = B | getConditionalField(*insn) | (diff & BRANCH_MASK);
            ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
            return;
        }
    }
    ARMWord* addr = getLdrImmAddress(insn);
    *addr = reinterpret_cast<ARMWord>(to);
    ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
}

// Once the constant pool's final location is known, converts the placeholder
// (pool-index) form of a load into a real pc-relative offset.
void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
{
    ARMWord* ldr = reinterpret_cast<ARMWord*>(loadAddr);
    ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
    ARMWord index = (*ldr & 0xfff) >> 1;

    ASSERT(diff >= 1);
    if (diff >= 2 || index > 0) {
        diff = (diff + index - 2) * sizeof(ARMWord);
        ASSERT(diff <= 0xfff);
        *ldr = (*ldr & ~0xfff) | diff;
    } else
        *ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
}

// Handle immediates

// Tries to encode imm as an ARM operand2 immediate: an 8-bit value rotated
// right by twice the 4-bit rotate field. Returns the encoded operand, or 0
// if imm cannot be represented this way.
ARMWord ARMAssembler::getOp2(ARMWord imm)
{
    int rol;

    if (imm <= 0xff)
        return OP2_IMM | imm;

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol = 8;
    } else {
        imm = (imm << 24) | (imm >> 8);
        rol = 0;
    }

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol += 4;
    }

    if ((imm & 0xf0000000) == 0) {
        imm <<= 4;
        rol += 2;
    }

    if ((imm & 0xc0000000) == 0) {
        imm <<= 2;
        rol += 1;
    }

    if ((imm & 0x00ffffff) == 0)
        return OP2_IMM | (imm >> 24) | (rol << 8);

    return 0;
}
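
// Worked examples (illustrative; not part of the original file):
//   getOp2(0x000000fe) == OP2_IMM | 0xfe             (8-bit value, no rotation)
//   getOp2(0xff000000) == OP2_IMM | 0xff | (4 << 8)  (0xff rotated right by 2 * 4 = 8 bits)
//   getOp2(0x000003fc) == OP2_IMM | 0xff | (15 << 8) (0xff rotated right by 2 * 15 = 30 bits)
//   getOp2(0x000001ff) == 0                          (nine significant bits: not encodable)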

// Materializes imm into reg as a pair of operand2 immediates: mov of one
// rotated 8-bit chunk followed by orr of another when positive is true; when
// positive is false the caller passes the complemented value, and mvn/bic
// produce its complement. Returns 1 on success, 0 if imm cannot be split
// into two such chunks.
int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
{
    // Step 1: Search for a non-immediate part
    ARMWord mask;
    ARMWord imm1;
    ARMWord imm2;
    int rol;

    mask = 0xff000000;
    rol = 8;
    while (1) {
        if ((imm & mask) == 0) {
            imm = (imm << rol) | (imm >> (32 - rol));
            rol = 4 + (rol >> 1);
            break;
        }
        rol += 2;
        mask >>= 2;
        if (mask & 0x3) {
            // rol 8
            imm = (imm << 8) | (imm >> 24);
            mask = 0xff00;
            rol = 24;
            while (1) {
                if ((imm & mask) == 0) {
                    imm = (imm << rol) | (imm >> (32 - rol));
                    rol = (rol >> 1) - 8;
                    break;
                }
                rol += 2;
                mask >>= 2;
                if (mask & 0x3)
                    return 0;
            }
            break;
        }
    }

    ASSERT((imm & 0xff) == 0);

    if ((imm & 0xff000000) == 0) {
        imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
        imm2 = OP2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
    } else if (imm & 0xc0000000) {
        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xff000000) == 0) {
            imm <<= 8;
            rol += 4;
        }

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    } else {
        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    }

    if (positive) {
        mov_r(reg, imm1);
        orr_r(reg, reg, imm2);
    } else {
        mvn_r(reg, imm1);
        bic_r(reg, reg, imm2);
    }

    return 1;
}
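
// Worked example (illustrative; not part of the original file):
// genInt(reg, 0x00ff00ff, true) splits the value into two rotated 8-bit
// chunks and emits
//   mov reg, #0x00ff0000
//   orr reg, reg, #0x000000ff
// while a value such as 0x12345678 needs more than two chunks, so genInt
// returns 0 and the caller falls back to a constant-pool load.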

// Returns an operand2 encoding of imm when one exists (possibly inverted,
// when invert is allowed); otherwise materializes imm into tmpReg and
// returns the register, falling back from one instruction, to two, to a
// constant-pool load.
ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
{
    ARMWord tmp;

    // Do it with one instruction
    tmp = getOp2(imm);
    if (tmp)
        return tmp;

    tmp = getOp2(~imm);
    if (tmp) {
        if (invert)
            return tmp | OP2_INV_IMM;
        mvn_r(tmpReg, tmp);
        return tmpReg;
    }

    // Do it with two instructions
    if (genInt(tmpReg, imm, true))
        return tmpReg;
    if (genInt(tmpReg, ~imm, false))
        return tmpReg;

    // Last resort: load the value from the constant pool
    ldr_imm(tmpReg, imm);
    return tmpReg;
}

// Loads imm into dest using the same fallback ladder as getImm.
void ARMAssembler::moveImm(ARMWord imm, int dest)
{
    ARMWord tmp;

    // Do it with one instruction
    tmp = getOp2(imm);
    if (tmp) {
        mov_r(dest, tmp);
        return;
    }

    tmp = getOp2(~imm);
    if (tmp) {
        mvn_r(dest, tmp);
        return;
    }

    // Do it with two instructions
    if (genInt(dest, imm, true))
        return;
    if (genInt(dest, ~imm, false))
        return;

    ldr_imm(dest, imm);
}
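
// Worked example (illustrative; not part of the original file) of the
// fallback ladder shared by getImm and moveImm:
//   moveImm(0x000000ff, reg) -> mov reg, #0xff       (single op2 immediate)
//   moveImm(0xffffff00, reg) -> mvn reg, #0xff       (complement is encodable)
//   moveImm(0x00ff00ff, reg) -> mov + orr            (two chunks via genInt)
//   moveImm(0x12345678, reg) -> ldr reg, [pc, #...]  (constant-pool load)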

// Memory load/store helpers

// Emits a load or store of srcDst at base + offset, picking the cheapest
// addressing form: a 12-bit immediate offset, an add/sub of the upper 8 bits
// followed by a 12-bit offset, or a full offset materialized through getImm.
void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset)
{
    if (offset >= 0) {
        if (offset <= 0xfff)
            dtr_u(isLoad, srcDst, base, offset);
        else if (offset <= 0xfffff) {
            add_r(ARM::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
            dtr_u(isLoad, srcDst, ARM::S0, offset & 0xfff);
        } else {
            ARMWord reg = getImm(offset, ARM::S0);
            dtr_ur(isLoad, srcDst, base, reg);
        }
    } else {
        offset = -offset;
        if (offset <= 0xfff)
            dtr_d(isLoad, srcDst, base, offset);
        else if (offset <= 0xfffff) {
            sub_r(ARM::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
            dtr_d(isLoad, srcDst, ARM::S0, offset & 0xfff);
        } else {
            ARMWord reg = getImm(offset, ARM::S0);
            dtr_dr(isLoad, srcDst, base, reg);
        }
    }
}
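
// Worked example (illustrative; not part of the original file): a load at
// base + 0x12345 exceeds the 12-bit immediate but fits in 20 bits, so the
// middle tier emits
//   add s0, base, #0x12000   ; upper 8 bits, op2 rotate field 10
//   ldr srcDst, [s0, #0x345] ; remaining 12-bit offset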

// Emits a load or store of srcDst at base + (index << scale) + offset,
// computing the address into the S0 scratch register when needed.
void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
    ARMWord op2;

    ASSERT(scale >= 0 && scale <= 3);
    op2 = lsl(index, scale);

    if (offset >= 0 && offset <= 0xfff) {
        add_r(ARM::S0, base, op2);
        dtr_u(isLoad, srcDst, ARM::S0, offset);
        return;
    }
    if (offset <= 0 && offset >= -0xfff) {
        add_r(ARM::S0, base, op2);
        dtr_d(isLoad, srcDst, ARM::S0, -offset);
        return;
    }

    moveImm(offset, ARM::S0);
    add_r(ARM::S0, ARM::S0, op2);
    dtr_ur(isLoad, srcDst, base, ARM::S0);
}

// Copies the assembled code into memory owned by allocator and resolves the
// branches recorded in m_jumps; a literal slot still holding 0xffffffff marks
// a branch whose target has not been set, so it is left for later patching.
void* ARMAssembler::executableCopy(ExecutablePool* allocator)
{
    char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));

    for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
        ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + *iter);
        ARMWord* offset = getLdrImmAddress(ldrAddr);
        if (*offset != 0xffffffff)
            linkBranch(data, JmpSrc(*iter), data + *offset);
    }

    return data;
}

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM)