source: webkit/trunk/JavaScriptCore/VM/CTI.cpp@38137

Last change on this file since 38137 was 38012, checked in by [email protected], 17 years ago

2008-10-30 Cameron Zwarich <[email protected]>

Reviewed by Oliver Hunt.

Bug 21987: CTI::putDoubleResultToJSNumberCellOrJSImmediate() hardcodes its result register
<https://bugs.webkit.org/show_bug.cgi?id=21987>

CTI::putDoubleResultToJSNumberCellOrJSImmediate() hardcodes its result
register as ecx, but it should be tempReg1, which is ecx at all of its
callsites.

  • VM/CTI.cpp: (JSC::CTI::putDoubleResultToJSNumberCellOrJSImmediate):
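
The gist of the fix, reconstructed from the change description above (an illustrative sketch, not the actual diff): the tail of putDoubleResultToJSNumberCellOrJSImmediate() stops naming ecx directly and uses its tempReg1 parameter, which happens to be ecx at every call site:

    // Before (result register hardcoded):
    emitFastArithIntToImmNoCheck(X86::ecx);
    emitPutResult(dst, X86::ecx);

    // After (use the caller-supplied temporary):
    emitFastArithIntToImmNoCheck(tempReg1);
    emitPutResult(dst, tempReg1);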
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "CTI.h"
28
29#if ENABLE(CTI)
30
31#include "CodeBlock.h"
32#include "JSArray.h"
33#include "JSFunction.h"
34#include "Machine.h"
35#include "wrec/WREC.h"
36#include "ResultType.h"
37#include "SamplingTool.h"
38
39#ifndef NDEBUG
40#include <stdio.h>
41#endif
42
43using namespace std;
44
45namespace JSC {
46
47#if PLATFORM(MAC)
48
49static inline bool isSSE2Present()
50{
51 return true; // All X86 Macs are guaranteed to support at least SSE2
52}
53
54#else
55
56static bool isSSE2Present()
57{
58 static const int SSE2FeatureBit = 1 << 26;
59 struct SSE2Check {
60 SSE2Check()
61 {
62 int flags;
63#if COMPILER(MSVC)
64 _asm {
65 mov eax, 1 // cpuid function 1 gives us the standard feature set
66 cpuid;
67 mov flags, edx;
68 }
69#else
70 flags = 0;
71 // FIXME: Add GCC code to do above asm
72#endif
73 present = (flags & SSE2FeatureBit) != 0;
74 }
75 bool present;
76 };
77 static SSE2Check check;
78 return check.present;
79}
80
81#endif
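// Note on the FIXME above: as written, SSE2 is never detected when this file is built with GCC on
// a non-Mac x86 target, since 'flags' stays 0. A minimal sketch of how the cpuid query could be
// issued with GCC inline assembly (an assumption for illustration, not code from this revision;
// ebx is saved around cpuid because it may be reserved for PIC):
//
//     asm(
//         "movl $0x1, %%eax;"
//         "pushl %%ebx;"
//         "cpuid;"
//         "popl %%ebx;"
//         "movl %%edx, %0;"
//         : "=g" (flags)
//         :
//         : "%eax", "%ecx", "%edx");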
82
83COMPILE_ASSERT(CTI_ARGS_code == 0xC, CTI_ARGS_code_is_C);
84COMPILE_ASSERT(CTI_ARGS_callFrame == 0xE, CTI_ARGS_callFrame_is_E);
85
86#if COMPILER(GCC) && PLATFORM(X86)
87
88#if PLATFORM(DARWIN)
89#define SYMBOL_STRING(name) "_" #name
90#else
91#define SYMBOL_STRING(name) #name
92#endif
93
94asm(
95".globl " SYMBOL_STRING(ctiTrampoline) "\n"
96SYMBOL_STRING(ctiTrampoline) ":" "\n"
97 "pushl %esi" "\n"
98 "pushl %edi" "\n"
99 "pushl %ebx" "\n"
100 "subl $0x20, %esp" "\n"
101 "movl $512, %esi" "\n"
102 "movl 0x38(%esp), %edi" "\n" // Ox38 = 0x0E * 4, 0x0E = CTI_ARGS_callFrame (see assertion above)
103 "call *0x30(%esp)" "\n" // Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
104 "addl $0x20, %esp" "\n"
105 "popl %ebx" "\n"
106 "popl %edi" "\n"
107 "popl %esi" "\n"
108 "ret" "\n"
109);
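// Why 0x30 and 0x38: on entry, the first stack argument ('code') sits at esp+0x04 and the
// CallFrame* argument at esp+0x0C. The prologue pushes esi, edi and ebx (12 bytes) and reserves
// 0x20 bytes of outgoing argument space, lowering esp by 0x2C in total, so 'code' ends up at
// esp+0x30 (slot 0x0C in void*-sized units) and the CallFrame* at esp+0x38 (slot 0x0E), matching
// the CTI_ARGS_code and CTI_ARGS_callFrame COMPILE_ASSERTs above.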
110
111asm(
112".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
113SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
114#if USE(CTI_ARGUMENT)
115#if USE(FAST_CALL_CTI_ARGUMENT)
116 "movl %esp, %ecx" "\n"
117#else
118 "movl %esp, 0(%esp)" "\n"
119#endif
120 "call " SYMBOL_STRING(_ZN3JSC7Machine12cti_vm_throwEPPv) "\n"
121#else
122 "call " SYMBOL_STRING(_ZN3JSC7Machine12cti_vm_throwEPvz) "\n"
123#endif
124 "addl $0x20, %esp" "\n"
125 "popl %ebx" "\n"
126 "popl %edi" "\n"
127 "popl %esi" "\n"
128 "ret" "\n"
129);
130
131#elif COMPILER(MSVC)
132
133extern "C" {
134
135 __declspec(naked) JSValue* ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue** exception, Profiler**, JSGlobalData*)
136 {
137 __asm {
138 push esi;
139 push edi;
140 push ebx;
141 sub esp, 0x20;
142 mov esi, 512;
143 mov ecx, esp;
144 mov edi, [esp + 0x38];
145 call [esp + 0x30]; // 0x30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
146 add esp, 0x20;
147 pop ebx;
148 pop edi;
149 pop esi;
150 ret;
151 }
152 }
153
154 __declspec(naked) void ctiVMThrowTrampoline()
155 {
156 __asm {
157 mov ecx, esp;
158 call JSC::Machine::cti_vm_throw;
159 add esp, 0x20;
160 pop ebx;
161 pop edi;
162 pop esi;
163 ret;
164 }
165 }
166
167}
168
169#endif
170
171ALWAYS_INLINE bool CTI::isConstant(int src)
172{
173 return src >= m_codeBlock->numVars && src < m_codeBlock->numVars + m_codeBlock->numConstants;
174}
175
176ALWAYS_INLINE JSValue* CTI::getConstant(CallFrame* callFrame, int src)
177{
178 return m_codeBlock->constantRegisters[src - m_codeBlock->numVars].jsValue(callFrame);
179}
180
181inline uintptr_t CTI::asInteger(JSValue* value)
182{
183 return reinterpret_cast<uintptr_t>(value);
184}
185
186// get arg puts an arg from the SF register array into a h/w register
187ALWAYS_INLINE void CTI::emitGetArg(int src, X86Assembler::RegisterID dst)
188{
189 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
190 if (isConstant(src)) {
191 JSValue* js = getConstant(m_callFrame, src);
192 m_jit.movl_i32r(asInteger(js), dst);
193 } else
194 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
195}
196
197// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
198ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
199{
200 if (isConstant(src)) {
201 JSValue* js = getConstant(m_callFrame, src);
202 m_jit.movl_i32m(asInteger(js), offset + sizeof(void*), X86::esp);
203 } else {
204 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
205 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
206 }
207}
208
209// puts an arg onto the stack, as an arg to a context threaded function.
210ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
211{
212 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
213}
214
215ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
216{
217 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
218}
219
220ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
221{
222 if (isConstant(src)) {
223 JSValue* js = getConstant(m_callFrame, src);
224 return JSImmediate::isNumber(js) ? js : noValue();
225 }
226 return noValue();
227}
228
229ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
230{
231 m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
232}
233
234ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
235{
236 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
237}
238
239ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
240{
241 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
242}
243
244ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
245{
246 m_jit.movl_rm(from, entry * sizeof(Register), X86::edi);
247}
248
249ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
250{
251 m_jit.movl_mr(entry * sizeof(Register), X86::edi, to);
252}
253
254ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
255{
256 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
257 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
258}
259
260ALWAYS_INLINE void CTI::emitInitRegister(unsigned dst)
261{
262 m_jit.movl_i32m(asInteger(jsUndefined()), dst * sizeof(Register), X86::edi);
263 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
264}
265
266void ctiSetReturnAddress(void** where, void* what)
267{
268 *where = what;
269}
270
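// ctiRepatchCallByReturnAddress: 'where' is a return address, so the four bytes immediately before
// it are the rel32 displacement of the call instruction that produced it. Storing (what - where)
// there retargets the call, because x86 call displacements are relative to the address of the
// instruction following the call, i.e. the return address itself.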
271void ctiRepatchCallByReturnAddress(void* where, void* what)
272{
273 (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
274}
275
276#ifndef NDEBUG
277
278void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
279{
280 char which1 = '*';
281 if (isConstant(src1)) {
282 JSValue* js = getConstant(m_callFrame, src1);
283 which1 =
284 JSImmediate::isImmediate(js) ?
285 (JSImmediate::isNumber(js) ? 'i' :
286 JSImmediate::isBoolean(js) ? 'b' :
287 js->isUndefined() ? 'u' :
288 js->isNull() ? 'n' : '?')
289 :
290 (js->isString() ? 's' :
291 js->isObject() ? 'o' :
292 'k');
293 }
294 char which2 = '*';
295 if (isConstant(src2)) {
296 JSValue* js = getConstant(m_callFrame, src2);
297 which2 =
298 JSImmediate::isImmediate(js) ?
299 (JSImmediate::isNumber(js) ? 'i' :
300 JSImmediate::isBoolean(js) ? 'b' :
301 js->isUndefined() ? 'u' :
302 js->isNull() ? 'n' : '?')
303 :
304 (js->isString() ? 's' :
305 js->isObject() ? 'o' :
306 'k');
307 }
308 if ((which1 != '*') | (which2 != '*'))
309 fprintf(stderr, "Types %c %c\n", which1, which2);
310}
311
312#endif
313
314extern "C" {
315 static JSValue* FASTCALL allocateNumber(JSGlobalData* globalData) {
316 JSValue* result = new (globalData) JSNumberCell(globalData);
317 ASSERT(result);
318 return result;
319 }
320}
321
322ALWAYS_INLINE void CTI::emitAllocateNumber(JSGlobalData* globalData, unsigned opcodeIndex)
323{
324 m_jit.movl_i32r(reinterpret_cast<intptr_t>(globalData), X86::ecx);
325 emitNakedFastCall(opcodeIndex, (void*)allocateNumber);
326}
327
328ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitNakedCall(unsigned opcodeIndex, X86::RegisterID r)
329{
330 X86Assembler::JmpSrc call = m_jit.emitCall(r);
331 m_calls.append(CallRecord(call, opcodeIndex));
332
333 return call;
334}
335
336ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitNakedCall(unsigned opcodeIndex, void(*function)())
337{
338 X86Assembler::JmpSrc call = m_jit.emitCall();
339 m_calls.append(CallRecord(call, reinterpret_cast<CTIHelper_v>(function), opcodeIndex));
340 return call;
341}
342
343ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitNakedFastCall(unsigned opcodeIndex, void* function)
344{
345 X86Assembler::JmpSrc call = m_jit.emitCall();
346 m_calls.append(CallRecord(call, reinterpret_cast<CTIHelper_v>(function), opcodeIndex));
347 return call;
348}
349
350ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(Instruction* vPC, unsigned opcodeIndex, CTIHelper_j helper)
351{
352#if ENABLE(OPCODE_SAMPLING)
353 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, true), m_machine->sampler()->sampleSlot());
354#else
355 UNUSED_PARAM(vPC);
356#endif
357 m_jit.emitRestoreArgumentReference();
358 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
359 X86Assembler::JmpSrc call = m_jit.emitCall();
360 m_calls.append(CallRecord(call, helper, opcodeIndex));
361#if ENABLE(OPCODE_SAMPLING)
362 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, false), m_machine->sampler()->sampleSlot());
363#endif
364
365 return call;
366}
367
368ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(Instruction* vPC, unsigned opcodeIndex, CTIHelper_o helper)
369{
370#if ENABLE(OPCODE_SAMPLING)
371 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, true), m_machine->sampler()->sampleSlot());
372#else
373 UNUSED_PARAM(vPC);
374#endif
375 m_jit.emitRestoreArgumentReference();
376 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
377 X86Assembler::JmpSrc call = m_jit.emitCall();
378 m_calls.append(CallRecord(call, helper, opcodeIndex));
379#if ENABLE(OPCODE_SAMPLING)
380 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, false), m_machine->sampler()->sampleSlot());
381#endif
382
383 return call;
384}
385
386ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(Instruction* vPC, unsigned opcodeIndex, CTIHelper_p helper)
387{
388#if ENABLE(OPCODE_SAMPLING)
389 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, true), m_machine->sampler()->sampleSlot());
390#else
391 UNUSED_PARAM(vPC);
392#endif
393 m_jit.emitRestoreArgumentReference();
394 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
395 X86Assembler::JmpSrc call = m_jit.emitCall();
396 m_calls.append(CallRecord(call, helper, opcodeIndex));
397#if ENABLE(OPCODE_SAMPLING)
398 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, false), m_machine->sampler()->sampleSlot());
399#endif
400
401 return call;
402}
403
404ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(Instruction* vPC, unsigned opcodeIndex, CTIHelper_b helper)
405{
406#if ENABLE(OPCODE_SAMPLING)
407 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, true), m_machine->sampler()->sampleSlot());
408#else
409 UNUSED_PARAM(vPC);
410#endif
411 m_jit.emitRestoreArgumentReference();
412 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
413 X86Assembler::JmpSrc call = m_jit.emitCall();
414 m_calls.append(CallRecord(call, helper, opcodeIndex));
415#if ENABLE(OPCODE_SAMPLING)
416 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, false), m_machine->sampler()->sampleSlot());
417#endif
418
419 return call;
420}
421
422ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(Instruction* vPC, unsigned opcodeIndex, CTIHelper_v helper)
423{
424#if ENABLE(OPCODE_SAMPLING)
425 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, true), m_machine->sampler()->sampleSlot());
426#else
427 UNUSED_PARAM(vPC);
428#endif
429 m_jit.emitRestoreArgumentReference();
430 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
431 X86Assembler::JmpSrc call = m_jit.emitCall();
432 m_calls.append(CallRecord(call, helper, opcodeIndex));
433#if ENABLE(OPCODE_SAMPLING)
434 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, false), m_machine->sampler()->sampleSlot());
435#endif
436
437 return call;
438}
439
440ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(Instruction* vPC, unsigned opcodeIndex, CTIHelper_s helper)
441{
442#if ENABLE(OPCODE_SAMPLING)
443 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, true), m_machine->sampler()->sampleSlot());
444#else
445 UNUSED_PARAM(vPC);
446#endif
447 m_jit.emitRestoreArgumentReference();
448 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
449 X86Assembler::JmpSrc call = m_jit.emitCall();
450 m_calls.append(CallRecord(call, helper, opcodeIndex));
451#if ENABLE(OPCODE_SAMPLING)
452 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, false), m_machine->sampler()->sampleSlot());
453#endif
454
455 return call;
456}
457
458ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(Instruction* vPC, unsigned opcodeIndex, CTIHelper_2 helper)
459{
460#if ENABLE(OPCODE_SAMPLING)
461 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, true), m_machine->sampler()->sampleSlot());
462#else
463 UNUSED_PARAM(vPC);
464#endif
465 m_jit.emitRestoreArgumentReference();
466 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
467 X86Assembler::JmpSrc call = m_jit.emitCall();
468 m_calls.append(CallRecord(call, helper, opcodeIndex));
469#if ENABLE(OPCODE_SAMPLING)
470 m_jit.movl_i32m(m_machine->sampler()->encodeSample(vPC, false), m_machine->sampler()->sampleSlot());
471#endif
472
473 return call;
474}
475
476ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
477{
478 m_jit.testl_i32r(JSImmediate::TagMask, reg);
479 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
480}
481
482ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNum(X86Assembler::RegisterID reg, unsigned opcodeIndex)
483{
484 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
485 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
486}
487
488ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNums(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
489{
490 m_jit.movl_rr(reg1, X86::ecx);
491 m_jit.andl_rr(reg2, X86::ecx);
492 emitJumpSlowCaseIfNotImmNum(X86::ecx, opcodeIndex);
493}
494
495ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
496{
497 ASSERT(JSImmediate::isNumber(imm));
498 return asInteger(imm) & ~JSImmediate::TagBitTypeInteger;
499}
500
501ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
502{
503 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
504}
505
506ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitFastArithDeTagImmediateJumpIfZero(X86Assembler::RegisterID reg)
507{
508 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
509 return m_jit.emitUnlinkedJe();
510}
511
512ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
513{
514 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
515}
516
517ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
518{
519 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
520}
521
522ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
523{
524 m_jit.sarl_i8r(1, reg);
525}
526
527ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
528{
529 m_jit.addl_rr(reg, reg);
530 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
531 emitFastArithReTagImmediate(reg);
532}
533
534ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
535{
536 m_jit.addl_rr(reg, reg);
537 emitFastArithReTagImmediate(reg);
538}
539
540ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitArithIntToImmWithJump(X86Assembler::RegisterID reg)
541{
542 m_jit.addl_rr(reg, reg);
543 X86Assembler::JmpSrc jmp = m_jit.emitUnlinkedJo();
544 emitFastArithReTagImmediate(reg);
545 return jmp;
546}
547
548ALWAYS_INLINE void CTI::emitTagAsBoolImmediate(X86Assembler::RegisterID reg)
549{
550 m_jit.shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
551 m_jit.orl_i32r(JSImmediate::FullTagTypeBool, reg);
552}
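// A worked example of the immediate encoding that the fast-arith helpers above manipulate
// (assuming the usual 32-bit JSImmediate scheme, in which an integer n is stored as
// (n << 1) | TagBitTypeInteger): the integer 3 is the immediate 0x7. De-tagging just one operand
// keeps an addition's result correctly tagged, since (2a + 1 - 1) + (2b + 1) == 2(a + b) + 1,
// which is exactly how compileBinaryArithOp handles op_add below.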
553
554CTI::CTI(Machine* machine, CallFrame* callFrame, CodeBlock* codeBlock)
555 : m_jit(machine->jitCodeBuffer())
556 , m_machine(machine)
557 , m_callFrame(callFrame)
558 , m_codeBlock(codeBlock)
559 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
560 , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->propertyAccessInstructions.size() : 0)
561 , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->callLinkInfos.size() : 0)
562{
563}
564
565#define CTI_COMPILE_BINARY_OP(name) \
566 case name: { \
567 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
568 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
569 emitCTICall(instruction + i, i, Machine::cti_##name); \
570 emitPutResult(instruction[i + 1].u.operand); \
571 i += 4; \
572 break; \
573 }
574
575#define CTI_COMPILE_UNARY_OP(name) \
576 case name: { \
577 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
578 emitCTICall(instruction + i, i, Machine::cti_##name); \
579 emitPutResult(instruction[i + 1].u.operand); \
580 i += 3; \
581 break; \
582 }
583
584static void unreachable()
585{
586 ASSERT_NOT_REACHED();
587 exit(1);
588}
589
590void CTI::compileOpCallInitializeCallFrame(unsigned callee, unsigned argCount)
591{
592 emitGetArg(callee, X86::ecx); // Load callee JSFunction into ecx
593 m_jit.movl_rm(X86::eax, RegisterFile::CodeBlock * static_cast<int>(sizeof(Register)), X86::edx); // callee CodeBlock was returned in eax
594 m_jit.movl_i32m(asInteger(noValue()), RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register)), X86::edx);
595 m_jit.movl_rm(X86::ecx, RegisterFile::Callee * static_cast<int>(sizeof(Register)), X86::edx);
596
597 m_jit.movl_mr(OBJECT_OFFSET(JSFunction, m_scopeChain) + OBJECT_OFFSET(ScopeChain, m_node), X86::ecx, X86::ebx); // newScopeChain
598 m_jit.movl_i32m(argCount, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)), X86::edx);
599 m_jit.movl_rm(X86::edi, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register)), X86::edx);
600 m_jit.movl_rm(X86::ebx, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)), X86::edx);
601}
602
603void CTI::compileOpCallSetupArgs(Instruction* instruction, bool isConstruct, bool isEval)
604{
605 int firstArg = instruction[4].u.operand;
606 int argCount = instruction[5].u.operand;
607 int registerOffset = instruction[6].u.operand;
608
609 emitPutArg(X86::ecx, 0);
610 emitPutArgConstant(registerOffset, 4);
611 emitPutArgConstant(argCount, 8);
612 emitPutArgConstant(reinterpret_cast<unsigned>(instruction), 12);
613 if (isConstruct) {
614 emitGetPutArg(instruction[3].u.operand, 16, X86::eax);
615 emitPutArgConstant(firstArg, 20);
616 } else if (isEval)
617 emitGetPutArg(instruction[3].u.operand, 16, X86::eax);
618}
619
620void CTI::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned i, unsigned callLinkInfoIndex)
621{
622 int dst = instruction[1].u.operand;
623 int callee = instruction[2].u.operand;
624 int firstArg = instruction[4].u.operand;
625 int argCount = instruction[5].u.operand;
626 int registerOffset = instruction[6].u.operand;
627
628 // Setup this value as the first argument (does not apply to constructors)
629 if (opcodeID != op_construct) {
630 int thisVal = instruction[3].u.operand;
631 if (thisVal == missingThisObjectMarker()) {
632 // FIXME: should this be loaded dynamically off m_callFrame?
633 m_jit.movl_i32m(asInteger(m_callFrame->globalThisValue()), firstArg * sizeof(Register), X86::edi);
634 } else {
635 emitGetArg(thisVal, X86::eax);
636 emitPutResult(firstArg);
637 }
638 }
639
640 // Handle eval
641 X86Assembler::JmpSrc wasEval;
642 if (opcodeID == op_call_eval) {
643 emitGetArg(callee, X86::ecx);
644 compileOpCallSetupArgs(instruction, false, true);
645
646 emitCTICall(instruction, i, Machine::cti_op_call_eval);
647 m_jit.cmpl_i32r(asInteger(JSImmediate::impossibleValue()), X86::eax);
648 wasEval = m_jit.emitUnlinkedJne();
649 }
650
651 // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
652 // This deliberately leaves the callee in ecx, used when setting up the stack frame below
653 emitGetArg(callee, X86::ecx);
654 m_jit.cmpl_i32r(asInteger(JSImmediate::impossibleValue()), X86::ecx);
655 X86Assembler::JmpDst addressOfLinkedFunctionCheck = m_jit.label();
656 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
657 ASSERT(X86Assembler::getDifferenceBetweenLabels(addressOfLinkedFunctionCheck, m_jit.label()) == repatchOffsetOpCallCall);
658 m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
659
660 // The following is the fast case, only used when a callee can be linked.
661
662 // In the case of OpConstruct, call out to a cti_ function to create the new object.
663 if (opcodeID == op_construct) {
664 emitPutArg(X86::ecx, 0);
665 emitGetPutArg(instruction[3].u.operand, 4, X86::eax);
666 emitCTICall(instruction, i, Machine::cti_op_construct_JSConstructFast);
667 emitPutResult(instruction[4].u.operand);
668 emitGetArg(callee, X86::ecx);
669 }
670
671 // Fast version of stack frame initialization, directly relative to edi.
672 // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
673 m_jit.movl_i32m(asInteger(noValue()), (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register)), X86::edi);
674 m_jit.movl_rm(X86::ecx, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register)), X86::edi);
675 m_jit.movl_mr(OBJECT_OFFSET(JSFunction, m_scopeChain) + OBJECT_OFFSET(ScopeChain, m_node), X86::ecx, X86::edx); // newScopeChain
676 m_jit.movl_i32m(argCount, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register)), X86::edi);
677 m_jit.movl_rm(X86::edi, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register)), X86::edi);
678 m_jit.movl_rm(X86::edx, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register)), X86::edi);
679 m_jit.addl_i32r(registerOffset * sizeof(Register), X86::edi);
680
681 // Call to the callee
682 m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(i, unreachable);
683
684 if (opcodeID == op_call_eval)
685 m_jit.link(wasEval, m_jit.label());
686
687 // Put the return value in dst. In the interpreter, op_ret does this.
688 emitPutResult(dst);
689
690#if ENABLE(CODEBLOCK_SAMPLING)
691 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_machine->sampler()->codeBlockSlot());
692#endif
693}
694
695void CTI::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
696{
697 bool negated = (type == OpNStrictEq);
698
699 unsigned dst = instruction[1].u.operand;
700 unsigned src1 = instruction[2].u.operand;
701 unsigned src2 = instruction[3].u.operand;
702
703 emitGetArg(src1, X86::eax);
704 emitGetArg(src2, X86::edx);
705
706 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
707 X86Assembler::JmpSrc firstNotImmediate = m_jit.emitUnlinkedJe();
708 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
709 X86Assembler::JmpSrc secondNotImmediate = m_jit.emitUnlinkedJe();
710
711 m_jit.cmpl_rr(X86::edx, X86::eax);
712 if (negated)
713 m_jit.setne_r(X86::eax);
714 else
715 m_jit.sete_r(X86::eax);
716 m_jit.movzbl_rr(X86::eax, X86::eax);
717 emitTagAsBoolImmediate(X86::eax);
718
719 X86Assembler::JmpSrc bothWereImmediates = m_jit.emitUnlinkedJmp();
720
721 m_jit.link(firstNotImmediate, m_jit.label());
722
723 // check that edx is immediate but not the zero immediate
724 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
725 m_jit.setz_r(X86::ecx);
726 m_jit.movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
727 m_jit.cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::edx);
728 m_jit.sete_r(X86::edx);
729 m_jit.movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
730 m_jit.orl_rr(X86::ecx, X86::edx);
731
732 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
733
734 m_jit.movl_i32r(asInteger(jsBoolean(negated)), X86::eax);
735
736 X86Assembler::JmpSrc firstWasNotImmediate = m_jit.emitUnlinkedJmp();
737
738 m_jit.link(secondNotImmediate, m_jit.label());
739 // check that eax is not the zero immediate (we know it must be immediate)
740 m_jit.cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
741 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
742
743 m_jit.movl_i32r(asInteger(jsBoolean(negated)), X86::eax);
744
745 m_jit.link(bothWereImmediates, m_jit.label());
746 m_jit.link(firstWasNotImmediate, m_jit.label());
747
748 emitPutResult(dst);
749}
750
751void CTI::emitSlowScriptCheck(Instruction* vPC, unsigned opcodeIndex)
752{
753 m_jit.subl_i8r(1, X86::esi);
754 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
755 emitCTICall(vPC, opcodeIndex, Machine::cti_timeout_check);
756
757 emitGetCTIParam(CTI_ARGS_globalData, X86::ecx);
758 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
759 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
760 m_jit.link(skipTimeout, m_jit.label());
761}
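// emitSlowScriptCheck relies on esi holding a tick countdown, initialised to 512 in ctiTrampoline
// (see "movl $512, %esi" above): each check decrements it, and only when it reaches zero do we
// call cti_timeout_check and reload the counter from Machine::m_ticksUntilNextTimeoutCheck.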
762
763/*
764 This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
765
766 In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
767 is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
768
769 However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
770 control will fall through from the code planted.
771*/
772void CTI::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, X86Assembler::JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
773{
774 // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
775 m_jit.cvttsd2si_rr(xmmSource, tempReg1);
776 m_jit.addl_rr(tempReg1, tempReg1);
777 m_jit.sarl_i8r(1, tempReg1);
778 m_jit.cvtsi2sd_rr(tempReg1, tempXmm);
779 // Compare & branch if immediate.
780 m_jit.ucomis_rr(tempXmm, xmmSource);
781 X86Assembler::JmpSrc resultIsImm = m_jit.emitUnlinkedJe();
782 X86Assembler::JmpDst resultLookedLikeImmButActuallyIsnt = m_jit.label();
783
784 // Store the result to the JSNumberCell and jump.
785 m_jit.movsd_rm(xmmSource, OBJECT_OFFSET(JSNumberCell, m_value), jsNumberCell);
786 emitPutResult(dst, jsNumberCell);
787 *wroteJSNumberCell = m_jit.emitUnlinkedJmp();
788
789 m_jit.link(resultIsImm, m_jit.label());
790 // value == (double)(JSImmediate)value... or at least, it looks that way...
791 // ucomi will report that (0 == -0), and will report equal if either input is NaN (result is unordered).
792 m_jit.link(m_jit.emitUnlinkedJp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
793 m_jit.pextrw_irr(3, xmmSource, tempReg2);
794 m_jit.cmpl_i32r(0x8000, tempReg2);
795 m_jit.link(m_jit.emitUnlinkedJe(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
796 // Yes it really really really is representable as a JSImmediate.
797 emitFastArithIntToImmNoCheck(tempReg1);
798 emitPutResult(dst, tempReg1);
799}
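// The -0 check above works because pextrw with index 3 extracts the high 16 bits of the double in
// xmmSource. For a value that has already compared equal to its truncated-and-reconverted integer
// form, a high word of 0x8000 can only mean a zero with the sign bit set, i.e. -0, which must be
// stored in the JSNumberCell rather than as the immediate 0.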
800
801void CTI::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
802{
803 StructureID* numberStructureID = m_callFrame->globalData().numberStructureID.get();
804 X86Assembler::JmpSrc wasJSNumberCell1, wasJSNumberCell1b, wasJSNumberCell2, wasJSNumberCell2b;
805
806 emitGetArg(src1, X86::eax);
807 emitGetArg(src2, X86::edx);
808
809 if (types.second().isReusable() && isSSE2Present()) {
810 ASSERT(types.second().mightBeNumber());
811
812 // Check op2 is a number
813 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
814 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
815 if (!types.second().definitelyIsNumber()) {
816 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
817 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
818 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
819 }
820
821 // (1) In this case src2 is a reusable number cell.
822 // Slow case if src1 is not a number type.
823 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
824 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
825 if (!types.first().definitelyIsNumber()) {
826 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
827 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
828 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
829 }
830
831 // (1a) if we get here, src1 is also a number cell
832 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
833 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
834 // (1b) if we get here, src1 is an immediate
835 m_jit.link(op1imm, m_jit.label());
836 emitFastArithImmToInt(X86::eax);
837 m_jit.cvtsi2sd_rr(X86::eax, X86::xmm0);
838 // (1c)
839 m_jit.link(loadedDouble, m_jit.label());
840 if (opcodeID == op_add)
841 m_jit.addsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
842 else if (opcodeID == op_sub)
843 m_jit.subsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
844 else {
845 ASSERT(opcodeID == op_mul);
846 m_jit.mulsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
847 }
848
849 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
850 wasJSNumberCell2b = m_jit.emitUnlinkedJmp();
851
852 // (2) This handles cases where src2 is an immediate number.
853 // Two slow cases - either src1 isn't an immediate, or the arithmetic overflows.
854 m_jit.link(op2imm, m_jit.label());
855 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
856 } else if (types.first().isReusable() && isSSE2Present()) {
857 ASSERT(types.first().mightBeNumber());
858
859 // Check op1 is a number
860 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
861 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
862 if (!types.first().definitelyIsNumber()) {
863 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
864 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
865 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
866 }
867
868 // (1) In this case src1 is a reusable number cell.
869 // Slow case if src2 is not a number type.
870 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
871 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
872 if (!types.second().definitelyIsNumber()) {
873 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
874 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
875 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
876 }
877
878 // (1a) if we get here, src2 is also a number cell
879 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
880 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
881 // (1b) if we get here, src2 is an immediate
882 m_jit.link(op2imm, m_jit.label());
883 emitFastArithImmToInt(X86::edx);
884 m_jit.cvtsi2sd_rr(X86::edx, X86::xmm1);
885 // (1c)
886 m_jit.link(loadedDouble, m_jit.label());
887 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
888 if (opcodeID == op_add)
889 m_jit.addsd_rr(X86::xmm1, X86::xmm0);
890 else if (opcodeID == op_sub)
891 m_jit.subsd_rr(X86::xmm1, X86::xmm0);
892 else {
893 ASSERT(opcodeID == op_mul);
894 m_jit.mulsd_rr(X86::xmm1, X86::xmm0);
895 }
896 m_jit.movsd_rm(X86::xmm0, OBJECT_OFFSET(JSNumberCell, m_value), X86::eax);
897 emitPutResult(dst);
898
899 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
900 wasJSNumberCell1b = m_jit.emitUnlinkedJmp();
901
902 // (2) This handles cases where src1 is an immediate number.
903 // Two slow cases - either src2 isn't an immediate, or the arithmetic overflows.
904 m_jit.link(op1imm, m_jit.label());
905 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
906 } else
907 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
908
909 if (opcodeID == op_add) {
910 emitFastArithDeTagImmediate(X86::eax);
911 m_jit.addl_rr(X86::edx, X86::eax);
912 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
913 } else if (opcodeID == op_sub) {
914 m_jit.subl_rr(X86::edx, X86::eax);
915 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
916 emitFastArithReTagImmediate(X86::eax);
917 } else {
918 ASSERT(opcodeID == op_mul);
919 // convert eax & edx from JSImmediates to ints, and check if either are zero
920 emitFastArithImmToInt(X86::edx);
921 X86Assembler::JmpSrc op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
922 m_jit.testl_rr(X86::edx, X86::edx);
923 X86Assembler::JmpSrc op2NonZero = m_jit.emitUnlinkedJne();
924 m_jit.link(op1Zero, m_jit.label());
925 // if either input is zero, add the two together, and check if the result is < 0.
926 // If it is, we have a problem: for N < 0, (N * 0) == -0, which is not representable as a JSImmediate.
927 m_jit.movl_rr(X86::eax, X86::ecx);
928 m_jit.addl_rr(X86::edx, X86::ecx);
929 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJs(), i));
930 // Skip the above check if neither input is zero
931 m_jit.link(op2NonZero, m_jit.label());
932 m_jit.imull_rr(X86::edx, X86::eax);
933 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
934 emitFastArithReTagImmediate(X86::eax);
935 }
936 emitPutResult(dst);
937
938 if (types.second().isReusable() && isSSE2Present()) {
939 m_jit.link(wasJSNumberCell2, m_jit.label());
940 m_jit.link(wasJSNumberCell2b, m_jit.label());
941 }
942 else if (types.first().isReusable() && isSSE2Present()) {
943 m_jit.link(wasJSNumberCell1, m_jit.label());
944 m_jit.link(wasJSNumberCell1b, m_jit.label());
945 }
946}
947
948void CTI::compileBinaryArithOpSlowCase(Instruction* vPC, OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
949{
950 X86Assembler::JmpDst here = m_jit.label();
951 m_jit.link(iter->from, here);
952 if (types.second().isReusable() && isSSE2Present()) {
953 if (!types.first().definitelyIsNumber()) {
954 m_jit.link((++iter)->from, here);
955 m_jit.link((++iter)->from, here);
956 }
957 if (!types.second().definitelyIsNumber()) {
958 m_jit.link((++iter)->from, here);
959 m_jit.link((++iter)->from, here);
960 }
961 m_jit.link((++iter)->from, here);
962 } else if (types.first().isReusable() && isSSE2Present()) {
963 if (!types.first().definitelyIsNumber()) {
964 m_jit.link((++iter)->from, here);
965 m_jit.link((++iter)->from, here);
966 }
967 if (!types.second().definitelyIsNumber()) {
968 m_jit.link((++iter)->from, here);
969 m_jit.link((++iter)->from, here);
970 }
971 m_jit.link((++iter)->from, here);
972 } else
973 m_jit.link((++iter)->from, here);
974
975 // additional entry point to handle -0 cases.
976 if (opcodeID == op_mul)
977 m_jit.link((++iter)->from, here);
978
979 emitGetPutArg(src1, 0, X86::ecx);
980 emitGetPutArg(src2, 4, X86::ecx);
981 if (opcodeID == op_add)
982 emitCTICall(vPC, i, Machine::cti_op_add);
983 else if (opcodeID == op_sub)
984 emitCTICall(vPC, i, Machine::cti_op_sub);
985 else {
986 ASSERT(opcodeID == op_mul);
987 emitCTICall(vPC, i, Machine::cti_op_mul);
988 }
989 emitPutResult(dst);
990}
991
992void CTI::privateCompileMainPass()
993{
994 Instruction* instruction = m_codeBlock->instructions.begin();
995 unsigned instructionCount = m_codeBlock->instructions.size();
996
997 unsigned propertyAccessInstructionIndex = 0;
998 unsigned callLinkInfoIndex = 0;
999
1000 for (unsigned i = 0; i < instructionCount; ) {
1001 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
1002
1003#if ENABLE(OPCODE_SAMPLING)
1004 if (i > 0) // Avoid the overhead of sampling op_enter twice.
1005 m_jit.movl_i32m(m_machine->sampler()->encodeSample(instruction + i), m_machine->sampler()->sampleSlot());
1006#endif
1007
1008 m_labels[i] = m_jit.label();
1009 OpcodeID opcodeID = m_machine->getOpcodeID(instruction[i].u.opcode);
1010 switch (opcodeID) {
1011 case op_mov: {
1012 unsigned src = instruction[i + 2].u.operand;
1013 if (isConstant(src))
1014 m_jit.movl_i32r(asInteger(getConstant(m_callFrame, src)), X86::eax);
1015 else
1016 emitGetArg(src, X86::eax);
1017 emitPutResult(instruction[i + 1].u.operand);
1018 i += 3;
1019 break;
1020 }
1021 case op_add: {
1022 unsigned dst = instruction[i + 1].u.operand;
1023 unsigned src1 = instruction[i + 2].u.operand;
1024 unsigned src2 = instruction[i + 3].u.operand;
1025
1026 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1027 emitGetArg(src2, X86::edx);
1028 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1029 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
1030 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1031 emitPutResult(dst, X86::edx);
1032 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1033 emitGetArg(src1, X86::eax);
1034 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1035 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
1036 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1037 emitPutResult(dst);
1038 } else {
1039 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
1040 if (types.first().mightBeNumber() && types.second().mightBeNumber())
1041 compileBinaryArithOp(op_add, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1042 else {
1043 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1044 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1045 emitCTICall(instruction + i, i, Machine::cti_op_add);
1046 emitPutResult(instruction[i + 1].u.operand);
1047 }
1048 }
1049
1050 i += 5;
1051 break;
1052 }
1053 case op_end: {
1054 if (m_codeBlock->needsFullScopeChain)
1055 emitCTICall(instruction + i, i, Machine::cti_op_end);
1056 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1057 m_jit.pushl_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi);
1058 m_jit.ret();
1059 i += 2;
1060 break;
1061 }
1062 case op_jmp: {
1063 unsigned target = instruction[i + 1].u.operand;
1064 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
1065 i += 2;
1066 break;
1067 }
1068 case op_pre_inc: {
1069 int srcDst = instruction[i + 1].u.operand;
1070 emitGetArg(srcDst, X86::eax);
1071 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1072 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1073 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1074 emitPutResult(srcDst);
1075 i += 2;
1076 break;
1077 }
1078 case op_loop: {
1079 emitSlowScriptCheck(instruction + i, i);
1080
1081 unsigned target = instruction[i + 1].u.operand;
1082 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
1083 i += 2;
1084 break;
1085 }
1086 case op_loop_if_less: {
1087 emitSlowScriptCheck(instruction + i, i);
1088
1089 unsigned target = instruction[i + 3].u.operand;
1090 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1091 if (src2imm) {
1092 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1093 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1094 m_jit.cmpl_i32r(asInteger(src2imm), X86::edx);
1095 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
1096 } else {
1097 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1098 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1099 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1100 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1101 m_jit.cmpl_rr(X86::edx, X86::eax);
1102 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
1103 }
1104 i += 4;
1105 break;
1106 }
1107 case op_loop_if_lesseq: {
1108 emitSlowScriptCheck(instruction + i, i);
1109
1110 unsigned target = instruction[i + 3].u.operand;
1111 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1112 if (src2imm) {
1113 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1114 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1115 m_jit.cmpl_i32r(asInteger(src2imm), X86::edx);
1116 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
1117 } else {
1118 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1119 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1120 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1121 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1122 m_jit.cmpl_rr(X86::edx, X86::eax);
1123 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
1124 }
1125 i += 4;
1126 break;
1127 }
1128 case op_new_object: {
1129 emitCTICall(instruction + i, i, Machine::cti_op_new_object);
1130 emitPutResult(instruction[i + 1].u.operand);
1131 i += 2;
1132 break;
1133 }
1134 case op_put_by_id: {
1135 // In order to be able to repatch both the StructureID and the object offset, we store a single pointer,
1136 // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and we
1137 // generate code such that the StructureID & offset are always at the same distance from it.
1138
1139 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1140 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1141
1142 ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].opcodeIndex == i);
1143 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1144 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
1145 ++propertyAccessInstructionIndex;
1146
1147 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
1148 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1149 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
1150 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1151 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
1152 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1153
1154 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
1155 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1156 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
1157 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
1158
1159 i += 8;
1160 break;
1161 }
1162 case op_get_by_id: {
1163 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
1164 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
1165 // to array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
1166 // to jump back to if one of these trampolines finds a match.
1167
1168 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1169
1170 ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].opcodeIndex == i);
1171
1172 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1173 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
1174 ++propertyAccessInstructionIndex;
1175
1176 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1177 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1178 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
1179 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1180 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
1181
1182 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1183 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
1184 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
1185 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1186
1187 i += 8;
1188 break;
1189 }
1190 case op_instanceof: {
1191 emitGetArg(instruction[i + 2].u.operand, X86::eax); // value
1192 emitGetArg(instruction[i + 3].u.operand, X86::ecx); // baseVal
1193 emitGetArg(instruction[i + 4].u.operand, X86::edx); // proto
1194
1195 // check if any are immediates
1196 m_jit.orl_rr(X86::eax, X86::ecx);
1197 m_jit.orl_rr(X86::edx, X86::ecx);
1198 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
1199
1200 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
1201
1202 // check that all are object type - this is a bit of a bithack to avoid excess branching;
1203 // we check that the sum of the three type codes from StructureIDs is exactly 3 * ObjectType,
1204 // this works because NumberType and StringType are smaller
1205 m_jit.movl_i32r(3 * ObjectType, X86::ecx);
1206 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::eax);
1207 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1208 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::eax, X86::ecx);
1209 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx, X86::ecx);
1210 emitGetArg(instruction[i + 3].u.operand, X86::edx); // reload baseVal
1211 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1212 m_jit.cmpl_rm(X86::ecx, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx);
1213
1214 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1215
1216 // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
1217 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx, X86::ecx);
1218 m_jit.andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
1219 m_jit.cmpl_i32r(ImplementsHasInstance, X86::ecx);
1220
1221 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1222
1223 emitGetArg(instruction[i + 2].u.operand, X86::ecx); // reload value
1224 emitGetArg(instruction[i + 4].u.operand, X86::edx); // reload proto
1225
1226 // optimistically load true result
1227 m_jit.movl_i32r(asInteger(jsBoolean(true)), X86::eax);
1228
1229 X86Assembler::JmpDst loop = m_jit.label();
1230
1231 // load value's prototype
1232 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
1233 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
1234
1235 m_jit.cmpl_rr(X86::ecx, X86::edx);
1236 X86Assembler::JmpSrc exit = m_jit.emitUnlinkedJe();
1237
1238 m_jit.cmpl_i32r(asInteger(jsNull()), X86::ecx);
1239 X86Assembler::JmpSrc goToLoop = m_jit.emitUnlinkedJne();
1240 m_jit.link(goToLoop, loop);
1241
1242 m_jit.movl_i32r(asInteger(jsBoolean(false)), X86::eax);
1243
1244 m_jit.link(exit, m_jit.label());
1245
1246 emitPutResult(instruction[i + 1].u.operand);
1247
1248 i += 5;
1249 break;
1250 }
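// The op_instanceof fast path above walks value's prototype chain without calling out to C++:
// ecx is repeatedly replaced by its StructureID's m_prototype; reaching proto exits with the
// optimistically-loaded true still in eax, while reaching null falls through and overwrites eax
// with false.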
1251 case op_del_by_id: {
1252 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1253 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1254 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1255 emitCTICall(instruction + i, i, Machine::cti_op_del_by_id);
1256 emitPutResult(instruction[i + 1].u.operand);
1257 i += 4;
1258 break;
1259 }
1260 case op_mul: {
1261 unsigned dst = instruction[i + 1].u.operand;
1262 unsigned src1 = instruction[i + 2].u.operand;
1263 unsigned src2 = instruction[i + 3].u.operand;
1264
1265 // For now, only plant a fast int case if the constant operand is greater than zero.
1266 JSValue* src1Value = getConstantImmediateNumericArg(src1);
1267 JSValue* src2Value = getConstantImmediateNumericArg(src2);
1268 int32_t value;
1269 if (src1Value && ((value = JSImmediate::intValue(src1Value)) > 0)) {
1270 emitGetArg(src2, X86::eax);
1271 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1272 emitFastArithDeTagImmediate(X86::eax);
1273 m_jit.imull_i32r(X86::eax, value, X86::eax);
1274 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1275 emitFastArithReTagImmediate(X86::eax);
1276 emitPutResult(dst);
1277 } else if (src2Value && ((value = JSImmediate::intValue(src2Value)) > 0)) {
1278 emitGetArg(src1, X86::eax);
1279 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1280 emitFastArithDeTagImmediate(X86::eax);
1281 m_jit.imull_i32r(X86::eax, value, X86::eax);
1282 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1283 emitFastArithReTagImmediate(X86::eax);
1284 emitPutResult(dst);
1285 } else
1286 compileBinaryArithOp(op_mul, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1287
1288 i += 5;
1289 break;
1290 }
1291 case op_new_func: {
1292 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
1293 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1294 emitCTICall(instruction + i, i, Machine::cti_op_new_func);
1295 emitPutResult(instruction[i + 1].u.operand);
1296 i += 3;
1297 break;
1298 }
1299 case op_call: {
1300 compileOpCall(opcodeID, instruction + i, i, callLinkInfoIndex++);
1301 i += 7;
1302 break;
1303 }
1304 case op_get_global_var: {
1305 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
1306 m_jit.movl_i32r(asInteger(globalObject), X86::eax);
1307 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
1308 emitPutResult(instruction[i + 1].u.operand);
1309 i += 4;
1310 break;
1311 }
1312 case op_put_global_var: {
1313 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
1314 m_jit.movl_i32r(asInteger(globalObject), X86::eax);
1315 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1316 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
1317 i += 4;
1318 break;
1319 }
1320 case op_get_scoped_var: {
1321 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
1322
1323 emitGetArg(RegisterFile::ScopeChain, X86::eax);
1324 while (skip--)
1325 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
1326
1327 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
1328 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
1329 emitPutResult(instruction[i + 1].u.operand);
1330 i += 4;
1331 break;
1332 }
1333 case op_put_scoped_var: {
1334 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
1335
1336 emitGetArg(RegisterFile::ScopeChain, X86::edx);
1337 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1338 while (skip--)
1339 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
1340
1341 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
1342 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
1343 i += 4;
1344 break;
1345 }
1346 case op_tear_off_activation: {
1347 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1348 emitCTICall(instruction + i, i, Machine::cti_op_tear_off_activation);
1349 i += 2;
1350 break;
1351 }
1352 case op_tear_off_arguments: {
1353 emitCTICall(instruction + i, i, Machine::cti_op_tear_off_arguments);
1354 i += 1;
1355 break;
1356 }
1357 case op_ret: {
1358 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
1359 if (m_codeBlock->needsFullScopeChain)
1360 emitCTICall(instruction + i, i, Machine::cti_op_ret_scopeChain);
1361
1362 // Return the result in %eax.
1363 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1364
1365 // Grab the return address.
1366 emitGetArg(RegisterFile::ReturnPC, X86::edx);
1367
1368 // Restore our caller's "r".
1369 emitGetArg(RegisterFile::CallerFrame, X86::edi);
1370
1371 // Return.
1372 m_jit.pushl_r(X86::edx);
1373 m_jit.ret();
1374
1375 i += 2;
1376 break;
1377 }
1378 case op_new_array: {
1379 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
1380 emitPutArg(X86::edx, 0);
1381 emitPutArgConstant(instruction[i + 3].u.operand, 4);
1382 emitCTICall(instruction + i, i, Machine::cti_op_new_array);
1383 emitPutResult(instruction[i + 1].u.operand);
1384 i += 4;
1385 break;
1386 }
1387 case op_resolve: {
1388 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1389 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1390 emitCTICall(instruction + i, i, Machine::cti_op_resolve);
1391 emitPutResult(instruction[i + 1].u.operand);
1392 i += 3;
1393 break;
1394 }
1395 case op_construct: {
1396 compileOpCall(opcodeID, instruction + i, i, callLinkInfoIndex++);
1397 i += 7;
1398 break;
1399 }
1400 case op_construct_verify: {
1401 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1402
1403 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1404 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
1405 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1406 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
1407 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe();
1408
1409 m_jit.link(isImmediate, m_jit.label());
1410 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1411 emitPutResult(instruction[i + 1].u.operand);
1412 m_jit.link(isObject, m_jit.label());
1413
1414 i += 3;
1415 break;
1416 }
1417 case op_get_by_val: {
1418 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1419 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1420 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1421 emitFastArithImmToInt(X86::edx);
1422 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1423 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1424 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1425 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1426
1427 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1428 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1429 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1430 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1431
1432 // Get the value from the vector
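                // (Addressing: base = m_storage in ecx, index = the untagged integer in edx,
                // scale = sizeof(JSValue*). The unsigned compare against m_fastAccessCutoff above
                // also rejects negative indices, since they wrap to large unsigned values.)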
1433 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
1434 emitPutResult(instruction[i + 1].u.operand);
1435 i += 4;
1436 break;
1437 }
1438 case op_resolve_func: {
1439 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1440 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1441 emitCTICall(instruction + i, i, Machine::cti_op_resolve_func);
1442 emitPutResult(instruction[i + 1].u.operand);
1443 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1444 i += 4;
1445 break;
1446 }
1447 case op_sub: {
1448 compileBinaryArithOp(op_sub, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1449 i += 5;
1450 break;
1451 }
1452 case op_put_by_val: {
1453 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1454 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1455 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1456 emitFastArithImmToInt(X86::edx);
1457 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1458 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1459 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1460 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1461
1462 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1463 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1464 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1465 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
1466            // No; oh well, check if the access is within the vector - if so, we may still be okay.
1467 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1468 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1469
1470 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1471            // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
1472 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1473 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
1474
1475 // All good - put the value into the array.
1476 m_jit.link(inFastVector, m_jit.label());
1477 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1478 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1479 i += 4;
1480 break;
1481 }
1482 CTI_COMPILE_BINARY_OP(op_lesseq)
1483 case op_loop_if_true: {
1484 emitSlowScriptCheck(instruction + i, i);
1485
1486 unsigned target = instruction[i + 2].u.operand;
1487 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1488
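                // Truthiness test on the immediate encoding: the zero immediate is the only falsy
                // integer, so any other value with the integer tag bit set takes the branch; beyond
                // that, only the exact true/false immediates are handled inline and anything else
                // falls through to the slow case.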
1489 m_jit.cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
1490 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1491 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1492 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1493
1494 m_jit.cmpl_i32r(asInteger(JSImmediate::trueImmediate()), X86::eax);
1495 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1496 m_jit.cmpl_i32r(asInteger(JSImmediate::falseImmediate()), X86::eax);
1497 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1498
1499 m_jit.link(isZero, m_jit.label());
1500 i += 3;
1501 break;
1502 };
1503 case op_resolve_base: {
1504 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1505 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1506 emitCTICall(instruction + i, i, Machine::cti_op_resolve_base);
1507 emitPutResult(instruction[i + 1].u.operand);
1508 i += 3;
1509 break;
1510 }
1511 case op_negate: {
1512 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1513 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1514 X86Assembler::JmpSrc notImmediate = m_jit.emitUnlinkedJe();
1515
1516 m_jit.cmpl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1517 X86Assembler::JmpSrc zeroImmediate = m_jit.emitUnlinkedJe();
1518 emitFastArithImmToInt(X86::eax);
1519            m_jit.negl_r(X86::eax); // This can't overflow as we only have a 31-bit int at this point
1520 X86Assembler::JmpSrc overflow = emitArithIntToImmWithJump(X86::eax);
1521 emitPutResult(instruction[i + 1].u.operand);
1522 X86Assembler::JmpSrc immediateNegateSuccess = m_jit.emitUnlinkedJmp();
1523
1524 if (!isSSE2Present()) {
1525 m_jit.link(zeroImmediate, m_jit.label());
1526 m_jit.link(overflow, m_jit.label());
1527 m_jit.link(notImmediate, m_jit.label());
1528 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1529 emitCTICall(instruction + i, i, Machine::cti_op_negate);
1530 emitPutResult(instruction[i + 1].u.operand);
1531 } else {
1532 // Slow case immediates
1533 m_slowCases.append(SlowCaseEntry(zeroImmediate, i));
1534 m_slowCases.append(SlowCaseEntry(overflow, i));
1535 m_jit.link(notImmediate, m_jit.label());
1536 ResultType resultType(instruction[i + 3].u.resultType);
1537 if (!resultType.definitelyIsNumber()) {
1538 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1539 StructureID* numberStructureID = m_callFrame->globalData().numberStructureID.get();
1540 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1541 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1542 }
1543 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
1544            // We need 3 copies of the sign bit mask so we can ensure alignment and padding for the 128-bit load
1545 static double doubleSignBit[] = { -0.0, -0.0, -0.0 };
1546 m_jit.xorpd_mr((void*)((((uintptr_t)doubleSignBit)+15)&~15), X86::xmm0);
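                // ((addr + 15) & ~15) rounds up to the next 16-byte boundary; with three 8-byte -0.0
                // entries there is always an aligned 16-byte window of sign-bit masks inside the
                // array, so the 128-bit xorpd operand is valid and it simply flips xmm0's sign bit.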
1547 X86Assembler::JmpSrc wasCell;
1548 if (!resultType.isReusableNumber())
1549 emitAllocateNumber(&m_callFrame->globalData(), i);
1550
1551 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, instruction[i + 1].u.operand, &wasCell,
1552 X86::xmm1, X86::ecx, X86::edx);
1553 m_jit.link(wasCell, m_jit.label());
1554 }
1555 m_jit.link(immediateNegateSuccess, m_jit.label());
1556 i += 4;
1557 break;
1558 }
1559 case op_resolve_skip: {
1560 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1561 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1562 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1563 emitCTICall(instruction + i, i, Machine::cti_op_resolve_skip);
1564 emitPutResult(instruction[i + 1].u.operand);
1565 i += 4;
1566 break;
1567 }
1568 case op_resolve_global: {
1569 // Fast case
1570 unsigned globalObject = asInteger(instruction[i + 2].u.jsCell);
1571 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1572 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1573 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1574
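                // The two Instruction slots read through structureIDAddr/offsetAddr act as an inline
                // cache. The slow path below passes (instruction + i) to cti_op_resolve_global, so
                // the helper can presumably cache the resolved StructureID and property offset back
                // into the bytecode for later executions to hit the fast path.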
1575 // Check StructureID of global object
1576 m_jit.movl_i32r(globalObject, X86::eax);
1577 m_jit.movl_mr(structureIDAddr, X86::edx);
1578 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1579 X86Assembler::JmpSrc noMatch = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1580
1581 // Load cached property
1582 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1583 m_jit.movl_mr(offsetAddr, X86::edx);
1584 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1585 emitPutResult(instruction[i + 1].u.operand);
1586 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1587
1588 // Slow case
1589 m_jit.link(noMatch, m_jit.label());
1590 emitPutArgConstant(globalObject, 0);
1591 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1592 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1593 emitCTICall(instruction + i, i, Machine::cti_op_resolve_global);
1594 emitPutResult(instruction[i + 1].u.operand);
1595 m_jit.link(end, m_jit.label());
1596 i += 6;
1597 break;
1598 }
1599 CTI_COMPILE_BINARY_OP(op_div)
1600 case op_pre_dec: {
1601 int srcDst = instruction[i + 1].u.operand;
1602 emitGetArg(srcDst, X86::eax);
1603 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1604 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1605 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1606 emitPutResult(srcDst);
1607 i += 2;
1608 break;
1609 }
1610 case op_jnless: {
1611 unsigned target = instruction[i + 3].u.operand;
1612 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1613 if (src2imm) {
1614 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1615 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1616 m_jit.cmpl_i32r(asInteger(src2imm), X86::edx);
1617 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1618 } else {
1619 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1620 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1621 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1622 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1623 m_jit.cmpl_rr(X86::edx, X86::eax);
1624 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1625 }
1626 i += 4;
1627 break;
1628 }
1629 case op_not: {
1630 emitGetArg(instruction[i + 2].u.operand, X86::eax);
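                // The first xor strips the boolean tag bits; if any tag bits survive, the operand was
                // not a boolean immediate and we take the slow case. The second xor restores the tag
                // and flips the payload bit, producing the negated boolean.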
1631 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1632 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1633 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1634 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1635 emitPutResult(instruction[i + 1].u.operand);
1636 i += 3;
1637 break;
1638 }
1639 case op_jfalse: {
1640 unsigned target = instruction[i + 2].u.operand;
1641 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1642
1643 m_jit.cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
1644 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1645 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1646 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1647
1648 m_jit.cmpl_i32r(asInteger(JSImmediate::falseImmediate()), X86::eax);
1649 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1650 m_jit.cmpl_i32r(asInteger(JSImmediate::trueImmediate()), X86::eax);
1651 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1652
1653 m_jit.link(isNonZero, m_jit.label());
1654 i += 3;
1655 break;
1656 };
1657 case op_jeq_null: {
1658 unsigned src = instruction[i + 1].u.operand;
1659 unsigned target = instruction[i + 2].u.operand;
1660
1661 emitGetArg(src, X86::eax);
1662 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1663 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1664
1665 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1666 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1667 m_jit.setnz_r(X86::eax);
1668
1669 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1670
1671 m_jit.link(isImmediate, m_jit.label());
1672
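                // Immediate case: masking off ExtendedTagBitUndefined folds the undefined encoding
                // onto null, so a single compare against FullTagTypeNull answers "null or undefined"
                // for both. (The same pattern is used by op_jneq_null, op_eq_null and op_neq_null.)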
1673 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1674 m_jit.andl_rr(X86::eax, X86::ecx);
1675 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1676 m_jit.sete_r(X86::eax);
1677
1678 m_jit.link(wasNotImmediate, m_jit.label());
1679
1680 m_jit.movzbl_rr(X86::eax, X86::eax);
1681 m_jit.cmpl_i32r(0, X86::eax);
1682 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJnz(), i + 2 + target));
1683
1684 i += 3;
1685 break;
1686 };
1687 case op_jneq_null: {
1688 unsigned src = instruction[i + 1].u.operand;
1689 unsigned target = instruction[i + 2].u.operand;
1690
1691 emitGetArg(src, X86::eax);
1692 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1693 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1694
1695 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1696 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1697 m_jit.setz_r(X86::eax);
1698
1699 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1700
1701 m_jit.link(isImmediate, m_jit.label());
1702
1703 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1704 m_jit.andl_rr(X86::eax, X86::ecx);
1705 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1706 m_jit.setne_r(X86::eax);
1707
1708 m_jit.link(wasNotImmediate, m_jit.label());
1709
1710 m_jit.movzbl_rr(X86::eax, X86::eax);
1711 m_jit.cmpl_i32r(0, X86::eax);
1712 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJnz(), i + 2 + target));
1713
1714 i += 3;
1715 break;
1716 }
1717 case op_post_inc: {
1718 int srcDst = instruction[i + 2].u.operand;
1719 emitGetArg(srcDst, X86::eax);
1720 m_jit.movl_rr(X86::eax, X86::edx);
1721 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1722 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1723 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1724 emitPutResult(srcDst, X86::edx);
1725 emitPutResult(instruction[i + 1].u.operand);
1726 i += 3;
1727 break;
1728 }
1729 case op_unexpected_load: {
1730 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1731 m_jit.movl_i32r(asInteger(v), X86::eax);
1732 emitPutResult(instruction[i + 1].u.operand);
1733 i += 3;
1734 break;
1735 }
1736 case op_jsr: {
1737 int retAddrDst = instruction[i + 1].u.operand;
1738 int target = instruction[i + 2].u.operand;
1739 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1740 X86Assembler::JmpDst addrPosition = m_jit.label();
1741 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1742 X86Assembler::JmpDst sretTarget = m_jit.label();
1743 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
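                // The 0 stored above is only a placeholder return address; recording the pair
                // (addrPosition, sretTarget) lets a later pass patch in the real machine address of
                // the label after the jump, which op_sret then jumps through.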
1744 i += 3;
1745 break;
1746 }
1747 case op_sret: {
1748 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1749 i += 2;
1750 break;
1751 }
1752 case op_eq: {
1753 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1754 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1755 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1756 m_jit.cmpl_rr(X86::edx, X86::eax);
1757 m_jit.sete_r(X86::eax);
1758 m_jit.movzbl_rr(X86::eax, X86::eax);
1759 emitTagAsBoolImmediate(X86::eax);
1760 emitPutResult(instruction[i + 1].u.operand);
1761 i += 4;
1762 break;
1763 }
1764 case op_lshift: {
1765 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1766 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1767 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1768 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1769 emitFastArithImmToInt(X86::eax);
1770 emitFastArithImmToInt(X86::ecx);
1771 m_jit.shll_CLr(X86::eax);
1772 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1773 emitPutResult(instruction[i + 1].u.operand);
1774 i += 4;
1775 break;
1776 }
1777 case op_bitand: {
1778 unsigned src1 = instruction[i + 2].u.operand;
1779 unsigned src2 = instruction[i + 3].u.operand;
1780 unsigned dst = instruction[i + 1].u.operand;
1781 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1782 emitGetArg(src2, X86::eax);
1783 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1784 m_jit.andl_i32r(asInteger(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1785 emitPutResult(dst);
1786 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1787 emitGetArg(src1, X86::eax);
1788 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1789 m_jit.andl_i32r(asInteger(value), X86::eax);
1790 emitPutResult(dst);
1791 } else {
1792 emitGetArg(src1, X86::eax);
1793 emitGetArg(src2, X86::edx);
1794 m_jit.andl_rr(X86::edx, X86::eax);
1795 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1796 emitPutResult(dst);
1797 }
1798 i += 5;
1799 break;
1800 }
1801 case op_rshift: {
1802 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1803 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1804 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1805 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1806 emitFastArithImmToInt(X86::ecx);
1807 m_jit.sarl_CLr(X86::eax);
1808 emitFastArithPotentiallyReTagImmediate(X86::eax);
1809 emitPutResult(instruction[i + 1].u.operand);
1810 i += 4;
1811 break;
1812 }
1813 case op_bitnot: {
1814 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1815 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1816 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1817 emitPutResult(instruction[i + 1].u.operand);
1818 i += 3;
1819 break;
1820 }
1821 case op_resolve_with_base: {
1822 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1823 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1824 emitCTICall(instruction + i, i, Machine::cti_op_resolve_with_base);
1825 emitPutResult(instruction[i + 1].u.operand);
1826 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1827 i += 4;
1828 break;
1829 }
1830 case op_new_func_exp: {
1831 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1832 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1833 emitCTICall(instruction + i, i, Machine::cti_op_new_func_exp);
1834 emitPutResult(instruction[i + 1].u.operand);
1835 i += 3;
1836 break;
1837 }
1838 case op_mod: {
1839 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1840 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1841 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1842 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1843 emitFastArithDeTagImmediate(X86::eax);
1844 m_slowCases.append(SlowCaseEntry(emitFastArithDeTagImmediateJumpIfZero(X86::ecx), i));
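                // Detagging the divisor doubles as the divide-by-zero guard: a zero result bails to
                // the slow case before idivl. cdq sign-extends eax into edx:eax; idivl leaves the
                // remainder in edx, which is retagged and moved into eax as the result below.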
1845 m_jit.cdq();
1846 m_jit.idivl_r(X86::ecx);
1847 emitFastArithReTagImmediate(X86::edx);
1848 m_jit.movl_rr(X86::edx, X86::eax);
1849 emitPutResult(instruction[i + 1].u.operand);
1850 i += 4;
1851 break;
1852 }
1853 case op_jtrue: {
1854 unsigned target = instruction[i + 2].u.operand;
1855 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1856
1857 m_jit.cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
1858 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1859 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1860 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1861
1862 m_jit.cmpl_i32r(asInteger(JSImmediate::trueImmediate()), X86::eax);
1863 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1864 m_jit.cmpl_i32r(asInteger(JSImmediate::falseImmediate()), X86::eax);
1865 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1866
1867 m_jit.link(isZero, m_jit.label());
1868 i += 3;
1869 break;
1870 }
1871 CTI_COMPILE_BINARY_OP(op_less)
1872 case op_neq: {
1873 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1874 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1875 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1876 m_jit.cmpl_rr(X86::eax, X86::edx);
1877
1878 m_jit.setne_r(X86::eax);
1879 m_jit.movzbl_rr(X86::eax, X86::eax);
1880 emitTagAsBoolImmediate(X86::eax);
1881
1882 emitPutResult(instruction[i + 1].u.operand);
1883
1884 i += 4;
1885 break;
1886 }
1887 case op_post_dec: {
1888 int srcDst = instruction[i + 2].u.operand;
1889 emitGetArg(srcDst, X86::eax);
1890 m_jit.movl_rr(X86::eax, X86::edx);
1891 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1892 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1893 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1894 emitPutResult(srcDst, X86::edx);
1895 emitPutResult(instruction[i + 1].u.operand);
1896 i += 3;
1897 break;
1898 }
1899 CTI_COMPILE_BINARY_OP(op_urshift)
1900 case op_bitxor: {
1901 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1902 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1903 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1904 m_jit.xorl_rr(X86::edx, X86::eax);
1905 emitFastArithReTagImmediate(X86::eax);
1906 emitPutResult(instruction[i + 1].u.operand);
1907 i += 5;
1908 break;
1909 }
1910 case op_new_regexp: {
1911 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1912 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1913 emitCTICall(instruction + i, i, Machine::cti_op_new_regexp);
1914 emitPutResult(instruction[i + 1].u.operand);
1915 i += 3;
1916 break;
1917 }
1918 case op_bitor: {
1919 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1920 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1921 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1922 m_jit.orl_rr(X86::edx, X86::eax);
1923 emitPutResult(instruction[i + 1].u.operand);
1924 i += 5;
1925 break;
1926 }
1927 case op_call_eval: {
1928 compileOpCall(opcodeID, instruction + i, i, callLinkInfoIndex++);
1929 i += 7;
1930 break;
1931 }
1932 case op_throw: {
1933 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1934 emitCTICall(instruction + i, i, Machine::cti_op_throw);
1935 m_jit.addl_i8r(0x20, X86::esp);
1936 m_jit.popl_r(X86::ebx);
1937 m_jit.popl_r(X86::edi);
1938 m_jit.popl_r(X86::esi);
1939 m_jit.ret();
1940 i += 2;
1941 break;
1942 }
1943 case op_get_pnames: {
1944 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1945 emitCTICall(instruction + i, i, Machine::cti_op_get_pnames);
1946 emitPutResult(instruction[i + 1].u.operand);
1947 i += 3;
1948 break;
1949 }
1950 case op_next_pname: {
1951 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1952 unsigned target = instruction[i + 3].u.operand;
1953 emitCTICall(instruction + i, i, Machine::cti_op_next_pname);
1954 m_jit.testl_rr(X86::eax, X86::eax);
1955 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1956 emitPutResult(instruction[i + 1].u.operand);
1957 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1958 m_jit.link(endOfIter, m_jit.label());
1959 i += 4;
1960 break;
1961 }
1962 case op_push_scope: {
1963 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1964 emitCTICall(instruction + i, i, Machine::cti_op_push_scope);
1965 i += 2;
1966 break;
1967 }
1968 case op_pop_scope: {
1969 emitCTICall(instruction + i, i, Machine::cti_op_pop_scope);
1970 i += 1;
1971 break;
1972 }
1973 CTI_COMPILE_UNARY_OP(op_typeof)
1974 CTI_COMPILE_UNARY_OP(op_is_undefined)
1975 CTI_COMPILE_UNARY_OP(op_is_boolean)
1976 CTI_COMPILE_UNARY_OP(op_is_number)
1977 CTI_COMPILE_UNARY_OP(op_is_string)
1978 CTI_COMPILE_UNARY_OP(op_is_object)
1979 CTI_COMPILE_UNARY_OP(op_is_function)
1980 case op_stricteq: {
1981 compileOpStrictEq(instruction + i, i, OpStrictEq);
1982 i += 4;
1983 break;
1984 }
1985 case op_nstricteq: {
1986 compileOpStrictEq(instruction + i, i, OpNStrictEq);
1987 i += 4;
1988 break;
1989 }
1990 case op_to_jsnumber: {
1991 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1992
1993 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1994 X86Assembler::JmpSrc wasImmediate = m_jit.emitUnlinkedJnz();
1995
1996 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1997
1998 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1999 m_jit.cmpl_i32m(NumberType, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::ecx);
2000
2001 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
2002
2003 m_jit.link(wasImmediate, m_jit.label());
2004
2005 emitPutResult(instruction[i + 1].u.operand);
2006 i += 3;
2007 break;
2008 }
2009 case op_in: {
2010 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2011 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2012 emitCTICall(instruction + i, i, Machine::cti_op_in);
2013 emitPutResult(instruction[i + 1].u.operand);
2014 i += 4;
2015 break;
2016 }
2017 case op_push_new_scope: {
2018 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2019 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
2020 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2021 emitCTICall(instruction + i, i, Machine::cti_op_push_new_scope);
2022 emitPutResult(instruction[i + 1].u.operand);
2023 i += 4;
2024 break;
2025 }
2026 case op_catch: {
2027 emitGetCTIParam(CTI_ARGS_callFrame, X86::edi); // edi := r
2028 emitPutResult(instruction[i + 1].u.operand);
2029 i += 2;
2030 break;
2031 }
2032 case op_jmp_scopes: {
2033 unsigned count = instruction[i + 1].u.operand;
2034 emitPutArgConstant(count, 0);
2035 emitCTICall(instruction + i, i, Machine::cti_op_jmp_scopes);
2036 unsigned target = instruction[i + 2].u.operand;
2037 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
2038 i += 3;
2039 break;
2040 }
2041 case op_put_by_index: {
2042 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
2043 emitPutArgConstant(instruction[i + 2].u.operand, 4);
2044 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
2045 emitCTICall(instruction + i, i, Machine::cti_op_put_by_index);
2046 i += 4;
2047 break;
2048 }
2049 case op_switch_imm: {
2050 unsigned tableIndex = instruction[i + 1].u.operand;
2051 unsigned defaultOffset = instruction[i + 2].u.operand;
2052 unsigned scrutinee = instruction[i + 3].u.operand;
2053
2054 // create jump table for switch destinations, track this switch statement.
2055 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
2056 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
2057 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
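                // The SwitchRecord is resolved after code generation (see the vPC-to-address
                // translation for switch tables in privateCompile below); at runtime the cti call
                // returns the selected destination in eax and we jump straight to it.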
2058
2059 emitGetPutArg(scrutinee, 0, X86::ecx);
2060 emitPutArgConstant(tableIndex, 4);
2061 emitCTICall(instruction + i, i, Machine::cti_op_switch_imm);
2062 m_jit.jmp_r(X86::eax);
2063 i += 4;
2064 break;
2065 }
2066 case op_switch_char: {
2067 unsigned tableIndex = instruction[i + 1].u.operand;
2068 unsigned defaultOffset = instruction[i + 2].u.operand;
2069 unsigned scrutinee = instruction[i + 3].u.operand;
2070
2071 // create jump table for switch destinations, track this switch statement.
2072 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
2073 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
2074 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
2075
2076 emitGetPutArg(scrutinee, 0, X86::ecx);
2077 emitPutArgConstant(tableIndex, 4);
2078 emitCTICall(instruction + i, i, Machine::cti_op_switch_char);
2079 m_jit.jmp_r(X86::eax);
2080 i += 4;
2081 break;
2082 }
2083 case op_switch_string: {
2084 unsigned tableIndex = instruction[i + 1].u.operand;
2085 unsigned defaultOffset = instruction[i + 2].u.operand;
2086 unsigned scrutinee = instruction[i + 3].u.operand;
2087
2088 // create jump table for switch destinations, track this switch statement.
2089 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
2090 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
2091
2092 emitGetPutArg(scrutinee, 0, X86::ecx);
2093 emitPutArgConstant(tableIndex, 4);
2094 emitCTICall(instruction + i, i, Machine::cti_op_switch_string);
2095 m_jit.jmp_r(X86::eax);
2096 i += 4;
2097 break;
2098 }
2099 case op_del_by_val: {
2100 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2101 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2102 emitCTICall(instruction + i, i, Machine::cti_op_del_by_val);
2103 emitPutResult(instruction[i + 1].u.operand);
2104 i += 4;
2105 break;
2106 }
2107 case op_put_getter: {
2108 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
2109 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2110 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2111 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
2112 emitCTICall(instruction + i, i, Machine::cti_op_put_getter);
2113 i += 4;
2114 break;
2115 }
2116 case op_put_setter: {
2117 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
2118 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2119 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2120 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
2121 emitCTICall(instruction + i, i, Machine::cti_op_put_setter);
2122 i += 4;
2123 break;
2124 }
2125 case op_new_error: {
2126 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
2127 emitPutArgConstant(instruction[i + 2].u.operand, 0);
2128 emitPutArgConstant(asInteger(message), 4);
2129 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
2130 emitCTICall(instruction + i, i, Machine::cti_op_new_error);
2131 emitPutResult(instruction[i + 1].u.operand);
2132 i += 4;
2133 break;
2134 }
2135 case op_debug: {
2136 emitPutArgConstant(instruction[i + 1].u.operand, 0);
2137 emitPutArgConstant(instruction[i + 2].u.operand, 4);
2138 emitPutArgConstant(instruction[i + 3].u.operand, 8);
2139 emitCTICall(instruction + i, i, Machine::cti_op_debug);
2140 i += 4;
2141 break;
2142 }
2143 case op_eq_null: {
2144 unsigned dst = instruction[i + 1].u.operand;
2145 unsigned src1 = instruction[i + 2].u.operand;
2146
2147 emitGetArg(src1, X86::eax);
2148 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2149 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
2150
2151 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2152 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
2153 m_jit.setnz_r(X86::eax);
2154
2155 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
2156
2157 m_jit.link(isImmediate, m_jit.label());
2158
2159 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
2160 m_jit.andl_rr(X86::eax, X86::ecx);
2161 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
2162 m_jit.sete_r(X86::eax);
2163
2164 m_jit.link(wasNotImmediate, m_jit.label());
2165
2166 m_jit.movzbl_rr(X86::eax, X86::eax);
2167 emitTagAsBoolImmediate(X86::eax);
2168 emitPutResult(dst);
2169
2170 i += 3;
2171 break;
2172 }
2173 case op_neq_null: {
2174 unsigned dst = instruction[i + 1].u.operand;
2175 unsigned src1 = instruction[i + 2].u.operand;
2176
2177 emitGetArg(src1, X86::eax);
2178 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2179 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
2180
2181 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2182 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
2183 m_jit.setz_r(X86::eax);
2184
2185 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
2186
2187 m_jit.link(isImmediate, m_jit.label());
2188
2189 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
2190 m_jit.andl_rr(X86::eax, X86::ecx);
2191 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
2192 m_jit.setne_r(X86::eax);
2193
2194 m_jit.link(wasNotImmediate, m_jit.label());
2195
2196 m_jit.movzbl_rr(X86::eax, X86::eax);
2197 emitTagAsBoolImmediate(X86::eax);
2198 emitPutResult(dst);
2199
2200 i += 3;
2201 break;
2202 }
2203 case op_enter: {
2204 // Even though CTI doesn't use them, we initialize our constant
2205 // registers to zap stale pointers, to avoid unnecessarily prolonging
2206 // object lifetime and increasing GC pressure.
2207 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
2208 for (size_t j = 0; j < count; ++j)
2209 emitInitRegister(j);
2210
2211 i+= 1;
2212 break;
2213 }
2214 case op_enter_with_activation: {
2215 // Even though CTI doesn't use them, we initialize our constant
2216 // registers to zap stale pointers, to avoid unnecessarily prolonging
2217 // object lifetime and increasing GC pressure.
2218 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
2219 for (size_t j = 0; j < count; ++j)
2220 emitInitRegister(j);
2221
2222 emitCTICall(instruction + i, i, Machine::cti_op_push_activation);
2223 emitPutResult(instruction[i + 1].u.operand);
2224
2225 i+= 2;
2226 break;
2227 }
2228 case op_create_arguments: {
2229 emitCTICall(instruction + i, i, (m_codeBlock->numParameters == 1) ? Machine::cti_op_create_arguments_no_params : Machine::cti_op_create_arguments);
2230 i += 1;
2231 break;
2232 }
2233 case op_convert_this: {
2234 emitGetArg(instruction[i + 1].u.operand, X86::eax);
2235
2236 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
2237 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::edx);
2238 m_jit.testl_i32m(NeedsThisConversion, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx);
2239 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
2240
2241 i += 2;
2242 break;
2243 }
2244 case op_profile_will_call: {
2245 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
2246 m_jit.cmpl_i32m(0, X86::eax);
2247 X86Assembler::JmpSrc noProfiler = m_jit.emitUnlinkedJe();
2248 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::eax);
2249 emitCTICall(instruction + i, i, Machine::cti_op_profile_will_call);
2250 m_jit.link(noProfiler, m_jit.label());
2251
2252 i += 2;
2253 break;
2254 }
2255 case op_profile_did_call: {
2256 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
2257 m_jit.cmpl_i32m(0, X86::eax);
2258 X86Assembler::JmpSrc noProfiler = m_jit.emitUnlinkedJe();
2259 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::eax);
2260 emitCTICall(instruction + i, i, Machine::cti_op_profile_did_call);
2261 m_jit.link(noProfiler, m_jit.label());
2262
2263 i += 2;
2264 break;
2265 }
2266 case op_get_array_length:
2267 case op_get_by_id_chain:
2268 case op_get_by_id_generic:
2269 case op_get_by_id_proto:
2270 case op_get_by_id_self:
2271 case op_get_string_length:
2272 case op_put_by_id_generic:
2273 case op_put_by_id_replace:
2274 case op_put_by_id_transition:
2275 ASSERT_NOT_REACHED();
2276 }
2277 }
2278
2279 ASSERT(propertyAccessInstructionIndex == m_codeBlock->propertyAccessInstructions.size());
2280 ASSERT(callLinkInfoIndex == m_codeBlock->callLinkInfos.size());
2281}
2282
2283
2284void CTI::privateCompileLinkPass()
2285{
2286 unsigned jmpTableCount = m_jmpTable.size();
2287 for (unsigned i = 0; i < jmpTableCount; ++i)
2288 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
2289 m_jmpTable.clear();
2290}
2291
2292#define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
2293 case name: { \
2294 m_jit.link(iter->from, m_jit.label()); \
2295 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
2296 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
2297 emitCTICall(instruction + i, i, Machine::cti_##name); \
2298 emitPutResult(instruction[i + 1].u.operand); \
2299 i += 4; \
2300 break; \
2301 }
2302
2303void CTI::privateCompileSlowCases()
2304{
2305 unsigned propertyAccessInstructionIndex = 0;
2306 unsigned callLinkInfoIndex = 0;
2307
2308 Instruction* instruction = m_codeBlock->instructions.begin();
2309 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
2310 unsigned i = iter->to;
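            // Each SlowCaseEntry pairs an unlinked jump (from) with the bytecode index that emitted
            // it (to); opcodes that generated several slow-case jumps on the hot path consume the
            // extra entries below via (++iter)->from, in the order they were appended.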
2311 switch (OpcodeID opcodeID = m_machine->getOpcodeID(instruction[i].u.opcode)) {
2312 case op_convert_this: {
2313 m_jit.link(iter->from, m_jit.label());
2314 m_jit.link((++iter)->from, m_jit.label());
2315 emitPutArg(X86::eax, 0);
2316 emitCTICall(instruction + i, i, Machine::cti_op_convert_this);
2317 emitPutResult(instruction[i + 1].u.operand);
2318 i += 2;
2319 break;
2320 }
2321 case op_add: {
2322 unsigned dst = instruction[i + 1].u.operand;
2323 unsigned src1 = instruction[i + 2].u.operand;
2324 unsigned src2 = instruction[i + 3].u.operand;
2325 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
2326 X86Assembler::JmpSrc notImm = iter->from;
2327 m_jit.link((++iter)->from, m_jit.label());
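                    // Overflow path: the fast path will already have added the detagged constant into
                    // edx, so back it out here to recover the original operand before calling
                    // cti_op_add; the not-immediate jump is linked in below, after this fix-up.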
2328 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
2329 m_jit.link(notImm, m_jit.label());
2330 emitGetPutArg(src1, 0, X86::ecx);
2331 emitPutArg(X86::edx, 4);
2332 emitCTICall(instruction + i, i, Machine::cti_op_add);
2333 emitPutResult(dst);
2334 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
2335 X86Assembler::JmpSrc notImm = iter->from;
2336 m_jit.link((++iter)->from, m_jit.label());
2337 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
2338 m_jit.link(notImm, m_jit.label());
2339 emitPutArg(X86::eax, 0);
2340 emitGetPutArg(src2, 4, X86::ecx);
2341 emitCTICall(instruction + i, i, Machine::cti_op_add);
2342 emitPutResult(dst);
2343 } else {
2344 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
2345 if (types.first().mightBeNumber() && types.second().mightBeNumber())
2346 compileBinaryArithOpSlowCase(instruction + i, op_add, iter, dst, src1, src2, types, i);
2347 else
2348 ASSERT_NOT_REACHED();
2349 }
2350
2351 i += 5;
2352 break;
2353 }
2354 case op_get_by_val: {
2355 // The slow case that handles accesses to arrays (below) may jump back up to here.
2356 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
2357
2358 X86Assembler::JmpSrc notImm = iter->from;
2359 m_jit.link((++iter)->from, m_jit.label());
2360 m_jit.link((++iter)->from, m_jit.label());
2361 emitFastArithIntToImmNoCheck(X86::edx);
2362 m_jit.link(notImm, m_jit.label());
2363 emitPutArg(X86::eax, 0);
2364 emitPutArg(X86::edx, 4);
2365 emitCTICall(instruction + i, i, Machine::cti_op_get_by_val);
2366 emitPutResult(instruction[i + 1].u.operand);
2367 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2368
2369            // This is the slow case that handles accesses to arrays above the fast cut-off.
2370 // First, check if this is an access to the vector
2371 m_jit.link((++iter)->from, m_jit.label());
2372 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
2373 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
2374
2375            // Okay, we missed the fast region, but the index is still within the vector. Get the value.
2376 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
2377 // Check whether the value loaded is zero; if so we need to return undefined.
2378 m_jit.testl_rr(X86::ecx, X86::ecx);
2379 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
2380 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
2381
2382 i += 4;
2383 break;
2384 }
2385 case op_sub: {
2386 compileBinaryArithOpSlowCase(instruction + i, op_sub, iter, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
2387 i += 5;
2388 break;
2389 }
2390 case op_negate: {
2391 m_jit.link(iter->from, m_jit.label());
2392 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2393 emitCTICall(instruction + i, i, Machine::cti_op_negate);
2394 emitPutResult(instruction[i + 1].u.operand);
2395 i += 4;
2396 break;
2397 }
2398 case op_rshift: {
2399 m_jit.link(iter->from, m_jit.label());
2400 m_jit.link((++iter)->from, m_jit.label());
2401 emitPutArg(X86::eax, 0);
2402 emitPutArg(X86::ecx, 4);
2403 emitCTICall(instruction + i, i, Machine::cti_op_rshift);
2404 emitPutResult(instruction[i + 1].u.operand);
2405 i += 4;
2406 break;
2407 }
2408 case op_lshift: {
2409 X86Assembler::JmpSrc notImm1 = iter->from;
2410 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2411 m_jit.link((++iter)->from, m_jit.label());
2412 emitGetArg(instruction[i + 2].u.operand, X86::eax);
2413 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2414 m_jit.link(notImm1, m_jit.label());
2415 m_jit.link(notImm2, m_jit.label());
2416 emitPutArg(X86::eax, 0);
2417 emitPutArg(X86::ecx, 4);
2418 emitCTICall(instruction + i, i, Machine::cti_op_lshift);
2419 emitPutResult(instruction[i + 1].u.operand);
2420 i += 4;
2421 break;
2422 }
2423 case op_loop_if_less: {
2424 emitSlowScriptCheck(instruction + i, i);
2425
2426 unsigned target = instruction[i + 3].u.operand;
2427 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2428 if (src2imm) {
2429 m_jit.link(iter->from, m_jit.label());
2430 emitPutArg(X86::edx, 0);
2431 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2432 emitCTICall(instruction + i, i, Machine::cti_op_loop_if_less);
2433 m_jit.testl_rr(X86::eax, X86::eax);
2434 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2435 } else {
2436 m_jit.link(iter->from, m_jit.label());
2437 m_jit.link((++iter)->from, m_jit.label());
2438 emitPutArg(X86::eax, 0);
2439 emitPutArg(X86::edx, 4);
2440 emitCTICall(instruction + i, i, Machine::cti_op_loop_if_less);
2441 m_jit.testl_rr(X86::eax, X86::eax);
2442 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2443 }
2444 i += 4;
2445 break;
2446 }
2447 case op_put_by_id: {
2448 m_jit.link(iter->from, m_jit.label());
2449 m_jit.link((++iter)->from, m_jit.label());
2450
2451 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2452 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2453 emitPutArg(X86::eax, 0);
2454 emitPutArg(X86::edx, 8);
2455 X86Assembler::JmpSrc call = emitCTICall(instruction + i, i, Machine::cti_op_put_by_id);
2456
2457 // Track the location of the call; this will be used to recover repatch information.
2458 ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].opcodeIndex == i);
2459 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
2460 ++propertyAccessInstructionIndex;
2461
2462 i += 8;
2463 break;
2464 }
2465 case op_get_by_id: {
2466            // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
2467            // so that we only need to track one pointer into the slow case code - we track a pointer to the location
2468            // of the call (which we can use to look up the repatch information), but should an array-length or
2469 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
2470 // the distance from the call to the head of the slow case.
2471
2472 m_jit.link(iter->from, m_jit.label());
2473 m_jit.link((++iter)->from, m_jit.label());
2474
2475#ifndef NDEBUG
2476 X86Assembler::JmpDst coldPathBegin = m_jit.label();
2477#endif
2478 emitPutArg(X86::eax, 0);
2479 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
2480 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2481 X86Assembler::JmpSrc call = emitCTICall(instruction + i, i, Machine::cti_op_get_by_id);
2482 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
2483 emitPutResult(instruction[i + 1].u.operand);
2484
2485 // Track the location of the call; this will be used to recover repatch information.
2486 ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].opcodeIndex == i);
2487 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
2488 ++propertyAccessInstructionIndex;
2489
2490 i += 8;
2491 break;
2492 }
2493 case op_loop_if_lesseq: {
2494 emitSlowScriptCheck(instruction + i, i);
2495
2496 unsigned target = instruction[i + 3].u.operand;
2497 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2498 if (src2imm) {
2499 m_jit.link(iter->from, m_jit.label());
2500 emitPutArg(X86::edx, 0);
2501 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2502 emitCTICall(instruction + i, i, Machine::cti_op_loop_if_lesseq);
2503 m_jit.testl_rr(X86::eax, X86::eax);
2504 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2505 } else {
2506 m_jit.link(iter->from, m_jit.label());
2507 m_jit.link((++iter)->from, m_jit.label());
2508 emitPutArg(X86::eax, 0);
2509 emitPutArg(X86::edx, 4);
2510 emitCTICall(instruction + i, i, Machine::cti_op_loop_if_lesseq);
2511 m_jit.testl_rr(X86::eax, X86::eax);
2512 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2513 }
2514 i += 4;
2515 break;
2516 }
2517 case op_pre_inc: {
2518 unsigned srcDst = instruction[i + 1].u.operand;
2519 X86Assembler::JmpSrc notImm = iter->from;
2520 m_jit.link((++iter)->from, m_jit.label());
2521 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2522 m_jit.link(notImm, m_jit.label());
2523 emitPutArg(X86::eax, 0);
2524 emitCTICall(instruction + i, i, Machine::cti_op_pre_inc);
2525 emitPutResult(srcDst);
2526 i += 2;
2527 break;
2528 }
2529 case op_put_by_val: {
2530            // Normal slow cases - either the subscript is not an immediate number, or the base is not an array.
2531 X86Assembler::JmpSrc notImm = iter->from;
2532 m_jit.link((++iter)->from, m_jit.label());
2533 m_jit.link((++iter)->from, m_jit.label());
2534 emitFastArithIntToImmNoCheck(X86::edx);
2535 m_jit.link(notImm, m_jit.label());
2536 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2537 emitPutArg(X86::eax, 0);
2538 emitPutArg(X86::edx, 4);
2539 emitPutArg(X86::ecx, 8);
2540 emitCTICall(instruction + i, i, Machine::cti_op_put_by_val);
2541 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2542
2543            // Slow cases for immediate int accesses to arrays
2544 m_jit.link((++iter)->from, m_jit.label());
2545 m_jit.link((++iter)->from, m_jit.label());
2546 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2547 emitPutArg(X86::eax, 0);
2548 emitPutArg(X86::edx, 4);
2549 emitPutArg(X86::ecx, 8);
2550 emitCTICall(instruction + i, i, Machine::cti_op_put_by_val_array);
2551
2552 i += 4;
2553 break;
2554 }
2555 case op_loop_if_true: {
2556 emitSlowScriptCheck(instruction + i, i);
2557
2558 m_jit.link(iter->from, m_jit.label());
2559 emitPutArg(X86::eax, 0);
2560 emitCTICall(instruction + i, i, Machine::cti_op_jtrue);
2561 m_jit.testl_rr(X86::eax, X86::eax);
2562 unsigned target = instruction[i + 2].u.operand;
2563 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2564 i += 3;
2565 break;
2566 }
2567 case op_pre_dec: {
2568 unsigned srcDst = instruction[i + 1].u.operand;
2569 X86Assembler::JmpSrc notImm = iter->from;
2570 m_jit.link((++iter)->from, m_jit.label());
2571 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2572 m_jit.link(notImm, m_jit.label());
2573 emitPutArg(X86::eax, 0);
2574 emitCTICall(instruction + i, i, Machine::cti_op_pre_dec);
2575 emitPutResult(srcDst);
2576 i += 2;
2577 break;
2578 }
2579 case op_jnless: {
2580 unsigned target = instruction[i + 3].u.operand;
2581 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2582 if (src2imm) {
2583 m_jit.link(iter->from, m_jit.label());
2584 emitPutArg(X86::edx, 0);
2585 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2586 emitCTICall(instruction + i, i, Machine::cti_op_jless);
2587 m_jit.testl_rr(X86::eax, X86::eax);
2588 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2589 } else {
2590 m_jit.link(iter->from, m_jit.label());
2591 m_jit.link((++iter)->from, m_jit.label());
2592 emitPutArg(X86::eax, 0);
2593 emitPutArg(X86::edx, 4);
2594 emitCTICall(instruction + i, i, Machine::cti_op_jless);
2595 m_jit.testl_rr(X86::eax, X86::eax);
2596 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2597 }
2598 i += 4;
2599 break;
2600 }
2601 case op_not: {
2602 m_jit.link(iter->from, m_jit.label());
2603 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2604 emitPutArg(X86::eax, 0);
2605 emitCTICall(instruction + i, i, Machine::cti_op_not);
2606 emitPutResult(instruction[i + 1].u.operand);
2607 i += 3;
2608 break;
2609 }
2610 case op_jfalse: {
2611 m_jit.link(iter->from, m_jit.label());
2612 emitPutArg(X86::eax, 0);
2613 emitCTICall(instruction + i, i, Machine::cti_op_jtrue);
2614 m_jit.testl_rr(X86::eax, X86::eax);
2615 unsigned target = instruction[i + 2].u.operand;
2616 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
2617 i += 3;
2618 break;
2619 }
2620 case op_post_inc: {
2621 unsigned srcDst = instruction[i + 2].u.operand;
2622 m_jit.link(iter->from, m_jit.label());
2623 m_jit.link((++iter)->from, m_jit.label());
2624 emitPutArg(X86::eax, 0);
2625 emitCTICall(instruction + i, i, Machine::cti_op_post_inc);
2626 emitPutResult(instruction[i + 1].u.operand);
2627 emitPutResult(srcDst, X86::edx);
2628 i += 3;
2629 break;
2630 }
2631 case op_bitnot: {
2632 m_jit.link(iter->from, m_jit.label());
2633 emitPutArg(X86::eax, 0);
2634 emitCTICall(instruction + i, i, Machine::cti_op_bitnot);
2635 emitPutResult(instruction[i + 1].u.operand);
2636 i += 3;
2637 break;
2638 }
2639 case op_bitand: {
2640 unsigned src1 = instruction[i + 2].u.operand;
2641 unsigned src2 = instruction[i + 3].u.operand;
2642 unsigned dst = instruction[i + 1].u.operand;
2643 if (getConstantImmediateNumericArg(src1)) {
2644 m_jit.link(iter->from, m_jit.label());
2645 emitGetPutArg(src1, 0, X86::ecx);
2646 emitPutArg(X86::eax, 4);
2647 emitCTICall(instruction + i, i, Machine::cti_op_bitand);
2648 emitPutResult(dst);
2649 } else if (getConstantImmediateNumericArg(src2)) {
2650 m_jit.link(iter->from, m_jit.label());
2651 emitPutArg(X86::eax, 0);
2652 emitGetPutArg(src2, 4, X86::ecx);
2653 emitCTICall(instruction + i, i, Machine::cti_op_bitand);
2654 emitPutResult(dst);
2655 } else {
2656 m_jit.link(iter->from, m_jit.label());
2657 emitGetPutArg(src1, 0, X86::ecx);
2658 emitPutArg(X86::edx, 4);
2659 emitCTICall(instruction + i, i, Machine::cti_op_bitand);
2660 emitPutResult(dst);
2661 }
2662 i += 5;
2663 break;
2664 }
2665 case op_jtrue: {
2666 m_jit.link(iter->from, m_jit.label());
2667 emitPutArg(X86::eax, 0);
2668 emitCTICall(instruction + i, i, Machine::cti_op_jtrue);
2669 m_jit.testl_rr(X86::eax, X86::eax);
2670 unsigned target = instruction[i + 2].u.operand;
2671 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2672 i += 3;
2673 break;
2674 }
2675 case op_post_dec: {
2676 unsigned srcDst = instruction[i + 2].u.operand;
2677 m_jit.link(iter->from, m_jit.label());
2678 m_jit.link((++iter)->from, m_jit.label());
2679 emitPutArg(X86::eax, 0);
2680 emitCTICall(instruction + i, i, Machine::cti_op_post_dec);
2681 emitPutResult(instruction[i + 1].u.operand);
2682 emitPutResult(srcDst, X86::edx);
2683 i += 3;
2684 break;
2685 }
2686 case op_bitxor: {
2687 m_jit.link(iter->from, m_jit.label());
2688 emitPutArg(X86::eax, 0);
2689 emitPutArg(X86::edx, 4);
2690 emitCTICall(instruction + i, i, Machine::cti_op_bitxor);
2691 emitPutResult(instruction[i + 1].u.operand);
2692 i += 5;
2693 break;
2694 }
2695 case op_bitor: {
2696 m_jit.link(iter->from, m_jit.label());
2697 emitPutArg(X86::eax, 0);
2698 emitPutArg(X86::edx, 4);
2699 emitCTICall(instruction + i, i, Machine::cti_op_bitor);
2700 emitPutResult(instruction[i + 1].u.operand);
2701 i += 5;
2702 break;
2703 }
2704 case op_eq: {
2705 m_jit.link(iter->from, m_jit.label());
2706 emitPutArg(X86::eax, 0);
2707 emitPutArg(X86::edx, 4);
2708 emitCTICall(instruction + i, i, Machine::cti_op_eq);
2709 emitPutResult(instruction[i + 1].u.operand);
2710 i += 4;
2711 break;
2712 }
2713 case op_neq: {
2714 m_jit.link(iter->from, m_jit.label());
2715 emitPutArg(X86::eax, 0);
2716 emitPutArg(X86::edx, 4);
2717 emitCTICall(instruction + i, i, Machine::cti_op_neq);
2718 emitPutResult(instruction[i + 1].u.operand);
2719 i += 4;
2720 break;
2721 }
2722 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_stricteq);
2723 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_nstricteq);
2724 case op_instanceof: {
2725 m_jit.link(iter->from, m_jit.label());
2726 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2727 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2728 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
2729 emitCTICall(instruction + i, i, Machine::cti_op_instanceof);
2730 emitPutResult(instruction[i + 1].u.operand);
2731 i += 5;
2732 break;
2733 }
2734 case op_mod: {
2735 X86Assembler::JmpSrc notImm1 = iter->from;
2736 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2737 m_jit.link((++iter)->from, m_jit.label());
2738 emitFastArithReTagImmediate(X86::eax);
2739 emitFastArithReTagImmediate(X86::ecx);
2740 m_jit.link(notImm1, m_jit.label());
2741 m_jit.link(notImm2, m_jit.label());
2742 emitPutArg(X86::eax, 0);
2743 emitPutArg(X86::ecx, 4);
2744 emitCTICall(instruction + i, i, Machine::cti_op_mod);
2745 emitPutResult(instruction[i + 1].u.operand);
2746 i += 4;
2747 break;
2748 }
2749 case op_mul: {
2750 int dst = instruction[i + 1].u.operand;
2751 int src1 = instruction[i + 2].u.operand;
2752 int src2 = instruction[i + 3].u.operand;
2753 JSValue* src1Value = getConstantImmediateNumericArg(src1);
2754 JSValue* src2Value = getConstantImmediateNumericArg(src2);
2755 int32_t value;
2756 if (src1Value && ((value = JSImmediate::intValue(src1Value)) > 0)) {
2757 m_jit.link(iter->from, m_jit.label());
2758 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
2759 emitGetPutArg(src1, 0, X86::ecx);
2760 emitGetPutArg(src2, 4, X86::ecx);
2761 emitCTICall(instruction + i, i, Machine::cti_op_mul);
2762 emitPutResult(dst);
2763 } else if (src2Value && ((value = JSImmediate::intValue(src2Value)) > 0)) {
2764 m_jit.link(iter->from, m_jit.label());
2765 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
2766 emitGetPutArg(src1, 0, X86::ecx);
2767 emitGetPutArg(src2, 4, X86::ecx);
2768 emitCTICall(instruction + i, i, Machine::cti_op_mul);
2769 emitPutResult(dst);
2770 } else
2771 compileBinaryArithOpSlowCase(instruction + i, op_mul, iter, dst, src1, src2, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
2772 i += 5;
2773 break;
2774 }
2775
2776 case op_call:
2777 case op_call_eval:
2778 case op_construct: {
2779 int dst = instruction[i + 1].u.operand;
2780 int callee = instruction[i + 2].u.operand;
2781 int argCount = instruction[i + 5].u.operand;
2782
2783 m_jit.link(iter->from, m_jit.label());
2784
2785 // The arguments have been set up on the hot path for op_call_eval
2786 if (opcodeID != op_call_eval)
2787 compileOpCallSetupArgs(instruction + i, (opcodeID == op_construct), false);
2788
2789 // Fast check for JS function.
2790 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
2791 X86Assembler::JmpSrc callLinkFailNotObject = m_jit.emitUnlinkedJne();
2792 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
2793 X86Assembler::JmpSrc callLinkFailNotJSFunction = m_jit.emitUnlinkedJne();
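            // The vptr compare distinguishes JSFunction cells from other callees; an immediate, or a
            // cell with any other vptr, fails these checks and is routed to the generic
            // NotJSFunction path further down.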
2794
2795 // This handles JSFunctions
2796 emitCTICall(instruction + i, i, (opcodeID == op_construct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction);
2797 // initialize the new call frame (pointed to by edx, after the last call), then set edi to point to it.
2798 compileOpCallInitializeCallFrame(callee, argCount);
2799 m_jit.movl_rr(X86::edx, X86::edi);
2800
2801 // Try to link & repatch this call.
2802 CallLinkInfo* info = &(m_codeBlock->callLinkInfos[callLinkInfoIndex]);
2803 emitPutArgConstant(reinterpret_cast<unsigned>(info), 4);
2804 m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation =
2805 emitCTICall(instruction + i, i, Machine::cti_vm_lazyLinkCall);
2806 emitNakedCall(i, X86::eax);
2807 X86Assembler::JmpSrc storeResultForFirstRun = m_jit.emitUnlinkedJmp();
2808
2809 // This is the address for the cold path *after* the first run (which tries to link the call).
2810 m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = m_jit.label();
2811
2812 // The arguments have been set up on the hot path for op_call_eval
2813 if (opcodeID != op_call_eval)
2814 compileOpCallSetupArgs(instruction + i, (opcodeID == op_construct), false);
2815
2816 // Check for JSFunctions.
2817 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
2818 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
2819 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
2820 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
2821
2822 // This handles host functions
2823 X86Assembler::JmpDst notJSFunctionlabel = m_jit.label();
2824 m_jit.link(isNotObject, notJSFunctionlabel);
2825 m_jit.link(callLinkFailNotObject, notJSFunctionlabel);
2826 m_jit.link(callLinkFailNotJSFunction, notJSFunctionlabel);
2827 emitCTICall(instruction + i, i, ((opcodeID == op_construct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
2828 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
2829
2830 // Next, handle JSFunctions...
2831 m_jit.link(isJSFunction, m_jit.label());
2832 emitCTICall(instruction + i, i, (opcodeID == op_construct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction);
2833 // initialize the new call frame (pointed to by edx, after the last call).
2834 compileOpCallInitializeCallFrame(callee, argCount);
2835 m_jit.movl_rr(X86::edx, X86::edi);
2836
2837 // load ctiCode from the new codeBlock.
2838 m_jit.movl_mr(OBJECT_OFFSET(CodeBlock, ctiCode), X86::eax, X86::eax);
2839
2840 // Move the new callframe into edi.
2841 m_jit.movl_rr(X86::edx, X86::edi);
2842
2843 // Check the ctiCode has been generated (if not compile it now), and make the call.
2844 m_jit.testl_rr(X86::eax, X86::eax);
2845 X86Assembler::JmpSrc hasCode = m_jit.emitUnlinkedJne();
2846 emitCTICall(instruction + i, i, Machine::cti_vm_compile);
2847 m_jit.link(hasCode, m_jit.label());
2848
2849 emitNakedCall(i, X86::eax);
2850
2851 // Put the return value in dst. In the interpreter, op_ret does this.
2852 X86Assembler::JmpDst storeResult = m_jit.label();
2853 m_jit.link(wasNotJSFunction, storeResult);
2854 m_jit.link(storeResultForFirstRun, storeResult);
2855 emitPutResult(dst);
2856
2857#if ENABLE(CODEBLOCK_SAMPLING)
2858 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_machine->sampler()->codeBlockSlot());
2859#endif
2860 ++callLinkInfoIndex;
2861
2862 i += 7;
2863 break;
2864 }
2865 case op_to_jsnumber: {
2866 m_jit.link(iter->from, m_jit.label());
2867 m_jit.link((++iter)->from, m_jit.label());
2868
2869 emitPutArg(X86::eax, 0);
2870 emitCTICall(instruction + i, i, Machine::cti_op_to_jsnumber);
2871
2872 emitPutResult(instruction[i + 1].u.operand);
2873 i += 3;
2874 break;
2875 }
2876
2877 default:
2878 ASSERT_NOT_REACHED();
2879 break;
2880 }
2881
2882 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
2883 }
2884
2885 ASSERT(propertyAccessInstructionIndex == m_codeBlock->propertyAccessInstructions.size());
2886 ASSERT(callLinkInfoIndex == m_codeBlock->callLinkInfos.size());
2887}
2888
2889void CTI::privateCompile()
2890{
2891#if ENABLE(CODEBLOCK_SAMPLING)
2892 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_machine->sampler()->codeBlockSlot());
2893#endif
2894#if ENABLE(OPCODE_SAMPLING)
2895 m_jit.movl_i32m(m_machine->sampler()->encodeSample(m_codeBlock->instructions.begin()), m_machine->sampler()->sampleSlot());
2896#endif
2897
2898 // Could use a popl_m, but would need to offset the following instruction if so.
2899 m_jit.popl_r(X86::ecx);
2900 emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);
2901
2902 X86Assembler::JmpSrc slowRegisterFileCheck;
2903 X86Assembler::JmpDst afterRegisterFileCheck;
2904 if (m_codeBlock->codeType == FunctionCode) {
2905 // In the case of a fast linked call, we do not set this up in the caller.
2906 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), RegisterFile::CodeBlock * static_cast<int>(sizeof(Register)), X86::edi);
2907
2908 emitGetCTIParam(CTI_ARGS_registerFile, X86::eax);
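// edx = the high-water mark of the new frame; if it is past the register file's m_end,
// take the slow path below, which calls cti_register_file_check.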
2909 m_jit.leal_mr(m_codeBlock->numCalleeRegisters * sizeof(Register), X86::edi, X86::edx);
2910 m_jit.cmpl_mr(OBJECT_OFFSET(RegisterFile, m_end), X86::eax, X86::edx);
2911 slowRegisterFileCheck = m_jit.emitUnlinkedJg();
2912 afterRegisterFileCheck = m_jit.label();
2913 }
2914
2915 privateCompileMainPass();
2916 privateCompileLinkPass();
2917 privateCompileSlowCases();
2918
2919 if (m_codeBlock->codeType == FunctionCode) {
2920 m_jit.link(slowRegisterFileCheck, m_jit.label());
2921 emitCTICall(m_codeBlock->instructions.begin(), 0, Machine::cti_register_file_check);
2922 X86Assembler::JmpSrc backToBody = m_jit.emitUnlinkedJmp();
2923 m_jit.link(backToBody, afterRegisterFileCheck);
2924 }
2925
2926 ASSERT(m_jmpTable.isEmpty());
2927
2928 void* code = m_jit.copy();
2929 ASSERT(code);
2930
2931 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
2932 for (unsigned i = 0; i < m_switches.size(); ++i) {
2933 SwitchRecord record = m_switches[i];
2934 unsigned opcodeIndex = record.m_opcodeIndex;
2935
2936 if (record.m_type != SwitchRecord::String) {
2937 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
2938 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
2939
2940 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2941
2942 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
2943 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
2944 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
2945 }
2946 } else {
2947 ASSERT(record.m_type == SwitchRecord::String);
2948
2949 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2950
2951 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
2952 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
2953 unsigned offset = it->second.branchOffset;
2954 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
2955 }
2956 }
2957 }
2958
2959 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2960 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
2961
2962 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2963 if (iter->to)
2964 X86Assembler::link(code, iter->from, iter->to);
2965 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
2966 }
2967
2968 // Link absolute addresses for jsr
2969 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
2970 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
2971
2972 for (unsigned i = 0; i < m_codeBlock->propertyAccessInstructions.size(); ++i) {
2973 StructureStubInfo& info = m_codeBlock->propertyAccessInstructions[i];
2974 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_propertyAccessCompilationInfo[i].callReturnLocation);
2975 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_propertyAccessCompilationInfo[i].hotPathBegin);
2976 }
2977 for (unsigned i = 0; i < m_codeBlock->callLinkInfos.size(); ++i) {
2978 CallLinkInfo& info = m_codeBlock->callLinkInfos[i];
2979 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].callReturnLocation);
2980 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].hotPathBegin);
2981 info.hotPathOther = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].hotPathOther);
2982 info.coldPathOther = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].coldPathOther);
2983 }
2984
2985 m_codeBlock->ctiCode = code;
2986}
2987
2988void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2989{
2990 // Check eax is an object of the right StructureID.
2991 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2992 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2993 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2994 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2995
2996 // Checks out okay! - getDirectOffset
2997 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2998 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
2999 m_jit.ret();
3000
3001 void* code = m_jit.copy();
3002 ASSERT(code);
3003
3004 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3005 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3006
3007 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
3008
3009 ctiRepatchCallByReturnAddress(returnAddress, code);
3010}
3011
3012void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
3013{
3014#if USE(CTI_REPATCH_PIC)
3015 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
3016
3017 // We don't want to repatch more than once - in future go to cti_op_get_by_id_fail.
3018 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3019
3020 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
3021 // referencing the prototype object - let's speculatively load its table nice and early!)
3022 JSObject* protoObject = asObject(structureID->prototypeForLookup(m_callFrame));
3023 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
3024 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
3025
3026 // check eax is an object of the right StructureID.
3027 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
3028 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
3029 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
3030 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
3031
3032 // Check the prototype object's StructureID had not changed.
3033 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
3034 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
3035 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
3036
3037 // Checks out okay! - getDirectOffset
3038 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
3039
3040 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
3041
3042 void* code = m_jit.copy();
3043 ASSERT(code);
3044
3045 // Use the repatch information to link the failure cases back to the original slow case routine.
3046 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
3047 X86Assembler::link(code, failureCases1, slowCaseBegin);
3048 X86Assembler::link(code, failureCases2, slowCaseBegin);
3049 X86Assembler::link(code, failureCases3, slowCaseBegin);
3050
3051 // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
3052 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
3053 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
3054
3055 // Track the stub we have created so that it will be deleted later.
3056 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
3057
3058 // Finally repatch the jump to the slow case back in the hot path to jump here instead.
3059 // FIXME: should revert this repatching, on failure.
3060 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
3061 X86Assembler::repatchBranchOffset(jmpLocation, code);
3062#else
3063 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
3064 // referencing the prototype object - let's speculatively load its table nice and early!)
3065 JSObject* protoObject = asObject(structureID->prototypeForLookup(m_callFrame));
3066 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
3067 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
3068
3069 // check eax is an object of the right StructureID.
3070 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
3071 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
3072 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
3073 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
3074
3075 // Check the prototype object's StructureID had not changed.
3076 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
3077 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
3078 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
3079
3080 // Checks out okay! - getDirectOffset
3081 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
3082
3083 m_jit.ret();
3084
3085 void* code = m_jit.copy();
3086 ASSERT(code);
3087
3088 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3089 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3090 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3091
3092 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
3093
3094 ctiRepatchCallByReturnAddress(returnAddress, code);
3095#endif
3096}
3097
3098void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
3099{
3100 ASSERT(count);
3101
3102 Vector<X86Assembler::JmpSrc> bucketsOfFail;
3103
3104 // Check eax is an object of the right StructureID.
3105 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
3106 bucketsOfFail.append(m_jit.emitUnlinkedJne());
3107 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
3108 bucketsOfFail.append(m_jit.emitUnlinkedJne());
3109
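// Walk the prototype chain, checking that each prototype object's StructureID still matches the one recorded in the chain.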
3110 StructureID* currStructureID = structureID;
3111 RefPtr<StructureID>* chainEntries = chain->head();
3112 JSObject* protoObject = 0;
3113 for (unsigned i = 0; i < count; ++i) {
3114 protoObject = asObject(currStructureID->prototypeForLookup(m_callFrame));
3115 currStructureID = chainEntries[i].get();
3116
3117 // Check the prototype object's StructureID had not changed.
3118 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
3119 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
3120 bucketsOfFail.append(m_jit.emitUnlinkedJne());
3121 }
3122 ASSERT(protoObject);
3123
3124 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
3125 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
3126 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
3127 m_jit.ret();
3128
3129 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
3130
3131 void* code = m_jit.copy();
3132 ASSERT(code);
3133
3134 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
3135 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3136
3137 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
3138
3139 ctiRepatchCallByReturnAddress(returnAddress, code);
3140}
3141
3142void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
3143{
3144 // check eax is an object of the right StructureID.
3145 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
3146 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
3147 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
3148 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
3149
3150 // checks out okay! - putDirectOffset
3151 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
3152 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
3153 m_jit.ret();
3154
3155 void* code = m_jit.copy();
3156 ASSERT(code);
3157
3158 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
3159 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
3160
3161 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
3162
3163 ctiRepatchCallByReturnAddress(returnAddress, code);
3164}
3165
3166extern "C" {
3167
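// Called from the stub generated by privateCompilePutByIdTransition() below via a plain cdecl call;
// returning the base object puts it back in eax for the stub code that follows the call.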
3168 static JSObject* resizePropertyStorage(JSObject* baseObject, size_t oldSize, size_t newSize)
3169 {
3170 baseObject->allocatePropertyStorageInline(oldSize, newSize);
3171 return baseObject;
3172 }
3173
3174}
3175
3176static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
3177{
3178 return oldStructureID->propertyStorageCapacity() != newStructureID->propertyStorageCapacity();
3179}
3180
3181void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
3182{
3183 Vector<X86Assembler::JmpSrc, 16> failureCases;
3184 // check eax is an object of the right StructureID.
3185 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
3186 failureCases.append(m_jit.emitUnlinkedJne());
3187 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
3188 failureCases.append(m_jit.emitUnlinkedJne());
3189 Vector<X86Assembler::JmpSrc> successCases;
3190
3191 // ecx = baseObject
3192 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
3193 // proto(ecx) = baseObject->structureID()->prototype()
3194 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
3195 failureCases.append(m_jit.emitUnlinkedJne());
3196 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
3197
3198 // ecx = baseObject->m_structureID
3199 for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
3200 // null check the prototype
3201 m_jit.cmpl_i32r(asInteger(jsNull()), X86::ecx);
3202 successCases.append(m_jit.emitUnlinkedJe());
3203
3204 // Check the structure id
3205 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
3206 failureCases.append(m_jit.emitUnlinkedJne());
3207
3208 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
3209 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
3210 failureCases.append(m_jit.emitUnlinkedJne());
3211 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
3212 }
3213
3214 failureCases.append(m_jit.emitUnlinkedJne());
3215 for (unsigned i = 0; i < successCases.size(); ++i)
3216 m_jit.link(successCases[i], m_jit.label());
3217
3218 X86Assembler::JmpSrc callTarget;
3219
3220 // emit a call only if storage realloc is needed
3221 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
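// Save edx (the value being stored) across the call, then push the arguments in cdecl order:
// the base object in eax is pushed last, so it becomes resizePropertyStorage's first argument.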
3222 m_jit.pushl_r(X86::edx);
3223 m_jit.pushl_i32(newStructureID->propertyStorageCapacity());
3224 m_jit.pushl_i32(oldStructureID->propertyStorageCapacity());
3225 m_jit.pushl_r(X86::eax);
3226 callTarget = m_jit.emitCall();
3227 m_jit.addl_i32r(3 * sizeof(void*), X86::esp);
3228 m_jit.popl_r(X86::edx);
3229 }
3230
3231 // Adjust the StructureID refcounts directly in memory (m_refCount is assumed to be the first word);
3232 // the decrement is safe because the CodeBlock ensures oldStructureID->m_refCount > 0.
3233 m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
3234 m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
3235 m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
3236
3237 // write the value
3238 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
3239 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
3240
3241 m_jit.ret();
3242
3243 X86Assembler::JmpSrc failureJump;
3244 if (failureCases.size()) {
3245 for (unsigned i = 0; i < failureCases.size(); ++i)
3246 m_jit.link(failureCases[i], m_jit.label());
3247 m_jit.emitRestoreArgumentReferenceForTrampoline();
3248 failureJump = m_jit.emitUnlinkedJmp();
3249 }
3250
3251 void* code = m_jit.copy();
3252 ASSERT(code);
3253
3254 if (failureCases.size())
3255 X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
3256
3257 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
3258 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(resizePropertyStorage));
3259
3260 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
3261
3262 ctiRepatchCallByReturnAddress(returnAddress, code);
3263}
3264
3265void CTI::unlinkCall(CallLinkInfo* callLinkInfo)
3266{
3267 // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
3268 // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
3269 // match). Reset the check so it no longer matches.
3270 reinterpret_cast<void**>(callLinkInfo->hotPathBegin)[-1] = asPointer(JSImmediate::impossibleValue());
3271}
3272
3273void CTI::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, void* ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount)
3274{
3275 // Currently we only link calls with the exact number of arguments.
3276 if (callerArgCount == calleeCodeBlock->numParameters) {
3277 ASSERT(!callLinkInfo->isLinked());
3278
3279 calleeCodeBlock->addCaller(callLinkInfo);
3280
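// Overwrite the JSFunction* embedded in the hot path's check so it matches this callee,
// and repatch the hot path's call to jump to the callee's compiled code.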
3281 reinterpret_cast<void**>(callLinkInfo->hotPathBegin)[-1] = callee;
3282 ctiRepatchCallByReturnAddress(callLinkInfo->hotPathOther, ctiCode);
3283 }
3284
3285 // repatch the instruction that jumps out to the cold path, so that we only try to link once.
3286 void* repatchCheck = reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(callLinkInfo->hotPathBegin) + repatchOffsetOpCallCall);
3287 ctiRepatchCallByReturnAddress(repatchCheck, callLinkInfo->coldPathOther);
3288}
3289
3290void* CTI::privateCompileArrayLengthTrampoline()
3291{
3292 // Check eax is an array
3293 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
3294 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
3295 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
3296 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
3297
3298 // Checks out okay! - get the length from the storage
3299 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
3300 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
3301
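// Re-tag the length as an immediate integer: double it and set the low tag bit;
// lengths too large for a 31-bit immediate overflow and fall back to the slow case.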
3302 m_jit.addl_rr(X86::eax, X86::eax);
3303 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
3304 m_jit.addl_i8r(1, X86::eax);
3305
3306 m_jit.ret();
3307
3308 void* code = m_jit.copy();
3309 ASSERT(code);
3310
3311 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3312 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3313 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3314
3315 return code;
3316}
3317
3318void* CTI::privateCompileStringLengthTrampoline()
3319{
3320 // Check eax is a string
3321 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
3322 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
3323 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
3324 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
3325
3326 // Checks out okay! - get the length from the UString.
3327 m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
3328 m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
3329
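// As in the array-length trampoline, re-tag the length as an immediate integer, bailing out on overflow.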
3330 m_jit.addl_rr(X86::eax, X86::eax);
3331 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
3332 m_jit.addl_i8r(1, X86::eax);
3333
3334 m_jit.ret();
3335
3336 void* code = m_jit.copy();
3337 ASSERT(code);
3338
3339 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3340 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3341 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3342
3343 return code;
3344}
3345
3346void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
3347{
3348 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
3349
3350 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
3351 // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
3352 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_generic));
3353
3354 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
3355 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
3356 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
3357}
3358
3359void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
3360{
3361 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
3362
3363 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
3364 // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
3365 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_put_by_id_generic));
3366
3367 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
3368 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
3369 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
3370}
3371
3372void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
3373{
3374 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
3375
3376 // We don't want to repatch more than once - in future go to cti_op_get_by_id_fail.
3377 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3378
3379 // Check eax is an array
3380 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
3381 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
3382 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
3383 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
3384
3385 // Checks out okay! - get the length from the storage
3386 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
3387 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
3388
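// Re-tag the length in ecx as an immediate integer, falling back to the slow case on overflow.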
3389 m_jit.addl_rr(X86::ecx, X86::ecx);
3390 X86Assembler::JmpSrc failureClobberedECX = m_jit.emitUnlinkedJo();
3391 m_jit.addl_i8r(1, X86::ecx);
3392
3393 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
3394
3395 m_jit.link(failureClobberedECX, m_jit.label());
3396 m_jit.emitRestoreArgumentReference();
3397 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJmp();
3398
3399 void* code = m_jit.copy();
3400 ASSERT(code);
3401
3402 // Use the repatch information to link the failure cases back to the original slow case routine.
3403 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
3404 X86Assembler::link(code, failureCases1, slowCaseBegin);
3405 X86Assembler::link(code, failureCases2, slowCaseBegin);
3406 X86Assembler::link(code, failureCases3, slowCaseBegin);
3407
3408 // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
3409 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
3410 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
3411
3412 // Track the stub we have created so that it will be deleted later.
3413 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
3414
3415 // Finally repatch the jump to the slow case back in the hot path to jump here instead.
3416 // FIXME: should revert this repatching, on failure.
3417 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
3418 X86Assembler::repatchBranchOffset(jmpLocation, code);
3419}
3420
3421void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
3422{
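// dst = variableObject->d->registers[index]: chase the private data pointer, then the register array, then load the indexed Register.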
3423 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
3424 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
3425 m_jit.movl_mr(index * sizeof(Register), dst, dst);
3426}
3427
3428void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
3429{
3430 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
3431 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
3432 m_jit.movl_rm(src, index * sizeof(Register), variableObject);
3433}
3434
3435#if ENABLE(WREC)
3436
3437void* CTI::compileRegExp(Machine* machine, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
3438{
3439 // TODO: better error messages
3440 if (pattern.size() > MaxPatternSize) {
3441 *error_ptr = "regular expression too large";
3442 return 0;
3443 }
3444
3445 X86Assembler jit(machine->jitCodeBuffer());
3446 WRECParser parser(pattern, ignoreCase, multiline, jit);
3447
3448 jit.emitConvertToFastCall();
3449 // (0) Setup:
3450 // Preserve regs & initialize outputRegister.
3451 jit.pushl_r(WRECGenerator::outputRegister);
3452 jit.pushl_r(WRECGenerator::currentValueRegister);
3453 // push pos onto the stack, both to preserve and as a parameter available to parseDisjunction
3454 jit.pushl_r(WRECGenerator::currentPositionRegister);
3455 // load output pointer
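// The output vector is the fourth argument and lives on the stack: skip the return address plus the
// three registers just pushed (16 bytes). The extra words for MSVC presumably account for its
// fast-call conversion leaving the original arguments on the stack.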
3456 jit.movl_mr(16
3457#if COMPILER(MSVC)
3458 + 3 * sizeof(void*)
3459#endif
3460 , X86::esp, WRECGenerator::outputRegister);
3461
3462 // restart point on match fail.
3463 WRECGenerator::JmpDst nextLabel = jit.label();
3464
3465 // (1) Parse Disjunction:
3466
3467 // Parsing the disjunction should fully consume the pattern.
3468 JmpSrcVector failures;
3469 parser.parseDisjunction(failures);
3470 if (!parser.isEndOfPattern()) {
3471 parser.m_err = WRECParser::Error_malformedPattern;
3472 }
3473 if (parser.m_err) {
3474 // TODO: better error messages
3475 *error_ptr = "TODO: better error messages";
3476 return 0;
3477 }
3478
3479 // (2) Success:
3480 // Set return value & pop registers from the stack.
3481
3482 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
3483 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
3484
3485 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
3486 jit.popl_r(X86::eax);
3487 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3488 jit.popl_r(WRECGenerator::currentValueRegister);
3489 jit.popl_r(WRECGenerator::outputRegister);
3490 jit.ret();
3491
3492 jit.link(noOutput, jit.label());
3493
3494 jit.popl_r(X86::eax);
3495 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3496 jit.popl_r(WRECGenerator::currentValueRegister);
3497 jit.popl_r(WRECGenerator::outputRegister);
3498 jit.ret();
3499
3500 // (3) Failure:
3501 // All failures link to here. Advance the start position and, if it is still within the input, loop.
3502 // Otherwise, return the failure value.
3503 WRECGenerator::JmpDst here = jit.label();
3504 for (unsigned i = 0; i < failures.size(); ++i)
3505 jit.link(failures[i], here);
3506 failures.clear();
3507
3508 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
3509 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
3510 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
3511 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
3512 jit.link(jit.emitUnlinkedJle(), nextLabel);
3513
3514 jit.addl_i8r(4, X86::esp);
3515
3516 jit.movl_i32r(-1, X86::eax);
3517 jit.popl_r(WRECGenerator::currentValueRegister);
3518 jit.popl_r(WRECGenerator::outputRegister);
3519 jit.ret();
3520
3521 *numSubpatterns_ptr = parser.m_numSubpatterns;
3522
3523 void* code = jit.copy();
3524 ASSERT(code);
3525 return code;
3526}
3527
3528#endif // ENABLE(WREC)
3529
3530} // namespace JSC
3531
3532#endif // ENABLE(CTI)