Changeset 36418 in webkit for trunk/JavaScriptCore
- Timestamp: Sep 14, 2008, 7:18:13 PM
- Location: trunk/JavaScriptCore
- Files: 8 edited
trunk/JavaScriptCore/ChangeLog
r36417 → r36418

     property and pass that register to instanceof.
     * kjs/nodes.h:
 
+2008-09-14  Gavin Barraclough  <[email protected]>
+
+        Reviewed by Sam Weinig.
+
+        Accelerated property accesses.
+
+        Inline more of the array access code into the JIT code for get/put_by_val.
+        Accelerate get/put_by_id by speculatively inlining a disabled direct access
+        into the hot path of the code, and repatch this with the correct StructureID
+        and property map offset once these are known. In the case of accesses to the
+        prototype and reading the array-length a trampoline is generated, and the
+        branch to the slow-case is relinked to jump to this.
+
+        By repatching, we mean rewriting the x86 instruction stream. Instructions are
+        only modified in a simple fashion - altering immediate operands, memory access
+        displacements, and branch offsets.
+
+        For regular get_by_id/put_by_id accesses to an object, a StructureID in an
+        instruction's immediate operand is updated, and a memory access operation's
+        displacement is updated to access the correct field on the object. In the case
+        of more complex accesses (array length and get_by_id_prototype) the offset on
+        the branch to slow-case is updated, to now jump to a trampoline.
+
+        +2.8% sunspider, +13% v8-tests
+
+        * VM/CTI.cpp:
+        (JSC::CTI::emitCall):
+        (JSC::CTI::emitJumpSlowCaseIfNotJSCell):
+        (JSC::CTI::CTI):
+        (JSC::CTI::privateCompileMainPass):
+        (JSC::CTI::privateCompileSlowCases):
+        (JSC::CTI::privateCompile):
+        (JSC::CTI::privateCompileGetByIdSelf):
+        (JSC::CTI::privateCompileGetByIdProto):
+        (JSC::CTI::privateCompileGetByIdChain):
+        (JSC::CTI::privateCompilePutByIdReplace):
+        (JSC::CTI::privateCompilePutByIdTransition):
+        (JSC::CTI::privateCompileArrayLengthTrampoline):
+        (JSC::CTI::privateCompileStringLengthTrampoline):
+        (JSC::CTI::patchGetByIdSelf):
+        (JSC::CTI::patchPutByIdReplace):
+        (JSC::CTI::privateCompilePatchGetArrayLength):
+        (JSC::CTI::privateCompilePatchGetStringLength):
+        * VM/CTI.h:
+        (JSC::CTI::compileGetByIdSelf):
+        (JSC::CTI::compileGetByIdProto):
+        (JSC::CTI::compileGetByIdChain):
+        (JSC::CTI::compilePutByIdReplace):
+        (JSC::CTI::compilePutByIdTransition):
+        (JSC::CTI::compileArrayLengthTrampoline):
+        (JSC::CTI::compileStringLengthTrampoline):
+        (JSC::CTI::compilePatchGetArrayLength):
+        (JSC::CTI::compilePatchGetStringLength):
+        * VM/CodeBlock.cpp:
+        (JSC::CodeBlock::dump):
+        (JSC::CodeBlock::~CodeBlock):
+        * VM/CodeBlock.h:
+        (JSC::StructureStubInfo::StructureStubInfo):
+        (JSC::CodeBlock::getStubInfo):
+        * VM/Machine.cpp:
+        (JSC::Machine::tryCTICachePutByID):
+        (JSC::Machine::tryCTICacheGetByID):
+        (JSC::Machine::cti_op_put_by_val_array):
+        * VM/Machine.h:
+        * masm/X86Assembler.h:
+        (JSC::X86Assembler::):
+        (JSC::X86Assembler::cmpl_i8m):
+        (JSC::X86Assembler::emitUnlinkedJa):
+        (JSC::X86Assembler::getRelocatedAddress):
+        (JSC::X86Assembler::getDifferenceBetweenLabels):
+        (JSC::X86Assembler::emitModRm_opmsib):
 
 2008-09-14  Gavin Barraclough  <[email protected]>
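To make "rewriting the x86 instruction stream" concrete: the inline fast path is compiled with a StructureID compare against a value no object will ever have, and a property access at a deliberately out-of-range offset, so it always falls through to the slow case until the first cache fill rewrites both 32-bit fields in place. Below is a minimal sketch of the two helpers this patch adds to X86Assembler.h; the only assumption beyond the patch itself is that 'where' addresses the byte just past the field being rewritten.

    #include <cstdint>

    // The four bytes at where - 4 are the imm32 of the planted cmpl; overwrite
    // them with the StructureID observed on the first successful access.
    static void repatchImmediate(intptr_t where, int32_t value)
    {
        reinterpret_cast<int32_t*>(where)[-1] = value;
    }

    // Likewise for the disp32 of the property-storage load/store.
    static void repatchDisplacement(intptr_t where, intptr_t value)
    {
        reinterpret_cast<intptr_t*>(where)[-1] = value;
    }

    // First execution: cmpl $-1, m_structureID(%eax) never matches, so we call
    // into the runtime, learn the real StructureID and slot offset, and then:
    //
    //   repatchImmediate(hotPathBegin + repatchOffsetGetByIdStructureID, realID);
    //   repatchDisplacement(hotPathBegin + repatchOffsetGetByIdPropertyMapOffset,
    //                       slot * sizeof(JSValue*));
    //
    // After that the access is a compare plus two loads, with no runtime call.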
trunk/JavaScriptCore/VM/CTI.cpp
r36417 → r36418

 #endif
 
-ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
+ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
 {
 #if ENABLE(SAMPLING_TOOL)
     m_jit.movl_i32m(1, &inCalledCode);
 #endif
-    m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
+    X86Assembler::JmpSrc call = m_jit.emitCall();
+    m_calls.append(CallRecord(call, helper, opcodeIndex));
     emitDebugExceptionCheck();
 #if ENABLE(SAMPLING_TOOL)
     m_jit.movl_i32m(0, &inCalledCode);
 #endif
+
+    return call;
 }

(The same change - returning the JmpSrc of the emitted call - is applied to the CTIHelper_p, CTIHelper_b, CTIHelper_v and CTIHelper_s overloads of emitCall.)

+ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
+{
+    m_jit.testl_i32r(JSImmediate::TagMask, reg);
+    m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
+}
…
     , m_codeBlock(codeBlock)
     , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
+    , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
 {
 }
…
     Instruction* instruction = m_codeBlock->instructions.begin();
     unsigned instructionCount = m_codeBlock->instructions.size();
+
+    unsigned structureIDInstructionIndex = 0;
 
     for (unsigned i = 0; i < instructionCount; ) {
…
         }
         case op_put_by_id: {
-            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
-            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
+            // In order to be able to repatch both the StructureID and the object offset, we store one pointer
+            // to just after the point where the arguments have been loaded into registers ('hotPathBegin'), and
+            // we generate code such that the StructureID & offset are always at the same distance from this.
+
             emitGetArg(instruction[i + 1].u.operand, X86::eax);
             emitGetArg(instruction[i + 3].u.operand, X86::edx);
-            emitPutArg(X86::eax, 0); // leave the base in eax
-            emitPutArg(X86::edx, 8); // leave the base in edx
-            emitCall(i, Machine::cti_op_put_by_id);
+
+            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
+            X86Assembler::JmpDst hotPathBegin = m_jit.label();
+            m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
+            ++structureIDInstructionIndex;
+
+            // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
+            emitJumpSlowCaseIfNotJSCell(X86::eax, i);
+            // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
+            m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
+            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
+
+            // Plant a store to a bogus offset in the object's property map; we will patch this later, if it is to be used.
+            m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
+            m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
+
             i += 8;
             break;
         }
         case op_get_by_id: {
-            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
-            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
+            // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
+            // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to
+            // jump to the array-length / prototype access trampolines), and finally we also track the property-map access
+            // offset as a label to jump back to if one of these trampolines finds a match.
+
             emitGetArg(instruction[i + 2].u.operand, X86::eax);
-            emitPutArg(X86::eax, 0); // leave the base in eax
-            emitCall(i, Machine::cti_op_get_by_id);
-            emitPutResult(instruction[i + 1].u.operand);
+
+            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
+
+            X86Assembler::JmpDst hotPathBegin = m_jit.label();
+            m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
+            ++structureIDInstructionIndex;
+
+            emitJumpSlowCaseIfNotJSCell(X86::eax, i);
+            m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
+            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
+
+            m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
+            m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
+            emitPutResult(instruction[i + 1].u.operand, X86::ecx);
+
             i += 8;
             break;
…
             m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
             m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
+
+            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
+            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
             m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
             m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
 
-            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
-            m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
+            // Get the value from the vector
+            m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
             emitPutResult(instruction[i + 1].u.operand);
             i += 4;
…
             emitGetArg(instruction[i + 1].u.operand, X86::eax);
             emitGetArg(instruction[i + 2].u.operand, X86::edx);
-            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
             emitJumpSlowCaseIfNotImm(X86::edx, i);
             emitFastArithImmToInt(X86::edx);
…
             m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
             m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
+
+            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
+            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
             m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
+            X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
+            // No; oh well, check if the access is within the vector - if so, we may still be okay.
+            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
             m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
 
-            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
-            m_jit.movl_rm(X86::ecx, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::eax, X86::edx, sizeof(JSValue*));
+            // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
+            // FIXME: should be able to handle an initial write to the array: increment the number of items in the array, and
+            // potentially update the fast access cutoff.
+            m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
+            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
+
+            // All good - put the value into the array.
+            m_jit.link(inFastVector, m_jit.label());
+            emitGetArg(instruction[i + 3].u.operand, X86::eax);
+            m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
             i += 4;
             break;
…
         }
     }
+
+    ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
 }
…
 void CTI::privateCompileSlowCases()
 {
+    unsigned structureIDInstructionIndex = 0;
+
     Instruction* instruction = m_codeBlock->instructions.begin();
     for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
-        int i = iter->to;
+        unsigned i = iter->to;
         m_jit.emitRestoreArgumentReference();
         switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
…
         }
         case op_get_by_val: {
+            // The slow case that handles accesses to arrays (below) may jump back up to here.
+            X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
+
             X86Assembler::JmpSrc notImm = iter->from;
-            m_jit.link((++iter)->from, m_jit.label());
             m_jit.link((++iter)->from, m_jit.label());
             m_jit.link((++iter)->from, m_jit.label());
…
             emitCall(i, Machine::cti_op_get_by_val);
             emitPutResult(instruction[i + 1].u.operand);
+            m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
+
+            // This is the slow case that handles accesses to arrays above the fast cut-off.
+            // First, check if this is an access within the vector.
+            m_jit.link((++iter)->from, m_jit.label());
+            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
+            m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
+
+            // Okay, missed the fast region, but it is still in the vector. Get the value.
+            m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
+            // Check whether the value loaded is zero; if so we need to return undefined.
+            m_jit.testl_rr(X86::ecx, X86::ecx);
+            m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
+            emitPutResult(instruction[i + 1].u.operand, X86::ecx);
+
             i += 4;
             break;
…
             break;
         }
+        case op_put_by_id: {
+            m_jit.link(iter->from, m_jit.label());
+            m_jit.link((++iter)->from, m_jit.label());
+
+            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
+            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
+            emitPutArg(X86::eax, 0);
+            emitPutArg(X86::edx, 8);
+            X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
+
+            // Track the location of the call; this will be used to recover repatch information.
+            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
+            m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
+            ++structureIDInstructionIndex;
+
+            i += 8;
+            break;
+        }
+        case op_get_by_id: {
+            // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
+            // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+            // of the call (which we can use to look up the repatch information), but should an array-length or
+            // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
+            // the distance from the call to the head of the slow case.
+
+            m_jit.link(iter->from, m_jit.label());
+            m_jit.link((++iter)->from, m_jit.label());
+
+#ifndef NDEBUG
+            X86Assembler::JmpDst coldPathBegin = m_jit.label();
+#endif
+            emitPutArg(X86::eax, 0);
+            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
+            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
+            X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
+            emitPutResult(instruction[i + 1].u.operand);
+
+            // Track the location of the call; this will be used to recover repatch information.
+            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
+            m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
+            ++structureIDInstructionIndex;
+
+            i += 8;
+            break;
+        }
         case op_loop_if_lesseq: {
             emitSlowScriptCheck(i);
…
         }
         case op_put_by_val: {
+            // Normal slow cases - either is not an immediate imm, or is an array.
             X86Assembler::JmpSrc notImm = iter->from;
-            m_jit.link((++iter)->from, m_jit.label());
             m_jit.link((++iter)->from, m_jit.label());
             m_jit.link((++iter)->from, m_jit.label());
             emitFastArithIntToImmNoCheck(X86::edx);
             m_jit.link(notImm, m_jit.label());
+            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
             emitPutArg(X86::eax, 0);
             emitPutArg(X86::edx, 4);
             emitPutArg(X86::ecx, 8);
             emitCall(i, Machine::cti_op_put_by_val);
+            m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
+
+            // slow cases for immediate int accesses to arrays
+            m_jit.link((++iter)->from, m_jit.label());
+            m_jit.link((++iter)->from, m_jit.label());
+            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
+            emitPutArg(X86::eax, 0);
+            emitPutArg(X86::edx, 4);
+            emitPutArg(X86::ecx, 8);
+            emitCall(i, Machine::cti_op_put_by_val_array);
+
             i += 4;
             break;
…
             m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
         }
+
+        ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
 }
…
         X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
 
+    for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
+        StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
+        info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
+        info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
+    }
+
     m_codeBlock->ctiCode = code;
 }
 
-void* CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset)
+void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
 {
     // Check eax is an object of the right StructureID.
…
     X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
 
-    m_codeBlock->structureIDAccessStubs.append(code);
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
 
-    return code;
-}
-
-void* CTI::privateCompileGetByIdProto(ExecState* exec, StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset)
-{
+    ctiRepatchCallByReturnAddress(returnAddress, code);
+}
+
+void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
+{
+#if USE(CTI_REPATCH_PIC)
+    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
+
+    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
+    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
+
     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
     // referencing the prototype object - let's speculatively load its table nice and early!)
-    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(exec));
+    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
     m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
…
 
     // Checks out okay! - getDirectOffset
+    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
+
+    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
+
+    void* code = m_jit.copy();
+    ASSERT(code);
+
+    // Use the repatch information to link the failure cases back to the original slow case routine.
+    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
+    X86Assembler::link(code, failureCases1, slowCaseBegin);
+    X86Assembler::link(code, failureCases2, slowCaseBegin);
+    X86Assembler::link(code, failureCases3, slowCaseBegin);
+
+    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
+    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
+    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
+
+    // Track the stub we have created so that it will be deleted later.
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
+
+    // Finally repatch the jump to slow case back in the hot path to jump here instead.
+    // FIXME: should revert this repatching, on failure.
+    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
+    X86Assembler::repatchBranchOffset(jmpLocation, code);
+#else
+    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
+    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
+    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
+
+    // check eax is an object of the right StructureID.
+    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
+    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
+    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
+    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
+
+    // Check the prototype object's StructureID has not changed.
+    StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
+    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
+    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
+
+    // Checks out okay! - getDirectOffset
     m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
…
     X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
 
-    m_codeBlock->structureIDAccessStubs.append(code);
-
-    return code;
-}
-
-void* CTI::privateCompileGetByIdChain(ExecState* exec, StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset)
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
+
+    ctiRepatchCallByReturnAddress(returnAddress, code);
+#endif
+}
+
+void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
 {
     ASSERT(count);
…
     JSObject* protoObject = 0;
     for (unsigned i = 0; i < count; ++i) {
-        protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(exec));
+        protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
         currStructureID = chainEntries[i].get();
…
     for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
         X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
-    m_codeBlock->structureIDAccessStubs.append(code);
-    return code;
-}
-
-void* CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset)
+
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
+
+    ctiRepatchCallByReturnAddress(returnAddress, code);
+}
+
+void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
 {
     // check eax is an object of the right StructureID.
…
     X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
 
-    m_codeBlock->structureIDAccessStubs.append(code);
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
 
-    return code;
+    ctiRepatchCallByReturnAddress(returnAddress, code);
 }
…
 }
 
-void* CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC)
+void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
 {
     Vector<X86Assembler::JmpSrc, 16> failureCases;
…
     X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
 
-    m_codeBlock->structureIDAccessStubs.append(code);
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
 
-    return code;
-}
-
-void* CTI::privateArrayLengthTrampoline()
+    ctiRepatchCallByReturnAddress(returnAddress, code);
+}
+
+void* CTI::privateCompileArrayLengthTrampoline()
 {
     // Check eax is an array
…
 }
 
-void* CTI::privateStringLengthTrampoline()
+void* CTI::privateCompileStringLengthTrampoline()
 {
     // Check eax is a string
…
 
     return code;
+}
+
+void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
+{
+    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
+
+    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
+    // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
+
+    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
+    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
+    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
+}
+
+void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
+{
+    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
+
+    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
+    // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
+
+    // Repatch the offset into the property map to store to, then repatch the StructureID to look for.
+    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
+    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
+}
+
+void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
+{
+    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
+
+    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
+    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
+
+    // Check eax is an array
+    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
+    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
+    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
+    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
+
+    // Checks out okay! - get the length from the storage
+    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
+    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
+
+    m_jit.addl_rr(X86::ecx, X86::ecx);
+    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
+    m_jit.addl_i8r(1, X86::ecx);
+
+    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
+
+    void* code = m_jit.copy();
+    ASSERT(code);
+
+    // Use the repatch information to link the failure cases back to the original slow case routine.
+    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
+    X86Assembler::link(code, failureCases1, slowCaseBegin);
+    X86Assembler::link(code, failureCases2, slowCaseBegin);
+    X86Assembler::link(code, failureCases3, slowCaseBegin);
+
+    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
+    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
+    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
+
+    // Track the stub we have created so that it will be deleted later.
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
+
+    // Finally repatch the jump to slow case back in the hot path to jump here instead.
+    // FIXME: should revert this repatching, on failure.
+    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
+    X86Assembler::repatchBranchOffset(jmpLocation, code);
 }
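A useful way to read the stub compilers above: every patchable field sits at a fixed, ASSERTed distance from one of two anchors - hotPathBegin, and the return address of the slow-case call - so no disassembly is ever needed at patch time. The helper below is purely illustrative (the function itself does not appear in the patch); it is a condensed restatement of the address arithmetic used by privateCompileGetByIdProto and privateCompilePatchGetArrayLength.

    #include <cstdint>

    // Architecture-specific distances, as defined in CTI.h by this patch.
    static const int repatchOffsetGetByIdBranchToSlowCase = 25;
    static const int repatchOffsetGetByIdPropertyMapOffset = 34;
    static const int repatchOffsetGetByIdSlowCaseCall = 17;

    // The three addresses a get_by_id stub compiler needs, recovered from the
    // two anchors recorded at compile time.
    struct GetByIdAnchors {
        void* slowCaseBegin;    // failure cases in the stub jump back here
        void* successReentry;   // hot path, just past the property-map load
        intptr_t branchToPatch; // end of the jne whose rel32 is retargeted
    };

    static GetByIdAnchors computeAnchors(void* hotPathBegin, void* callReturnLocation)
    {
        GetByIdAnchors a;
        // The call into cti_op_get_by_id sits a fixed 17 bytes into the cold path.
        a.slowCaseBegin = reinterpret_cast<char*>(callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
        // Re-enter the hot path at the instruction that stores the result.
        a.successReentry = reinterpret_cast<char*>(hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
        // The branch-to-slow-case ends 25 bytes past hotPathBegin.
        a.branchToPatch = reinterpret_cast<intptr_t>(hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
        return a;
    }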
trunk/JavaScriptCore/VM/CTI.h
r36402 → r36418

 #if ENABLE(CTI)
 
+#define WTF_USE_CTI_REPATCH_PIC 1
+
 #include "Opcode.h"
 #include "RegisterFile.h"
…
 };
 
+struct StructureStubCompilationInfo {
+    X86Assembler::JmpSrc callReturnLocation;
+    X86Assembler::JmpDst hotPathBegin;
+};
+
 extern "C" {
     JSValue* ctiTrampoline(void* code, ExecState* exec, RegisterFile* registerFile, Register* r, ScopeChainNode* scopeChain, CodeBlock* codeBlock, JSValue** exception, Profiler**);
…
 
 class CTI {
+    static const int repatchGetByIdDefaultStructureID = -1;
+    // Magic number - the initial offset must not be representable as a signed 8-bit value, or the X86Assembler
+    // will compress the displacement, and we may not be able to fit a repatched offset.
+    static const int repatchGetByIdDefaultOffset = 256;
+
+    // These architecture-specific values are used to enable repatching - see the comment on op_put_by_id.
+    static const int repatchOffsetPutByIdStructureID = 19;
+    static const int repatchOffsetPutByIdPropertyMapOffset = 34;
+    // These architecture-specific values are used to enable repatching - see the comment on op_get_by_id.
+    static const int repatchOffsetGetByIdStructureID = 19;
+    static const int repatchOffsetGetByIdBranchToSlowCase = 25;
+    static const int repatchOffsetGetByIdPropertyMapOffset = 34;
+    static const int repatchOffsetGetByIdSlowCaseCall = 17;
+
 public:
     static void compile(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
…
 #endif
 
-    static void* compileGetByIdSelf(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset)
-    {
-        CTI cti(machine, exec, codeBlock);
-        return cti.privateCompileGetByIdSelf(structureID, cachedOffset);
-    }
-
-    static void* compileGetByIdProto(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset)
-    {
-        CTI cti(machine, exec, codeBlock);
-        return cti.privateCompileGetByIdProto(exec, structureID, prototypeStructureID, cachedOffset);
-    }
-
-    static void* compileGetByIdChain(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset)
-    {
-        CTI cti(machine, exec, codeBlock);
-        return cti.privateCompileGetByIdChain(exec, structureID, chain, count, cachedOffset);
-    }
-
-    static void* compilePutByIdReplace(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset)
-    {
-        CTI cti(machine, exec, codeBlock);
-        return cti.privateCompilePutByIdReplace(structureID, cachedOffset);
-    }
-
-    static void* compilePutByIdTransition(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC)
-    {
-        CTI cti(machine, exec, codeBlock);
-        return cti.privateCompilePutByIdTransition(oldStructureID, newStructureID, cachedOffset, sIDC);
-    }
+    static void compileGetByIdSelf(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
+    {
+        CTI cti(machine, exec, codeBlock);
+        cti.privateCompileGetByIdSelf(structureID, cachedOffset, returnAddress);
+    }
+
+    static void compileGetByIdProto(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
+    {
+        CTI cti(machine, exec, codeBlock);
+        cti.privateCompileGetByIdProto(structureID, prototypeStructureID, cachedOffset, returnAddress);
+    }
+
+    static void compileGetByIdChain(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
+    {
+        CTI cti(machine, exec, codeBlock);
+        cti.privateCompileGetByIdChain(structureID, chain, count, cachedOffset, returnAddress);
+    }
+
+    static void compilePutByIdReplace(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
+    {
+        CTI cti(machine, exec, codeBlock);
+        cti.privateCompilePutByIdReplace(structureID, cachedOffset, returnAddress);
+    }
+
+    static void compilePutByIdTransition(Machine* machine, ExecState* exec, CodeBlock* codeBlock, StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
+    {
+        CTI cti(machine, exec, codeBlock);
+        cti.privateCompilePutByIdTransition(oldStructureID, newStructureID, cachedOffset, sIDC, returnAddress);
+    }
…
     {
         CTI cti(machine, exec, codeBlock);
-        return cti.privateArrayLengthTrampoline();
+        return cti.privateCompileArrayLengthTrampoline();
     }
…
     {
         CTI cti(machine, exec, codeBlock);
-        return cti.privateStringLengthTrampoline();
+        return cti.privateCompileStringLengthTrampoline();
+    }
+
+    static void patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress);
+    static void patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress);
+
+    static void compilePatchGetArrayLength(Machine* machine, ExecState* exec, CodeBlock* codeBlock, void* returnAddress)
+    {
+        CTI cti(machine, exec, codeBlock);
+        return cti.privateCompilePatchGetArrayLength(returnAddress);
     }
…
     void privateCompileSlowCases();
     void privateCompile();
-    void* privateCompileGetByIdSelf(StructureID*, size_t cachedOffset);
-    void* privateCompileGetByIdProto(ExecState*, StructureID*, StructureID* prototypeStructureID, size_t cachedOffset);
-    void* privateCompileGetByIdChain(ExecState*, StructureID*, StructureIDChain*, size_t count, size_t cachedOffset);
-    void* privateCompilePutByIdReplace(StructureID*, size_t cachedOffset);
-    void* privateCompilePutByIdTransition(StructureID*, StructureID*, size_t cachedOffset, StructureIDChain*);
-    void* privateArrayLengthTrampoline();
-    void* privateStringLengthTrampoline();
+    void privateCompileGetByIdSelf(StructureID*, size_t cachedOffset, void* returnAddress);
+    void privateCompileGetByIdProto(StructureID*, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress);
+    void privateCompileGetByIdChain(StructureID*, StructureIDChain*, size_t count, size_t cachedOffset, void* returnAddress);
+    void privateCompilePutByIdReplace(StructureID*, size_t cachedOffset, void* returnAddress);
+    void privateCompilePutByIdTransition(StructureID*, StructureID*, size_t cachedOffset, StructureIDChain*, void* returnAddress);
+
+    void* privateCompileArrayLengthTrampoline();
+    void* privateCompileStringLengthTrampoline();
+    void privateCompilePatchGetArrayLength(void* returnAddress);
 
     enum CompileOpCallType { OpCallNormal, OpCallEval, OpConstruct };
…
     unsigned getDeTaggedConstantImmediate(JSValue* imm);
 
+    void emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex);
     void emitJumpSlowCaseIfNotImm(X86Assembler::RegisterID, unsigned opcodeIndex);
     void emitJumpSlowCaseIfNotImms(X86Assembler::RegisterID, X86Assembler::RegisterID, unsigned opcodeIndex);
…
     void emitDebugExceptionCheck();
 
-    void emitCall(unsigned opcodeIndex, CTIHelper_j);
-    void emitCall(unsigned opcodeIndex, CTIHelper_p);
-    void emitCall(unsigned opcodeIndex, CTIHelper_b);
-    void emitCall(unsigned opcodeIndex, CTIHelper_v);
-    void emitCall(unsigned opcodeIndex, CTIHelper_s);
+    X86Assembler::JmpSrc emitCall(unsigned opcodeIndex, CTIHelper_j);
+    X86Assembler::JmpSrc emitCall(unsigned opcodeIndex, CTIHelper_p);
+    X86Assembler::JmpSrc emitCall(unsigned opcodeIndex, CTIHelper_b);
+    X86Assembler::JmpSrc emitCall(unsigned opcodeIndex, CTIHelper_v);
+    X86Assembler::JmpSrc emitCall(unsigned opcodeIndex, CTIHelper_s);
 
     void emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst);
…
     Vector<CallRecord> m_calls;
     Vector<X86Assembler::JmpDst> m_labels;
+    Vector<StructureStubCompilationInfo> m_structureStubCompilationInfo;
     Vector<JmpTable> m_jmpTable;
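Where do magic distances like 19 and 34 come from? They are simply the byte offsets, within one fixed instruction sequence, at which the patchable 32-bit fields end. The sketch below is an illustrative hand-assembly of the hot path's first three instructions - the actual bytes depend on what the assembler emits, which is exactly why the patch ASSERTs the distances at compile time rather than relying on a written-down encoding.

    #include <cassert>

    int main()
    {
        // Plausible x86 instruction lengths for the get/put_by_id hot path:
        const int testl = 6; // f7 c0 imm32     testl $TagMask, %eax
        const int jne   = 6; // 0f 85 rel32     jne   <slow case>
        const int cmpl  = 7; // 81 78 d8 imm32  cmpl  $structureID, disp8(%eax)

        // The cmpl's imm32 is its final four bytes, so the immediate *ends*
        // 19 bytes past hotPathBegin - matching repatchOffsetGetByIdStructureID.
        // repatchImmediate(hotPathBegin + 19, id) then rewrites bytes 15..18.
        assert(testl + jne + cmpl == 19);
        return 0;
    }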
trunk/JavaScriptCore/VM/CodeBlock.cpp
r36417 → r36418

         size_t i = 0;
         do {
-            printStructureIDs(&instructions[structureIDInstructions[i]]);
+            printStructureIDs(&instructions[structureIDInstructions[i].opcodeIndex]);
             ++i;
         } while (i < structureIDInstructions.size());
…
 {
     size_t size = structureIDInstructions.size();
-    for (size_t i = 0; i < size; ++i)
-        derefStructureIDs(&instructions[structureIDInstructions[i]]);
-
-    size = structureIDAccessStubs.size();
-    for (size_t i = 0; i < size; ++i)
-        fastFree(structureIDAccessStubs[i]);
-
+    for (size_t i = 0; i < size; ++i) {
+        derefStructureIDs(&instructions[structureIDInstructions[i].opcodeIndex]);
+        if (structureIDInstructions[i].stubRoutine)
+            fastFree(structureIDInstructions[i].stubRoutine);
+    }
 #if ENABLE(CTI)
     if (ctiCode)
trunk/JavaScriptCore/VM/CodeBlock.h
r36267 → r36418

 };
 
+struct StructureStubInfo {
+    StructureStubInfo(unsigned opcodeIndex)
+        : opcodeIndex(opcodeIndex)
+        , stubRoutine(0)
+        , callReturnLocation(0)
+        , hotPathBegin(0)
+    {
+    }
+
+    unsigned opcodeIndex;
+    void* stubRoutine;
+    void* callReturnLocation;
+    void* hotPathBegin;
+};
+
 struct StringJumpTable {
     typedef HashMap<RefPtr<UString::Rep>, OffsetLocation> StringOffsetTable;
…
     void derefStructureIDs(Instruction* vPC) const;
 
+    StructureStubInfo& getStubInfo(void* returnAddress)
+    {
+        // FIXME: would a binary chop be faster here?
+        for (unsigned i = 0; i < structureIDInstructions.size(); ++i) {
+            if (structureIDInstructions[i].callReturnLocation == returnAddress)
+                return structureIDInstructions[i];
+        }
+
+        ASSERT_NOT_REACHED();
+        // Keep the compiler happy.
+        static StructureStubInfo duff(0);
+        return duff;
+    }
+
     ScopeNode* ownerNode;
     JSGlobalData* globalData;
…
 
     Vector<Instruction> instructions;
-    Vector<size_t> structureIDInstructions;
-    Vector<void*> structureIDAccessStubs;
+    Vector<StructureStubInfo> structureIDInstructions;
 
     // Constant pool
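The FIXME in getStubInfo invites an obvious follow-up. Below is a sketch of the binary chop, under the assumption - not guaranteed by anything in this patch - that callReturnLocation addresses increase monotonically through the vector, which would hold if the slow-case calls are emitted in the same order the vector is populated. It uses std::vector and a simplified stub-info struct for self-containment; the real code would use WTF::Vector and the full StructureStubInfo.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct StructureStubInfo {
        unsigned opcodeIndex;
        void* callReturnLocation;
    };

    // Requires 'stubs' to be ordered by callReturnLocation (see assumption above).
    static StructureStubInfo& getStubInfo(std::vector<StructureStubInfo>& stubs, void* returnAddress)
    {
        uintptr_t target = reinterpret_cast<uintptr_t>(returnAddress);
        size_t low = 0, high = stubs.size();
        while (low < high) {
            size_t mid = low + (high - low) / 2;
            if (reinterpret_cast<uintptr_t>(stubs[mid].callReturnLocation) < target)
                low = mid + 1;
            else
                high = mid;
        }
        assert(low < stubs.size() && stubs[low].callReturnLocation == returnAddress);
        return stubs[low];
    }

    int main()
    {
        char code[64];
        std::vector<StructureStubInfo> stubs;
        for (unsigned i = 0; i < 4; ++i)
            stubs.push_back(StructureStubInfo{ i * 8, code + i * 16 });
        assert(getStubInfo(stubs, code + 32).opcodeIndex == 16);
        return 0;
    }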
trunk/JavaScriptCore/VM/Machine.cpp
r36417 → r36418

         vPC[7] = slot.cachedOffset();
         codeBlock->refStructureIDs(vPC);
-        ctiRepatchCallByReturnAddress(returnAddress, CTI::compilePutByIdTransition(this, exec, codeBlock, structureID->previousID(), structureID, slot.cachedOffset(), chain));
+        CTI::compilePutByIdTransition(this, exec, codeBlock, structureID->previousID(), structureID, slot.cachedOffset(), chain, returnAddress);
         return;
     }
…
     codeBlock->refStructureIDs(vPC);
 
-    ctiRepatchCallByReturnAddress(returnAddress, CTI::compilePutByIdReplace(this, exec, codeBlock, structureID, slot.cachedOffset()));
+#if USE(CTI_REPATCH_PIC)
+    UNUSED_PARAM(exec);
+    CTI::patchPutByIdReplace(codeBlock, structureID, slot.cachedOffset(), returnAddress);
+#else
+    CTI::compilePutByIdReplace(this, exec, codeBlock, structureID, slot.cachedOffset(), returnAddress);
+#endif
 }
…
     if (isJSArray(baseValue) && propertyName == exec->propertyNames().length) {
+#if USE(CTI_REPATCH_PIC)
+        CTI::compilePatchGetArrayLength(this, exec, codeBlock, returnAddress);
+#else
         ctiRepatchCallByReturnAddress(returnAddress, getCTIArrayLengthTrampoline(exec, codeBlock));
+#endif
         return;
     }
     if (isJSString(baseValue) && propertyName == exec->propertyNames().length) {
+        // The tradeoff of compiling a repatched inline string length access routine does not seem
+        // to pay off, so we currently only do this for arrays.
         ctiRepatchCallByReturnAddress(returnAddress, getCTIStringLengthTrampoline(exec, codeBlock));
         return;
…
     codeBlock->refStructureIDs(vPC);
 
-    ctiRepatchCallByReturnAddress(returnAddress, CTI::compileGetByIdSelf(this, exec, codeBlock, structureID, slot.cachedOffset()));
+#if USE(CTI_REPATCH_PIC)
+    CTI::patchGetByIdSelf(codeBlock, structureID, slot.cachedOffset(), returnAddress);
+#else
+    CTI::compileGetByIdSelf(this, exec, codeBlock, structureID, slot.cachedOffset(), returnAddress);
+#endif
     return;
 }
…
     codeBlock->refStructureIDs(vPC);
 
-    ctiRepatchCallByReturnAddress(returnAddress, CTI::compileGetByIdProto(this, exec, codeBlock, structureID, slotBaseObject->structureID(), slot.cachedOffset()));
+    CTI::compileGetByIdProto(this, exec, codeBlock, structureID, slotBaseObject->structureID(), slot.cachedOffset(), returnAddress);
     return;
 }
…
     codeBlock->refStructureIDs(vPC);
 
-    ctiRepatchCallByReturnAddress(returnAddress, CTI::compileGetByIdChain(this, exec, codeBlock, structureID, chain, count, slot.cachedOffset()));
+    CTI::compileGetByIdChain(this, exec, codeBlock, structureID, chain, count, slot.cachedOffset(), returnAddress);
 }
…
 }
 
+void Machine::cti_op_put_by_val_array(CTI_ARGS)
+{
+    ExecState* exec = ARG_exec;
+
+    JSValue* baseValue = ARG_src1;
+    int i = ARG_int2;
+    JSValue* value = ARG_src3;
+
+    ASSERT(exec->machine()->isJSArray(baseValue));
+
+    if (LIKELY(i >= 0))
+        static_cast<JSArray*>(baseValue)->JSArray::put(exec, i, value);
+    else {
+        Identifier property(exec, JSImmediate::from(i)->toString(exec));
+        // FIXME: can toString throw an exception here?
+        if (!exec->hadException()) { // Don't put to an object if toString threw an exception.
+            PutPropertySlot slot;
+            baseValue->put(exec, property, value, slot);
+        }
+    }
+
+    VM_CHECK_EXCEPTION_AT_END();
+}
+
 JSValue* Machine::cti_op_lesseq(CTI_ARGS)
 {
trunk/JavaScriptCore/VM/Machine.h
r36412 → r36418

     static JSValue* SFX_CALL cti_op_sub(CTI_ARGS);
     static void SFX_CALL cti_op_put_by_val(CTI_ARGS);
+    static void SFX_CALL cti_op_put_by_val_array(CTI_ARGS);
     static JSValue* SFX_CALL cti_op_lesseq(CTI_ARGS);
     static int SFX_CALL cti_op_loop_if_true(CTI_ARGS);
trunk/JavaScriptCore/masm/X86Assembler.h
r36401 → r36418

         OP2_JNE_rel32 = 0x85,
         OP2_JBE_rel32 = 0x86,
+        OP2_JA_rel32 = 0x87,
         OP2_JL_rel32 = 0x8C,
         OP2_JGE_rel32 = 0x8D,
…
         emitModRm_opm(GROUP1_OP_CMP, addr);
         m_buffer->putInt(imm);
+    }
+
+    void cmpl_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_buffer->putByte(OP_GROUP1_EvIb);
+        emitModRm_opmsib(GROUP1_OP_CMP, base, index, scale, offset);
+        m_buffer->putByte(imm);
     }
…
     }
 
+    JmpSrc emitUnlinkedJa()
+    {
+        m_buffer->putByte(OP_2BYTE_ESCAPE);
+        m_buffer->putByte(OP2_JA_rel32);
+        m_buffer->putInt(0);
+        return JmpSrc(m_buffer->getOffset());
+    }
+
     JmpSrc emitUnlinkedJae()
     {
…
     }
 
-    void* getRelocatedAddress(void* code, JmpSrc jump)
+    static void* getRelocatedAddress(void* code, JmpSrc jump)
     {
         return reinterpret_cast<void*>((ptrdiff_t)code + jump.m_offset);
     }
 
-    void* getRelocatedAddress(void* code, JmpDst jump)
+    static void* getRelocatedAddress(void* code, JmpDst jump)
     {
         return reinterpret_cast<void*>((ptrdiff_t)code + jump.m_offset);
+    }
+
+    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
+    {
+        return dst.m_offset - src.m_offset;
+    }
+
+    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
+    {
+        return dst.m_offset - src.m_offset;
+    }
+
+    static void repatchImmediate(intptr_t where, int32_t value)
+    {
+        reinterpret_cast<int32_t*>(where)[-1] = value;
+    }
+
+    static void repatchDisplacement(intptr_t where, intptr_t value)
+    {
+        reinterpret_cast<intptr_t*>(where)[-1] = value;
+    }
+
+    static void repatchBranchOffset(intptr_t where, void* destination)
+    {
+        reinterpret_cast<intptr_t*>(where)[-1] = (reinterpret_cast<intptr_t>(destination) - where);
     }
…
     }
 
+    void emitModRm_opmsib(OpcodeID opcode, RegisterID base, RegisterID index, int scale, int offset)
+    {
+        emitModRm_rmsib(static_cast<RegisterID>(opcode), base, index, scale, offset);
+    }
+
     JITCodeBuffer* m_buffer;
 };
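The rel32 arithmetic in repatchBranchOffset is easy to sanity-check in isolation. Below is a small self-contained demo; the buffer and addresses are invented, and where the real helper writes an intptr_t (the same four bytes on the 32-bit x86 target this patch addresses) the demo writes an int32_t so it also runs on a 64-bit host.

    #include <cassert>
    #include <cstdint>

    // Mirror of the helper above: 'where' is the address just past the rel32.
    static void repatchBranchOffset(intptr_t where, void* destination)
    {
        reinterpret_cast<int32_t*>(where)[-1] =
            static_cast<int32_t>(reinterpret_cast<intptr_t>(destination) - where);
    }

    int main()
    {
        // Fake instruction stream: the final four bytes stand in for a jne rel32.
        unsigned char stream[8] = {};
        intptr_t endOfJne = reinterpret_cast<intptr_t>(stream) + sizeof(stream);

        // Retarget the branch at a pretend stub 0x40 bytes further on.
        void* stub = reinterpret_cast<void*>(endOfJne + 0x40);
        repatchBranchOffset(endOfJne, stub);

        // The CPU computes target = end-of-instruction + rel32.
        int32_t rel32 = reinterpret_cast<int32_t*>(endOfJne)[-1];
        assert(endOfJne + rel32 == reinterpret_cast<intptr_t>(stub));
        return 0;
    }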