Changeset 36418 in webkit for trunk/JavaScriptCore/VM/CTI.cpp
- Timestamp: Sep 14, 2008, 7:18:13 PM
- File: 1 edited (trunk/JavaScriptCore/VM/CTI.cpp)
In the unified diff below, removed lines are prefixed with `-`, added lines with `+`, unmodified context lines with a space, and `…` marks elided stretches of unchanged code.
trunk/JavaScriptCore/VM/CTI.cpp (r36417 → r36418)
 #endif
 
-ALWAYS_INLINE void CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
+ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
 {
 #if ENABLE(SAMPLING_TOOL)
     m_jit.movl_i32m(1, &inCalledCode);
 #endif
-    m_calls.append(CallRecord(m_jit.emitCall(), helper, opcodeIndex));
+    X86Assembler::JmpSrc call = m_jit.emitCall();
+    m_calls.append(CallRecord(call, helper, opcodeIndex));
     emitDebugExceptionCheck();
 #if ENABLE(SAMPLING_TOOL)
     m_jit.movl_i32m(0, &inCalledCode);
 #endif
+
+    return call;
 }

The same change is applied to the CTIHelper_p, CTIHelper_b, CTIHelper_v, and CTIHelper_s overloads of CTI::emitCall. A new inline helper is then added:

+ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
+{
+    m_jit.testl_i32r(JSImmediate::TagMask, reg);
+    m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
+}
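emitCall now returns the JmpSrc of the emitted call so that callers can record where the call's return address will land once the buffer is relocated; that return address is the key later used to look up repatch information. A minimal sketch of the idea, with illustrative types rather than the real JSC ones:

```cpp
#include <cstdint>
#include <vector>

struct JmpSrc { int offset; };  // position just past a call in the code buffer

struct Assembler {
    std::vector<uint8_t> buffer;
    JmpSrc emitCall()
    {
        buffer.push_back(0xE8);                   // x86 CALL rel32 opcode
        for (int i = 0; i < 4; ++i)
            buffer.push_back(0);                  // placeholder rel32, linked later
        JmpSrc src = { static_cast<int>(buffer.size()) };
        return src;                               // offset of the return address
    }
};

// After relocating the buffer to executable memory at 'base', the recorded
// JmpSrc becomes the absolute return address seen on the stack inside the
// called helper - the value later passed back as 'returnAddress'.
void* relocatedReturnAddress(void* base, JmpSrc src)
{
    return static_cast<char*>(base) + src.offset;
}
```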
…
     , m_codeBlock(codeBlock)
     , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
+    , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
 {
 }
…
     Instruction* instruction = m_codeBlock->instructions.begin();
     unsigned instructionCount = m_codeBlock->instructions.size();
+
+    unsigned structureIDInstructionIndex = 0;
 
     for (unsigned i = 0; i < instructionCount; ) {
…
         }
         case op_put_by_id: {
-            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
-            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
+            // In order to be able to repatch both the StructureID and the object offset, we store one pointer
+            // to just after the point where the arguments have been loaded into registers, 'hotPathBegin', and
+            // we generate code such that the StructureID & offset are always at the same distance from it.
+
             emitGetArg(instruction[i + 1].u.operand, X86::eax);
             emitGetArg(instruction[i + 3].u.operand, X86::edx);
-            emitPutArg(X86::eax, 0); // leave the base in eax
-            emitPutArg(X86::edx, 8); // leave the base in edx
-            emitCall(i, Machine::cti_op_put_by_id);
+
+            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
+            X86Assembler::JmpDst hotPathBegin = m_jit.label();
+            m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
+            ++structureIDInstructionIndex;
+
+            // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
+            emitJumpSlowCaseIfNotJSCell(X86::eax, i);
+            // It is important that the following instruction plants a 32-bit immediate, so that it can be patched over.
+            m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
+            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
+
+            // Plant a store at a bogus offset into the object's property map; we will patch this later, if it is to be used.
+            m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
+            m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
+
             i += 8;
             break;
         }
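Written as plain C++ over simplified stand-in types (not the real JSC classes), the hot path emitted above amounts to the check below; the two "patched" parameters model the 32-bit StructureID immediate and property-map displacement that patchPutByIdReplace later overwrites in place:

```cpp
#include <cstddef>

// Stand-in types; the real JSCell/JSObject live in JavaScriptCore.
struct StructureID;
typedef void* JSValuePtr;
struct JSCell { StructureID* m_structureID; };
struct JSObject : JSCell { JSValuePtr* m_propertyStorage; };

// The inline cache starts out with a deliberately failing StructureID and a
// bogus offset, so every execution takes the slow path until the patch
// functions overwrite both 32-bit fields in the emitted instructions.
bool tryCachedPutById(JSCell* base, JSValuePtr value,
                      StructureID* patchedStructureID, size_t patchedOffset)
{
    // emitJumpSlowCaseIfNotJSCell: immediates carry the tag bit and cannot
    // be dereferenced -- that check is elided here for brevity.
    if (base->m_structureID != patchedStructureID)
        return false;  // emitUnlinkedJne -> slow case -> cti_op_put_by_id
    // movl_rm at the patched displacement into the property storage.
    static_cast<JSObject*>(base)->m_propertyStorage[patchedOffset] = value;
    return true;
}
```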
         case op_get_by_id: {
-            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
-            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
+            // As for put_by_id, get_by_id requires both the StructureID and the offset of the access to be repatched.
+            // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this
+            // to jump to the array-length / prototype access trampolines), and we also need the property-map access
+            // offset as a label to jump back to if one of these trampolines finds a match.
+
             emitGetArg(instruction[i + 2].u.operand, X86::eax);
-            emitPutArg(X86::eax, 0); // leave the base in eax
-            emitCall(i, Machine::cti_op_get_by_id);
-            emitPutResult(instruction[i + 1].u.operand);
+
+            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
+
+            X86Assembler::JmpDst hotPathBegin = m_jit.label();
+            m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
+            ++structureIDInstructionIndex;
+
+            emitJumpSlowCaseIfNotJSCell(X86::eax, i);
+            m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
+            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
+
+            m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
+            m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
+            emitPutResult(instruction[i + 1].u.operand, X86::ecx);
+
             i += 8;
             break;
         }
…
             m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
             m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
+
+            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff.
+            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
             m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
             m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
 
-            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
-            m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
+            // Get the value from the vector.
+            m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
             emitPutResult(instruction[i + 1].u.operand);
             i += 4;
             break;
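The get_by_val fast path above compiles down to the logic below, sketched with stand-in types (assumed field shapes, not the real JSC classes); m_fastAccessCutoff is what makes the single compare sufficient:

```cpp
// Stand-in types modelled on the fields the emitted code touches.
struct ArrayStorage {
    unsigned m_length;
    unsigned m_vectorLength;
    void* m_vector[1];           // inline vector of JSValue*
};
struct JSArray {
    unsigned m_fastAccessCutoff; // slots below this are guaranteed non-empty
    ArrayStorage* m_storage;
};

// One vptr check (elided) plus one unsigned compare guards the whole load:
// below the cutoff there can be no holes, so no per-slot check is needed.
void* getByValFastPath(JSArray* base, unsigned index, bool& slowCase)
{
    ArrayStorage* storage = base->m_storage;        // movl m_storage -> ecx
    slowCase = index >= base->m_fastAccessCutoff;   // cmpl; jbe -> slow case
    return slowCase ? 0 : storage->m_vector[index]; // movl from the vector
}
```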
…
             emitGetArg(instruction[i + 1].u.operand, X86::eax);
             emitGetArg(instruction[i + 2].u.operand, X86::edx);
-            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
             emitJumpSlowCaseIfNotImm(X86::edx, i);
             emitFastArithImmToInt(X86::edx);
…
             m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
             m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
+
+            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff.
+            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
             m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
+            X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
+            // No; oh well, check if the access is within the vector - if so, we may still be okay.
+            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
             m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
 
-            m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
-            m_jit.movl_rm(X86::ecx, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::eax, X86::edx, sizeof(JSValue*));
+            // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
+            // FIXME: should be able to handle an initial write to the array: increment the number of items in the array, and potentially update the fast access cutoff.
+            m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
+            m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
+
+            // All good - put the value into the array.
+            m_jit.link(inFastVector, m_jit.label());
+            emitGetArg(instruction[i + 3].u.operand, X86::eax);
+            m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
             i += 4;
             break;
…
         }
     }
+
+    ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
 }
…
 void CTI::privateCompileSlowCases()
 {
+    unsigned structureIDInstructionIndex = 0;
+
     Instruction* instruction = m_codeBlock->instructions.begin();
     for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
-        int i = iter->to;
+        unsigned i = iter->to;
         m_jit.emitRestoreArgumentReference();
         switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
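The put_by_val hot path now distinguishes three outcomes. As a sketch over the same stand-in types as above (assumed field shapes, not the real JSC classes):

```cpp
// Stand-in types as in the get_by_val sketch above.
struct ArrayStorage {
    unsigned m_length;
    unsigned m_vectorLength;
    void* m_vector[1];
};
struct JSArray {
    unsigned m_fastAccessCutoff;
    ArrayStorage* m_storage;
};

// Returns false where the emitted code branches to a slow case.
bool putByValFastPath(JSArray* base, unsigned index, void* value)
{
    ArrayStorage* storage = base->m_storage;     // movl m_storage -> ecx
    if (index >= base->m_fastAccessCutoff) {     // ja inFastVector when below
        if (index >= storage->m_vectorLength)
            return false;                        // jbe -> cti_op_put_by_val_array
        if (!storage->m_vector[index])           // cmpl_i8m 0: first write to slot?
            return false;                        // je -> cti_op_put_by_val_array
    }
    storage->m_vector[index] = value;            // inFastVector: movl_rm
    return true;
}
```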
…
         }
         case op_get_by_val: {
+            // The slow case that handles accesses to arrays (below) may jump back up to here.
+            X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
+
             X86Assembler::JmpSrc notImm = iter->from;
-            m_jit.link((++iter)->from, m_jit.label());
             m_jit.link((++iter)->from, m_jit.label());
             m_jit.link((++iter)->from, m_jit.label());
…
             emitCall(i, Machine::cti_op_get_by_val);
             emitPutResult(instruction[i + 1].u.operand);
+            m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
+
+            // This is the slow case that handles accesses to arrays above the fast cut-off.
+            // First, check if this is an access within the vector.
+            m_jit.link((++iter)->from, m_jit.label());
+            m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
+            m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
+
+            // Okay, we missed the fast region, but it is still in the vector. Get the value.
+            m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
+            // Check whether the value loaded is zero; if so the slot is a hole and we need to return undefined.
+            m_jit.testl_rr(X86::ecx, X86::ecx);
+            m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
+            emitPutResult(instruction[i + 1].u.operand, X86::ecx);
+
             i += 4;
             break;
…
             break;
         }
+        case op_put_by_id: {
+            m_jit.link(iter->from, m_jit.label());
+            m_jit.link((++iter)->from, m_jit.label());
+
+            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
+            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
+            emitPutArg(X86::eax, 0);
+            emitPutArg(X86::edx, 8);
+            X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
+
+            // Track the location of the call; this will be used to recover repatch information.
+            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
+            m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
+            ++structureIDInstructionIndex;
+
+            i += 8;
+            break;
+        }
+        case op_get_by_id: {
+            // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
+            // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+            // of the call (which we can use to look up the repatch information), but should an array-length or
+            // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
+            // the distance from the call to the head of the slow case.
+
+            m_jit.link(iter->from, m_jit.label());
+            m_jit.link((++iter)->from, m_jit.label());
+
+#ifndef NDEBUG
+            X86Assembler::JmpDst coldPathBegin = m_jit.label();
+#endif
+            emitPutArg(X86::eax, 0);
+            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
+            emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
+            X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
+            ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
+            emitPutResult(instruction[i + 1].u.operand);
+
+            // Track the location of the call; this will be used to recover repatch information.
+            ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
+            m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
+            ++structureIDInstructionIndex;
+
+            i += 8;
+            break;
+        }
         case op_loop_if_lesseq: {
             emitSlowScriptCheck(i);
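The callReturnLocation recorded here is what getStubInfo(returnAddress) resolves later. A hypothetical sketch of that mapping (the real StructureStubInfo and lookup live in CodeBlock, whose definition is not part of this file):

```cpp
#include <cstddef>
#include <cstdlib>
#include <vector>

// Assumed shape of the per-access-site record.
struct StructureStubInfo {
    unsigned opcodeIndex;
    void* callReturnLocation; // filled in from the JmpSrc returned by emitCall
    void* hotPathBegin;
    void* stubRoutine;
};

struct CodeBlock {
    std::vector<StructureStubInfo> structureIDInstructions;

    StructureStubInfo& getStubInfo(void* returnAddress)
    {
        for (std::size_t i = 0; i < structureIDInstructions.size(); ++i) {
            if (structureIDInstructions[i].callReturnLocation == returnAddress)
                return structureIDInstructions[i];
        }
        abort(); // every cti_op_{get,put}_by_id call site must be recorded
    }
};
```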
…
         }
         case op_put_by_val: {
+            // Normal slow cases - either the index is not an immediate int, or the base is an array.
             X86Assembler::JmpSrc notImm = iter->from;
-            m_jit.link((++iter)->from, m_jit.label());
             m_jit.link((++iter)->from, m_jit.label());
             m_jit.link((++iter)->from, m_jit.label());
             emitFastArithIntToImmNoCheck(X86::edx);
             m_jit.link(notImm, m_jit.label());
+            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
             emitPutArg(X86::eax, 0);
             emitPutArg(X86::edx, 4);
             emitPutArg(X86::ecx, 8);
             emitCall(i, Machine::cti_op_put_by_val);
+            m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
+
+            // Slow cases for immediate int accesses to arrays.
+            m_jit.link((++iter)->from, m_jit.label());
+            m_jit.link((++iter)->from, m_jit.label());
+            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
+            emitPutArg(X86::eax, 0);
+            emitPutArg(X86::edx, 4);
+            emitPutArg(X86::ecx, 8);
+            emitCall(i, Machine::cti_op_put_by_val_array);
+
             i += 4;
             break;
…
             m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
         }
+
+    ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
 }
…
         X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
 
+    for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
+        StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
+        info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
+        info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
+    }
+
     m_codeBlock->ctiCode = code;
 }
 
-void* CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset)
+void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
 {
     // Check eax is an object of the right StructureID.
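What the stub generated by privateCompileGetByIdSelf computes, as plain C++ over stand-in types (the TagMask value is a placeholder, not the real JSImmediate constant):

```cpp
#include <cstddef>
#include <stdint.h>

struct StructureID;
typedef void* JSValuePtr;
struct JSCell { StructureID* m_structureID; };
struct JSObject : JSCell { JSValuePtr* m_propertyStorage; };

const uintptr_t TagMask = 0x3; // placeholder for JSImmediate::TagMask

// The stub's two failure cases both fall back to cti_op_get_by_id_fail.
JSValuePtr getByIdSelfStub(JSCell* base, StructureID* cachedStructureID,
                           size_t cachedOffset, bool& failed)
{
    failed = (reinterpret_cast<uintptr_t>(base) & TagMask) != 0 // testl TagMask
          || base->m_structureID != cachedStructureID;          // cmpl_i32m
    if (failed)
        return 0;
    // getDirectOffset: one load from the property storage.
    return static_cast<JSObject*>(base)->m_propertyStorage[cachedOffset];
}
```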
…
     X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
 
-    m_codeBlock->structureIDAccessStubs.append(code);
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
 
-    return code;
-}
-
-void* CTI::privateCompileGetByIdProto(ExecState* exec, StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset)
-{
+    ctiRepatchCallByReturnAddress(returnAddress, code);
+}
+
+void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
+{
+#if USE(CTI_REPATCH_PIC)
+    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
+
+    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
+    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
+
     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
-    // referencing the prototype object - let's speculatively load it's table nice and early!)
-    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(exec));
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
     m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
…
 
     // Checks out okay! - getDirectOffset
+    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
+
+    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
+
+    void* code = m_jit.copy();
+    ASSERT(code);
+
+    // Use the repatch information to link the failure cases back to the original slow case routine.
+    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
+    X86Assembler::link(code, failureCases1, slowCaseBegin);
+    X86Assembler::link(code, failureCases2, slowCaseBegin);
+    X86Assembler::link(code, failureCases3, slowCaseBegin);
+
+    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
+    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
+    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
+
+    // Track the stub we have created so that it will be deleted later.
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
+
+    // Finally, repatch the jump to the slow case back in the hot path to jump here instead.
+    // FIXME: should revert this repatching, on failure.
+    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
+    X86Assembler::repatchBranchOffset(jmpLocation, code);
+#else
+    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
+    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
+    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
+
+    // Check eax is an object of the right StructureID.
+    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
+    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
+    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
+    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
+
+    // Check the prototype object's StructureID has not changed.
+    StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
+    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
+    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
+
+    // Checks out okay! - getDirectOffset
     m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
 
…
     X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
 
-    m_codeBlock->structureIDAccessStubs.append(code);
-
-    return code;
-}
-
-void* CTI::privateCompileGetByIdChain(ExecState* exec, StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset)
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
+
+    ctiRepatchCallByReturnAddress(returnAddress, code);
+#endif
+}
+
+void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
 {
     ASSERT(count);
…
     JSObject* protoObject = 0;
     for (unsigned i = 0; i < count; ++i) {
-        protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(exec));
+        protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
         currStructureID = chainEntries[i].get();
 
…
     for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
         X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
-    m_codeBlock->structureIDAccessStubs.append(code);
-    return code;
-}
-
-void* CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset)
+
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
+
+    ctiRepatchCallByReturnAddress(returnAddress, code);
+}
+
+void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
 {
     // Check eax is an object of the right StructureID.
…
     X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
 
-    m_codeBlock->structureIDAccessStubs.append(code);
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
 
-    return code;
+    ctiRepatchCallByReturnAddress(returnAddress, code);
 }
 
…
 }
 
-void* CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC)
+void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
 {
     Vector<X86Assembler::JmpSrc, 16> failureCases;
…
     X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
 
-    m_codeBlock->structureIDAccessStubs.append(code);
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
 
-    return code;
-}
-
-void* CTI::privateArrayLengthTrampoline()
+    ctiRepatchCallByReturnAddress(returnAddress, code);
+}
+
+void* CTI::privateCompileArrayLengthTrampoline()
 {
     // Check eax is an array
…
 }
 
-void* CTI::privateStringLengthTrampoline()
+void* CTI::privateCompileStringLengthTrampoline()
 {
     // Check eax is a string
…
 
     return code;
+}
+
+void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
+{
+    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
+
+    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
+    // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
+
+    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
+    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
+    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
+}
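Both patch functions rely on the invariant established in the hot path: the StructureID immediate and the property-map displacement sit at fixed distances from hotPathBegin, so repatching is just overwriting a 32-bit field inside an already-emitted instruction. A sketch of what repatchImmediate / repatchDisplacement amount to:

```cpp
#include <cstring>
#include <stdint.h>

// 'where' points directly at the 32-bit immediate (for repatchImmediate) or
// displacement (for repatchDisplacement) inside an already-emitted x86
// instruction; overwriting it in place works on x86 without an explicit
// instruction-cache flush.
void repatch32(intptr_t where, uint32_t value)
{
    std::memcpy(reinterpret_cast<void*>(where), &value, sizeof(value));
}
```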
+
+void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
+{
+    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
+
+    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
+    // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
+
+    // Repatch the offset into the property map to store to, then repatch the StructureID to look for.
+    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
+    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
+}
+
+void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
+{
+    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
+
+    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
+    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
+
+    // Check eax is an array.
+    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
+    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
+    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
+    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
+
+    // Checks out okay! - get the length from the storage.
+    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
+    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
+
+    m_jit.addl_rr(X86::ecx, X86::ecx);
+    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
+    m_jit.addl_i8r(1, X86::ecx);
+
+    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
+
+    void* code = m_jit.copy();
+    ASSERT(code);
+
+    // Use the repatch information to link the failure cases back to the original slow case routine.
+    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
+    X86Assembler::link(code, failureCases1, slowCaseBegin);
+    X86Assembler::link(code, failureCases2, slowCaseBegin);
+    X86Assembler::link(code, failureCases3, slowCaseBegin);
+
+    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
+    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
+    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
+
+    // Track the stub we have created so that it will be deleted later.
+    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
+
+    // Finally, repatch the jump to the slow case back in the hot path to jump here instead.
+    // FIXME: should revert this repatching, on failure.
+    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
+    X86Assembler::repatchBranchOffset(jmpLocation, code);
 }
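The three arithmetic instructions at the heart of the array-length trampoline (addl ecx,ecx; jo; addl $1,ecx) convert the raw length into a tagged immediate number: the value is doubled (a left shift by one), overflow bails to the slow case, and the low bit is set as the integer tag. A sketch of the same computation; the (n << 1) | 1 encoding is JSC's immediate-integer format of this era, and __builtin_add_overflow is a GCC/Clang extension used here to model the jump-on-overflow:

```cpp
#include <stdint.h>

// Model of the length-to-immediate conversion in privateCompilePatchGetArrayLength.
bool encodeImmediateInt(int32_t length, uint32_t& encoded)
{
    int32_t doubled;
    if (__builtin_add_overflow(length, length, &doubled))
        return false;                             // jo -> failureCases3 (slow case)
    encoded = static_cast<uint32_t>(doubled) + 1; // addl_i8r(1): set the tag bit
    return true;
}
```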