Changeset 49065 in webkit for trunk/JavaScriptCore
- Timestamp: Oct 3, 2009, 10:01:14 AM (16 years ago)
- Location: trunk/JavaScriptCore
- Files: 7 edited
trunk/JavaScriptCore/ChangeLog
--- ChangeLog (r49056)
+++ ChangeLog (r49065)
@@ -1 +1 @@
+2009-10-02  Geoffrey Garen  <[email protected]>
+
+        Reviewed by Sam Weinig.
+
+        Removed the concept of a "fast access cutoff" in arrays, because it
+        punished some patterns of array access too much, and made things too
+        complex for inlining in some cases.
+
+        1.3% speedup on SunSpider.
+
+        * jit/JITOpcodes.cpp:
+        (JSC::JIT::emitSlow_op_get_by_val):
+        (JSC::JIT::emitSlow_op_put_by_val):
+        * jit/JITPropertyAccess.cpp:
+        (JSC::JIT::emit_op_get_by_val):
+        (JSC::JIT::emitSlow_op_get_by_val):
+        (JSC::JIT::emit_op_put_by_val):
+        (JSC::JIT::emitSlow_op_put_by_val):
+        * jit/JITStubs.cpp:
+        * jit/JITStubs.h:
+        (JSC::): Check m_vectorLength instead of m_fastAccessCutoff when
+        getting / putting from / to an array. Inline putting past the end of
+        the array.
+
+        * runtime/JSArray.cpp:
+        (JSC::JSArray::JSArray):
+        (JSC::JSArray::getOwnPropertySlot):
+        (JSC::JSArray::getOwnPropertyDescriptor):
+        (JSC::JSArray::put):
+        (JSC::JSArray::putSlowCase):
+        (JSC::JSArray::deleteProperty):
+        (JSC::JSArray::getOwnPropertyNames):
+        (JSC::JSArray::increaseVectorLength):
+        (JSC::JSArray::setLength):
+        (JSC::JSArray::pop):
+        (JSC::JSArray::push):
+        (JSC::JSArray::sort):
+        (JSC::JSArray::fillArgList):
+        (JSC::JSArray::copyToRegisters):
+        (JSC::JSArray::compactForSorting):
+        (JSC::JSArray::checkConsistency):
+        * runtime/JSArray.h:
+        (JSC::JSArray::canGetIndex):
+        (JSC::JSArray::canSetIndex):
+        (JSC::JSArray::setIndex):
+        (JSC::JSArray::markChildrenDirect): Removed m_fastAccessCutoff, and
+        replaced with checks for JSValue() to detect reads and writes from / to
+        uninitialized parts of the array.
+
 2009-10-02  Jonni Rainisto  <[email protected]>
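The ChangeLog describes two behavioral changes to the JIT's array fast paths: bounds checks now compare the index against m_vectorLength (a field moved onto JSArray itself), and a put that lands on an uninitialized slot inside the vector is handled inline, by bumping m_numValuesInVector and, if needed, m_length, instead of bailing to the old cti_op_put_by_val_array stub. A rough standalone C++ sketch of that inlined control flow follows; the type and function names (SketchArrayStorage, putByValFastPath) are illustrative stand-ins, not JavaScriptCore code, since the real change emits the equivalent machine code through the MacroAssembler.

    #include <vector>

    // Hypothetical model of the ArrayStorage bookkeeping touched by this change.
    struct SketchArrayStorage {
        unsigned m_length = 0;             // the array's JS "length"
        unsigned m_numValuesInVector = 0;  // count of initialized slots
        std::vector<void*> m_vector;       // stand-in for the JSValue vector; nullptr == empty slot
    };

    // Returns false when the JIT would fall back to the C++ stub (cti_op_put_by_val).
    bool putByValFastPath(SketchArrayStorage& storage, unsigned vectorLength, unsigned i, void* value)
    {
        if (i >= vectorLength)
            return false; // index outside the allocated vector: slow path

        void*& slot = storage.m_vector[i];
        if (!slot) {
            // First write to this slot: previously a slow case, now handled inline.
            ++storage.m_numValuesInVector;
            if (i >= storage.m_length)
                storage.m_length = i + 1; // putting past the end grows the length
        }
        slot = value;
        return true;
    }

The get path makes the symmetric change: a read of an empty slot inside the vector falls through to the slow case rather than being treated as a defined value.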
trunk/JavaScriptCore/jit/JITOpcodes.cpp
--- jit/JITOpcodes.cpp (r49030)
+++ jit/JITOpcodes.cpp (r49065)
@@ -2692 +2692 @@
 void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    // The slow case that handles accesses to arrays (below) may jump back up to here.
-    Label beginGetByValSlow(this);
-
-    Jump notImm = getSlowCase(iter);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    emitFastArithIntToImmNoCheck(regT1, regT1);
-
-    notImm.link(this);
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned base = currentInstruction[2].u.operand;
+    unsigned property = currentInstruction[3].u.operand;
+
+    linkSlowCase(iter); // property int32 check
+    linkSlowCaseIfNotJSCell(iter, base); // base cell check
+    linkSlowCase(iter); // base array check
+    linkSlowCase(iter); // vector length check
+    linkSlowCase(iter); // empty value
+
     JITStubCall stubCall(this, cti_op_get_by_val);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(regT1);
-    stubCall.call(currentInstruction[1].u.operand);
-    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
-    // This is the slow case that handles accesses to arrays above the fast cut-off.
-    // First, check if this is an access to the vector
-    linkSlowCase(iter);
-    branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), beginGetByValSlow);
-
-    // okay, missed the fast region, but it is still in the vector. Get the value.
-    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT2);
-    // Check whether the value loaded is zero; if so we need to return undefined.
-    branchTestPtr(Zero, regT2, beginGetByValSlow);
-    move(regT2, regT0);
-    emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
+    stubCall.addArgument(base, regT2);
+    stubCall.addArgument(property, regT2);
+    stubCall.call(dst);
 }
@@ -2774 +2762 @@
 void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    // Normal slow cases - either is not an immediate imm, or is an array.
-    Jump notImm = getSlowCase(iter);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    emitFastArithIntToImmNoCheck(regT1, regT1);
-
-    notImm.link(this); {
-        JITStubCall stubCall(this, cti_op_put_by_val);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(regT1);
-        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-        stubCall.call();
-        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
-    }
-
-    // slow cases for immediate int accesses to arrays
-    linkSlowCase(iter);
-    linkSlowCase(iter); {
-        JITStubCall stubCall(this, cti_op_put_by_val_array);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(regT1);
-        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-        stubCall.call();
-    }
+    unsigned base = currentInstruction[1].u.operand;
+    unsigned property = currentInstruction[2].u.operand;
+    unsigned value = currentInstruction[3].u.operand;
+
+    linkSlowCase(iter); // property int32 check
+    linkSlowCaseIfNotJSCell(iter, base); // base cell check
+    linkSlowCase(iter); // base not array check
+    linkSlowCase(iter); // in vector check
+
+    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+    stubPutByValCall.addArgument(regT0);
+    stubPutByValCall.addArgument(property, regT2);
+    stubPutByValCall.addArgument(value, regT2);
+    stubPutByValCall.call();
 }
trunk/JavaScriptCore/jit/JITPropertyAccess.cpp
--- jit/JITPropertyAccess.cpp (r49030)
+++ jit/JITPropertyAccess.cpp (r49065)
@@ -274 +274 @@
     emitJumpSlowCaseIfNotJSCell(base, regT1);
     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
-
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
+    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
+    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
+
     emitStore(dst, regT1, regT0);
     map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
@@ -289 +292 @@
     unsigned property = currentInstruction[3].u.operand;
 
-    // The slow case that handles accesses to arrays (below) may jump back up to here.
-    Label callGetByValJITStub(this);
-
     linkSlowCase(iter); // property int32 check
     linkSlowCaseIfNotJSCell(iter, base); // base cell check
     linkSlowCase(iter); // base array check
+    linkSlowCase(iter); // vector length check
+    linkSlowCase(iter); // empty value
 
     JITStubCall stubCall(this, cti_op_get_by_val);
@@ -300 +302 @@
     stubCall.addArgument(property);
     stubCall.call(dst);
-
-    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
-    linkSlowCase(iter); // array fast cut-off check
-
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
-    branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), callGetByValJITStub);
-
-    // Missed the fast region, but it is still in the vector.
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
-    branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)).linkTo(callGetByValJITStub, this);
-
-    emitStore(dst, regT1, regT0);
 }
@@ -327 +315 @@
     emitJumpSlowCaseIfNotJSCell(base, regT1);
     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
 
-    Jump inFastVector = branch32(Below, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
-
-    // Check if the access is within the vector.
-    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
-
-    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
-    // FIXME: should be able to handle initial write to array; increment the the number of items in the array, and potentially update fast access cutoff.
-    addSlowCase(branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag)));
-
-    inFastVector.link(this);
-
+    Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag));
+
+    Label storeResult(this);
     emitLoad(value, regT1, regT0);
     store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
     store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
+    Jump end = jump();
+
+    empty.link(this);
+    add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+    branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+    add32(Imm32(1), regT2, regT0);
+    store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+    jump().linkTo(storeResult, this);
+
+    end.link(this);
 }
@@ -354 +347 @@
     linkSlowCaseIfNotJSCell(iter, base); // base cell check
     linkSlowCase(iter); // base not array check
+    linkSlowCase(iter); // in vector check
 
     JITStubCall stubPutByValCall(this, cti_op_put_by_val);
@@ -360 +354 @@
     stubPutByValCall.addArgument(value);
     stubPutByValCall.call();
-
-    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
-    // Slow cases for immediate int accesses to arrays.
-    linkSlowCase(iter); // in vector check
-    linkSlowCase(iter); // written to slot check
-
-    JITStubCall stubCall(this, cti_op_put_by_val_array);
-    stubCall.addArgument(regT1, regT0);
-    stubCall.addArgument(regT2);
-    stubCall.addArgument(value);
-    stubCall.call();
 }
@@ -953 +935 @@
 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
 {
-    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned base = currentInstruction[2].u.operand;
+    unsigned property = currentInstruction[3].u.operand;
+
+    emitGetVirtualRegisters(base, regT0, property, regT1);
     emitJumpSlowCaseIfNotImmediateInteger(regT1);
 #if USE(JSVALUE64)
     // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
-    // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
-    // number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
+    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
+    // number was signed since m_vectorLength is always less than intmax (since the total allocation
     // size is always less than 4Gb). As such zero extending wil have been correct (and extending the value
     // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
@@ -966 +952 @@
     emitFastArithImmToInt(regT1);
 #endif
-    emitJumpSlowCaseIfNotJSCell(regT0);
+    emitJumpSlowCaseIfNotJSCell(regT0, base);
     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
 
-    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
-
-    // Get the value from the vector
+    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
     loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
-    emitPutVirtualRegister(currentInstruction[1].u.operand);
+    addSlowCase(branchTestPtr(Zero, regT0));
+
+    emitPutVirtualRegister(dst);
 }
 
 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
 {
-    emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
+    unsigned base = currentInstruction[1].u.operand;
+    unsigned property = currentInstruction[2].u.operand;
+    unsigned value = currentInstruction[3].u.operand;
+
+    emitGetVirtualRegisters(base, regT0, property, regT1);
     emitJumpSlowCaseIfNotImmediateInteger(regT1);
 #if USE(JSVALUE64)
@@ -988 +978 @@
     emitFastArithImmToInt(regT1);
 #endif
-    emitJumpSlowCaseIfNotJSCell(regT0);
+    emitJumpSlowCaseIfNotJSCell(regT0, base);
     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-
-    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
+    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-    Jump inFastVector = branch32(Below, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
-    // No; oh well, check if the access if within the vector - if so, we may still be okay.
-    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
-
-    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
-    // FIXME: should be able to handle initial write to array; increment the the number of items in the array, and potentially update fast access cutoff.
-    addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
-
-    // All good - put the value into the array.
-    inFastVector.link(this);
-    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+
+    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+
+    Label storeResult(this);
+    emitGetVirtualRegister(value, regT0);
     storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+    Jump end = jump();
+
+    empty.link(this);
+    add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+    branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+    move(regT1, regT0);
+    add32(Imm32(1), regT0);
+    store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+    jump().linkTo(storeResult, this);
+
+    end.link(this);
 }
trunk/JavaScriptCore/jit/JITStubs.cpp
--- jit/JITStubs.cpp (r48774)
+++ jit/JITStubs.cpp (r49065)
@@ -1938 +1938 @@
             baseValue.put(callFrame, property, value, slot);
         }
-    }
-
-    CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_val_array)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-
-    CallFrame* callFrame = stackFrame.callFrame;
-    JSValue baseValue = stackFrame.args[0].jsValue();
-    int i = stackFrame.args[1].int32();
-    JSValue value = stackFrame.args[2].jsValue();
-
-    ASSERT(isJSArray(stackFrame.globalData, baseValue));
-
-    if (LIKELY(i >= 0))
-        asArray(baseValue)->JSArray::put(callFrame, i, value);
-    else {
-        Identifier property(callFrame, UString::from(i));
-        PutPropertySlot slot;
-        baseValue.put(callFrame, property, value, slot);
     }
trunk/JavaScriptCore/jit/JITStubs.h
--- jit/JITStubs.h (r48574)
+++ jit/JITStubs.h (r49065)
@@ -346 +346 @@
 void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
 void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
-void JIT_STUB cti_op_put_by_val_array(STUB_ARGS_DECLARATION);
 void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
 void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
trunk/JavaScriptCore/runtime/JSArray.cpp
--- runtime/JSArray.cpp (r48836)
+++ runtime/JSArray.cpp (r49065)
@@ -137 +137 @@
 
     m_storage = static_cast<ArrayStorage*>(fastZeroedMalloc(storageSize(initialCapacity)));
-    m_storage->m_vectorLength = initialCapacity;
-
-    m_fastAccessCutoff = 0;
+    m_vectorLength = initialCapacity;
 
     checkConsistency();
@@ -151 +149 @@
     m_storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
     m_storage->m_length = initialLength;
-    m_storage->m_vectorLength = initialCapacity;
+    m_vectorLength = initialCapacity;
     m_storage->m_numValuesInVector = 0;
     m_storage->m_sparseValueMap = 0;
@@ -160 +158 @@
         vector[i] = JSValue();
 
-    m_fastAccessCutoff = 0;
-
     checkConsistency();
@@ -174 +170 @@
     m_storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
     m_storage->m_length = initialCapacity;
-    m_storage->m_vectorLength = initialCapacity;
+    m_vectorLength = initialCapacity;
     m_storage->m_numValuesInVector = initialCapacity;
     m_storage->m_sparseValueMap = 0;
@@ -183 +179 @@
         m_storage->m_vector[i] = *it;
 
-    m_fastAccessCutoff = initialCapacity;
-
     checkConsistency();
@@ -208 +202 @@
     }
 
-    if (i < storage->m_vectorLength) {
+    if (i < m_vectorLength) {
         JSValue& valueSlot = storage->m_vector[i];
         if (valueSlot) {
@@ -254 +248 @@
     if (i >= m_storage->m_length)
         return false;
-    if (i < m_storage->m_vectorLength) {
-        JSValue value = m_storage->m_vector[i];
+    if (i < m_vectorLength) {
+        JSValue& value = m_storage->m_vector[i];
         if (value) {
             descriptor.setDescriptor(value, 0);
@@ -306 +300 @@
     }
 
-    if (i < m_storage->m_vectorLength) {
+    if (i < m_vectorLength) {
         JSValue& valueSlot = m_storage->m_vector[i];
         if (valueSlot) {
@@ -314 +308 @@
         }
         valueSlot = value;
-        if (++m_storage->m_numValuesInVector == m_storage->m_length)
-            m_fastAccessCutoff = m_storage->m_length;
+        ++m_storage->m_numValuesInVector;
         checkConsistency();
         return;
@@ -353 +346 @@
         storage = m_storage;
         storage->m_vector[i] = value;
-        if (++storage->m_numValuesInVector == storage->m_length)
-            m_fastAccessCutoff = storage->m_length;
+        ++storage->m_numValuesInVector;
         checkConsistency();
     } else
@@ -364 +356 @@
         unsigned newNumValuesInVector = storage->m_numValuesInVector + 1;
         unsigned newVectorLength = increasedVectorLength(i + 1);
-        for (unsigned j = max(storage->m_vectorLength, MIN_SPARSE_ARRAY_INDEX); j < newVectorLength; ++j)
+        for (unsigned j = max(m_vectorLength, MIN_SPARSE_ARRAY_INDEX); j < newVectorLength; ++j)
             newNumValuesInVector += map->contains(j);
         if (i >= MIN_SPARSE_ARRAY_INDEX)
@@ -387 +379 @@
     }
 
-    unsigned vectorLength = storage->m_vectorLength;
+    unsigned vectorLength = m_vectorLength;
 
     Heap::heap(this)->reportExtraMemoryCost(storageSize(newVectorLength) - storageSize(vectorLength));
@@ -405 +397 @@
         storage->m_vector[i] = value;
 
-    storage->m_vectorLength = newVectorLength;
+    m_vectorLength = newVectorLength;
     storage->m_numValuesInVector = newNumValuesInVector;
@@ -432 +424 @@
     ArrayStorage* storage = m_storage;
 
-    if (i < storage->m_vectorLength) {
+    if (i < m_vectorLength) {
         JSValue& valueSlot = storage->m_vector[i];
         if (!valueSlot) {
@@ -440 +432 @@
         valueSlot = JSValue();
         --storage->m_numValuesInVector;
-        if (m_fastAccessCutoff > i)
-            m_fastAccessCutoff = i;
        checkConsistency();
        return true;
@@ -473 +463 @@
     ArrayStorage* storage = m_storage;
 
-    unsigned usedVectorLength = min(storage->m_length, storage->m_vectorLength);
+    unsigned usedVectorLength = min(storage->m_length, m_vectorLength);
     for (unsigned i = 0; i < usedVectorLength; ++i) {
         if (storage->m_vector[i])
@@ -495 +485 @@
     ArrayStorage* storage = m_storage;
 
-    unsigned vectorLength = storage->m_vectorLength;
+    unsigned vectorLength = m_vectorLength;
     ASSERT(newLength > vectorLength);
     ASSERT(newLength <= MAX_STORAGE_VECTOR_INDEX);
@@ -504 +494 @@
 
     Heap::heap(this)->reportExtraMemoryCost(storageSize(newVectorLength) - storageSize(vectorLength));
-    storage->m_vectorLength = newVectorLength;
+    m_vectorLength = newVectorLength;
 
     for (unsigned i = vectorLength; i < newVectorLength; ++i)
@@ -522 +512 @@
 
     if (newLength < length) {
-        if (m_fastAccessCutoff > newLength)
-            m_fastAccessCutoff = newLength;
-
-        unsigned usedVectorLength = min(length, storage->m_vectorLength);
+        unsigned usedVectorLength = min(length, m_vectorLength);
         for (unsigned i = newLength; i < usedVectorLength; ++i) {
             JSValue& valueSlot = storage->m_vector[i];
@@ -564 +551 @@
     JSValue result;
 
-    if (m_fastAccessCutoff > length) {
+    if (length < m_vectorLength) {
         JSValue& valueSlot = m_storage->m_vector[length];
-        result = valueSlot;
-        ASSERT(result);
-        valueSlot = JSValue();
-        --m_storage->m_numValuesInVector;
-        m_fastAccessCutoff = length;
-    } else if (length < m_storage->m_vectorLength) {
-        JSValue& valueSlot = m_storage->m_vector[length];
-        result = valueSlot;
-        valueSlot = JSValue();
-        if (result)
+        if (valueSlot) {
             --m_storage->m_numValuesInVector;
-        else
+            result = valueSlot;
+            valueSlot = JSValue();
+        } else
             result = jsUndefined();
     } else {
@@ -605 +585 @@
     checkConsistency();
 
-    if (m_storage->m_length < m_storage->m_vectorLength) {
-        ASSERT(!m_storage->m_vector[m_storage->m_length]);
+    if (m_storage->m_length < m_vectorLength) {
         m_storage->m_vector[m_storage->m_length] = value;
-        if (++m_storage->m_numValuesInVector == ++m_storage->m_length)
-            m_fastAccessCutoff = m_storage->m_length;
+        ++m_storage->m_numValuesInVector;
+        ++m_storage->m_length;
         checkConsistency();
         return;
@@ -619 +598 @@
     if (increaseVectorLength(m_storage->m_length + 1)) {
         m_storage->m_vector[m_storage->m_length] = value;
-        if (++m_storage->m_numValuesInVector == ++m_storage->m_length)
-            m_fastAccessCutoff = m_storage->m_length;
+        ++m_storage->m_numValuesInVector;
+        ++m_storage->m_length;
         checkConsistency();
         return;
@@ -838 +817 @@
         return;
 
-    unsigned usedVectorLength = min(m_storage->m_length, m_storage->m_vectorLength);
+    unsigned usedVectorLength = min(m_storage->m_length, m_vectorLength);
 
     AVLTree<AVLTreeAbstractorForArrayCompare, 44> tree; // Depth 44 is enough for 2^31 items
@@ -887 +866 @@
     if (SparseArrayValueMap* map = m_storage->m_sparseValueMap) {
         newUsedVectorLength += map->size();
-        if (newUsedVectorLength > m_storage->m_vectorLength) {
+        if (newUsedVectorLength > m_vectorLength) {
             // Check that it is possible to allocate an array large enough to hold all the entries.
             if ((newUsedVectorLength > MAX_STORAGE_VECTOR_LENGTH) || !increaseVectorLength(newUsedVectorLength)) {
@@ -927 +906 @@
         m_storage->m_vector[i] = JSValue();
 
-    m_fastAccessCutoff = newUsedVectorLength;
     m_storage->m_numValuesInVector = newUsedVectorLength;
@@ -935 +913 @@
 void JSArray::fillArgList(ExecState* exec, MarkedArgumentBuffer& args)
 {
-    unsigned fastAccessLength = min(m_storage->m_length, m_fastAccessCutoff);
+    JSValue* vector = m_storage->m_vector;
+    unsigned vectorEnd = min(m_storage->m_length, m_vectorLength);
     unsigned i = 0;
-    for (; i < fastAccessLength; ++i)
-        args.append(getIndex(i));
+    for (; i < vectorEnd; ++i) {
+        JSValue& v = vector[i];
+        if (!v)
+            break;
+        args.append(v);
+    }
+
     for (; i < m_storage->m_length; ++i)
         args.append(get(exec, i));
@@ -947 +931 @@
     ASSERT(m_storage->m_length == maxSize);
     UNUSED_PARAM(maxSize);
-    unsigned fastAccessLength = min(m_storage->m_length, m_fastAccessCutoff);
+    JSValue* vector = m_storage->m_vector;
+    unsigned vectorEnd = min(m_storage->m_length, m_vectorLength);
     unsigned i = 0;
-    for (; i < fastAccessLength; ++i)
-        buffer[i] = getIndex(i);
-    uint32_t size = m_storage->m_length;
-    for (; i < size; ++i)
+    for (; i < vectorEnd; ++i) {
+        JSValue& v = vector[i];
+        if (!v)
+            break;
+        buffer[i] = v;
+    }
+
+    for (; i < m_storage->m_length; ++i)
         buffer[i] = get(exec, i);
@@ -962 +951 @@
     ArrayStorage* storage = m_storage;
 
-    unsigned usedVectorLength = min(m_storage->m_length, storage->m_vectorLength);
+    unsigned usedVectorLength = min(m_storage->m_length, m_vectorLength);
 
     unsigned numDefined = 0;
@@ -986 +975 @@
     if (SparseArrayValueMap* map = storage->m_sparseValueMap) {
         newUsedVectorLength += map->size();
-        if (newUsedVectorLength > storage->m_vectorLength) {
+        if (newUsedVectorLength > m_vectorLength) {
             // Check that it is possible to allocate an array large enough to hold all the entries - if not,
             // exception is thrown by caller.
@@ -1007 +996 @@
         storage->m_vector[i] = JSValue();
 
-    m_fastAccessCutoff = newUsedVectorLength;
     storage->m_numValuesInVector = newUsedVectorLength;
@@ -1033 +1021 @@
     ASSERT(!m_storage->m_sparseValueMap);
 
-    ASSERT(m_fastAccessCutoff <= m_storage->m_length);
-    ASSERT(m_fastAccessCutoff <= m_storage->m_numValuesInVector);
-
     unsigned numValuesInVector = 0;
-    for (unsigned i = 0; i < m_storage->m_vectorLength; ++i) {
+    for (unsigned i = 0; i < m_vectorLength; ++i) {
         if (JSValue value = m_storage->m_vector[i]) {
             ASSERT(i < m_storage->m_length);
@@ -1044 +1029 @@
             ++numValuesInVector;
         } else {
-            ASSERT(i >= m_fastAccessCutoff);
             if (type == SortConsistencyCheck)
                 ASSERT(i >= m_storage->m_numValuesInVector);
@@ -1050 +1034 @@
     }
     ASSERT(numValuesInVector == m_storage->m_numValuesInVector);
+    ASSERT(numValuesInVector <= m_storage->m_length);
 
     if (m_storage->m_sparseValueMap) {
@@ -1056 +1041 @@
         unsigned index = it->first;
         ASSERT(index < m_storage->m_length);
-        ASSERT(index >= m_storage->m_vectorLength);
+        ASSERT(index >= m_vectorLength);
         ASSERT(index <= MAX_ARRAY_INDEX);
         ASSERT(it->second);
trunk/JavaScriptCore/runtime/JSArray.h
--- runtime/JSArray.h (r48836)
+++ runtime/JSArray.h (r49065)
@@ -30 +30 @@
     struct ArrayStorage {
         unsigned m_length;
-        unsigned m_vectorLength;
        unsigned m_numValuesInVector;
        SparseArrayValueMap* m_sparseValueMap;
@@ -64 +63 @@
        JSValue pop();
 
-        bool canGetIndex(unsigned i) { return i < m_fastAccessCutoff; }
+        bool canGetIndex(unsigned i) { return i < m_vectorLength && m_storage->m_vector[i]; }
        JSValue getIndex(unsigned i)
        {
@@ -71 +70 @@
        }
 
-        bool canSetIndex(unsigned i) { return i < m_fastAccessCutoff; }
-        JSValue setIndex(unsigned i, JSValue v)
+        bool canSetIndex(unsigned i) { return i < m_vectorLength; }
+        void setIndex(unsigned i, JSValue v)
        {
            ASSERT(canSetIndex(i));
-            return m_storage->m_vector[i] = v;
+            JSValue& x = m_storage->m_vector[i];
+            if (!x) {
+                ++m_storage->m_numValuesInVector;
+                if (i >= m_storage->m_length)
+                    m_storage->m_length = i + 1;
+            }
+            x = v;
        }
 
@@ -111 +116 @@
        void checkConsistency(ConsistencyCheckType = NormalConsistencyCheck);
 
-        unsigned m_fastAccessCutoff;
+        unsigned m_vectorLength;
        ArrayStorage* m_storage;
    };
@@ -140 +145 @@
    ArrayStorage* storage = m_storage;
 
-    unsigned usedVectorLength = std::min(storage->m_length, storage->m_vectorLength);
+    unsigned usedVectorLength = std::min(storage->m_length, m_vectorLength);
    markStack.appendValues(storage->m_vector, usedVectorLength, MayContainNullValues);
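One consequence visible in the JSArray.h hunks above: with the cutoff gone, canGetIndex() must also reject holes, because an empty slot may be shadowed by an indexed property further up the prototype chain and therefore needs the generic lookup. Below is a self-contained sketch of that caller contract; the names (ModelArray, getWithFallback) are hypothetical stand-ins, not the real JSArray API.

    #include <optional>
    #include <vector>

    // Hypothetical stand-in for JSArray's vector storage; std::nullopt models an empty JSValue().
    struct ModelArray {
        unsigned m_vectorLength = 0;
        std::vector<std::optional<int>> m_vector;

        // Mirrors the revised helpers: fast access requires an in-bounds, initialized slot.
        bool canGetIndex(unsigned i) const { return i < m_vectorLength && m_vector[i].has_value(); }
        int getIndex(unsigned i) const { return *m_vector[i]; } // only valid when canGetIndex(i) is true
    };

    int getWithFallback(const ModelArray& a, unsigned i, int genericLookupResult)
    {
        // Holes, out-of-vector indices, and sparse entries all take the generic path.
        return a.canGetIndex(i) ? a.getIndex(i) : genericLookupResult;
    }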