source: webkit/trunk/JavaScriptCore/kjs/PropertyMap.cpp@ 36032

Last change on this file since 36032 was 36032, checked in by [email protected], 17 years ago

JavaScriptCore:

2008-09-02 Geoffrey Garen <[email protected]>

Reviewed by Sam Weinig.


Implemented the rest of Darin's review comments for the 09-01 inline
caching patch.


SunSpider says 0.5% faster, but that seems like noise.

  • JavaScriptCore.xcodeproj/project.pbxproj: Put PutPropertySlot into its own file, and added BatchedTransitionOptimizer.
  • VM/CodeBlock.cpp: (KJS::CodeBlock::~CodeBlock): Use array indexing instead of a pointer iterator.
  • VM/CodeGenerator.cpp: (KJS::CodeGenerator::CodeGenerator): Used BatchedTransitionOptimizer to make batched put and remove for declared variables fast, without forever pessimizing the global object. Removed the old getDirect/removeDirect hack that tried to do the same in a more limited way. (A sketch of the batching pattern appears at the end of this entry.)
  • VM/CodeGenerator.h: Moved IdentifierRepHash to the KJS namespace since it doesn't specialize anything in WTF.
  • VM/Machine.cpp: (KJS::Machine::Machine): Nixed the DummyConstruct tag because it was confusingly named.

(KJS::Machine::execute): Used BatchedTransitionOptimizer, as above. Fixed
up some comments.

(KJS::cachePrototypeChain): Cast to JSObject*, since it's more specific.

(KJS::Machine::tryCachePutByID): Use isNull() instead of comparing to
jsNull(), since isNull() leaves more options open for the future.
(KJS::Machine::tryCacheGetByID): ditto
(KJS::Machine::privateExecute): ditto
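
As a minimal, self-contained illustration of the isNull() point (toy classes assumed for this sketch; not the real KJS declarations): asking a value whether it is null keeps the representation of null an implementation detail, whereas comparing against the shared null cell hard-codes one particular representation.

#include <cassert>

// Toy stand-ins, not the real JSValue/jsNull().
class ToyValue {
public:
    virtual ~ToyValue() { }
    virtual bool isNull() const { return false; }
};

class ToyNullValue : public ToyValue {
public:
    virtual bool isNull() const { return true; }
};

static ToyValue* toyNull()
{
    static ToyNullValue shared;
    return &shared;
}

int main()
{
    ToyValue* prototype = toyNull();

    assert(prototype == toyNull()); // old style: assumes null is one shared cell
    assert(prototype->isNull());    // preferred: survives a change of representation

    return 0;
}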

  • VM/SamplingTool.cpp: (KJS::SamplingTool::dump): Use C++-style cast, to match our style guidelines.
  • kjs/BatchedTransitionOptimizer.h: Added. New class that allows host code to add a batch of properties to an object in an efficient way.
  • kjs/JSActivation.cpp: Use isNull(), as above.
  • kjs/JSArray.cpp: Get rid of DummyConstruct tag, as above.
  • kjs/JSArray.h:
  • kjs/JSGlobalData.cpp: Nixed two unused StructureIDs.
  • kjs/JSGlobalData.h:
  • kjs/JSImmediate.cpp: Use isNull(), as above.
  • kjs/JSObject.cpp: (KJS::JSObject::mark): Moved mark tracing code elsewhere, to make this function more readable.

(KJS::JSObject::put): Use isNull(), as above.

(KJS::JSObject::createInheritorID): Return a raw pointer, since the
object is owned by a data member, not necessarily the caller.
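
A hedged sketch of that ownership comment (toy types; the real code is only said to keep the object in a data member, assumed here to be a smart pointer): the creator stores the new object in the member and returns a raw, borrowed pointer, so the caller never takes ownership.

#include <memory>

struct ToyStructureID { };

class ToyObject {
public:
    // Returns a borrowed pointer; ToyObject keeps ownership in m_inheritorID.
    ToyStructureID* createInheritorID()
    {
        m_inheritorID.reset(new ToyStructureID);
        return m_inheritorID.get();
    }

private:
    std::unique_ptr<ToyStructureID> m_inheritorID; // RefPtr in the real code (assumption)
};

int main()
{
    ToyObject object;
    ToyStructureID* inheritorID = object.createInheritorID(); // borrowed, do not delete
    (void)inheritorID;
    return 0;
}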

  • kjs/JSObject.h:
  • kjs/JSString.cpp: Use isNull(), as above.
  • kjs/PropertyMap.h: Updated to use PropertySlot::invalidOffset.
  • kjs/PropertySlot.h: Changed KJS_INVALID_OFFSET to WTF::notFound because C macros are so 80's.
  • kjs/PutPropertySlot.h: Added. Split out of PropertySlot.h. Also renamed PutPropertySlot::SlotType to PutPropertySlot::Type, and slotBase to base, since "slot" was redundant.
  • kjs/StructureID.cpp: Added a new transition *away* from dictionary status, to support BatchedTransitionOptimizer.

(KJS::StructureIDChain::StructureIDChain): No need to store m_size as
a data member, so keep it in a local, which might be faster.

  • kjs/StructureID.h:
  • kjs/SymbolTable.h: Moved IdentifierRepHash to KJS namespace, as above.
  • kjs/ustring.h:
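
To make the batching idea concrete (see the BatchedTransitionOptimizer and StructureID entries above), here is a hedged, self-contained sketch of the pattern, with toy names modeled on this ChangeLog rather than taken from the actual headers: a scope guard flips an object into an uncacheable "dictionary" state before a burst of property additions, so the burst does not generate one structure transition per property, and transitions it back to a cacheable state when the batch ends.

#include <cstdio>
#include <string>
#include <vector>

// Toy object that counts structure transitions; stands in for a JSObject
// whose StructureID would change every time a property is added while cacheable.
class ToyObject {
public:
    ToyObject() : m_isDictionary(false), m_transitionCount(0) { }

    void addProperty(const std::string& name)
    {
        m_properties.push_back(name);
        if (!m_isDictionary)
            ++m_transitionCount; // each add would create a new cached structure
    }

    void becomeDictionary() { m_isDictionary = true; } // models toDictionaryTransition (assumed name)
    void leaveDictionary() // models the new transition away from dictionary status (assumed name)
    {
        m_isDictionary = false;
        ++m_transitionCount; // one transition back to a cacheable structure
    }

    int transitionCount() const { return m_transitionCount; }

private:
    bool m_isDictionary;
    int m_transitionCount;
    std::vector<std::string> m_properties;
};

// Scope guard modeled on BatchedTransitionOptimizer: suppress per-property
// transitions for the duration of a batch, then restore cacheability once.
class ToyBatchedTransitionOptimizer {
public:
    explicit ToyBatchedTransitionOptimizer(ToyObject& object) : m_object(object)
    {
        m_object.becomeDictionary();
    }
    ~ToyBatchedTransitionOptimizer()
    {
        m_object.leaveDictionary();
    }

private:
    ToyObject& m_object;
};

int main()
{
    ToyObject globalObject;
    {
        ToyBatchedTransitionOptimizer batch(globalObject);
        for (int i = 0; i < 100; ++i)
            globalObject.addProperty("declaredVariable");
    }
    // Prints "transitions: 1": one transition for the whole batch instead of one per property.
    std::printf("transitions: %d\n", globalObject.transitionCount());
    return 0;
}

Without the guard, each of the 100 additions above would have produced its own transition; with it, the object pays a single transition when the batch ends, which matches the stated goal of batched puts and removes that don't forever pessimize the global object.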

JavaScriptGlue:

2008-09-02 Geoffrey Garen <[email protected]>

Reviewed by Sam Weinig.


Implemented the rest of Darin's review comments for the 09-01 inline
caching patch.


  • ForwardingHeaders/kjs/PutPropertySlot.h: Added.
  • Property svn:eol-style set to native
File size: 20.5 KB
/*
 * Copyright (C) 2004, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include "config.h"
#include "PropertyMap.h"

#include "JSObject.h"
#include "protect.h"
#include "PropertyNameArray.h"
#include <algorithm>
#include <wtf/Assertions.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashTable.h>
#include <wtf/Vector.h>

using std::max;
using WTF::doubleHash;

#ifndef NDEBUG
#define DO_PROPERTYMAP_CONSTENCY_CHECK 0
#define DUMP_PROPERTYMAP_STATS 0
#else
#define DO_PROPERTYMAP_CONSTENCY_CHECK 0
#define DUMP_PROPERTYMAP_STATS 0
#endif

#define USE_SINGLE_ENTRY 1

namespace KJS {

// Choose a number for the following so that most property maps are smaller,
// but it's not going to blow out the stack to allocate this number of pointers.
static const int smallMapThreshold = 1024;

// The point at which the function call overhead of the qsort implementation
// becomes small compared to the inefficiency of insertion sort.
static const unsigned tinyMapThreshold = 20;

#if DUMP_PROPERTYMAP_STATS

static int numProbes;
static int numCollisions;
static int numRehashes;
static int numRemoves;

struct PropertyMapStatisticsExitLogger {
    ~PropertyMapStatisticsExitLogger();
};

static PropertyMapStatisticsExitLogger logger;

PropertyMapStatisticsExitLogger::~PropertyMapStatisticsExitLogger()
{
    printf("\nKJS::PropertyMap statistics\n\n");
    printf("%d probes\n", numProbes);
    printf("%d collisions (%.1f%%)\n", numCollisions, 100.0 * numCollisions / numProbes);
    printf("%d rehashes\n", numRehashes);
    printf("%d removes\n", numRemoves);
}

#endif

static const unsigned emptyEntryIndex = 0;
static const unsigned deletedSentinelIndex = 1;

#if !DO_PROPERTYMAP_CONSTENCY_CHECK

inline void PropertyMap::checkConsistency()
{
}

#endif

PropertyMap::~PropertyMap()
{
    if (!m_usingTable) {
#if USE_SINGLE_ENTRY
        if (m_singleEntryKey)
            m_singleEntryKey->deref();
#endif
        return;
    }

    unsigned entryCount = m_u.table->keyCount + m_u.table->deletedSentinelCount;
    for (unsigned i = 1; i <= entryCount; i++) {
        if (UString::Rep* key = m_u.table->entries()[i].key)
            key->deref();
    }
    fastFree(m_u.table);
}

JSValue* PropertyMap::get(const Identifier& propertyName, unsigned& attributes) const
{
    ASSERT(!propertyName.isNull());

    UString::Rep* rep = propertyName._ustring.rep();

    if (!m_usingTable) {
#if USE_SINGLE_ENTRY
        if (rep == m_singleEntryKey) {
            attributes = m_singleEntryAttributes;
            return m_u.singleEntryValue;
        }
#endif
        return 0;
    }

    unsigned i = rep->computedHash();

#if DUMP_PROPERTYMAP_STATS
    ++numProbes;
#endif

    unsigned entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
    if (entryIndex == emptyEntryIndex)
        return 0;

    if (rep == m_u.table->entries()[entryIndex - 1].key) {
        attributes = m_u.table->entries()[entryIndex - 1].attributes;
        return m_u.table->entries()[entryIndex - 1].value;
    }

#if DUMP_PROPERTYMAP_STATS
    ++numCollisions;
#endif

    unsigned k = 1 | doubleHash(rep->computedHash());

    while (1) {
        i += k;

#if DUMP_PROPERTYMAP_STATS
        ++numRehashes;
#endif

        entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
        if (entryIndex == emptyEntryIndex)
            return 0;

        if (rep == m_u.table->entries()[entryIndex - 1].key) {
            attributes = m_u.table->entries()[entryIndex - 1].attributes;
            return m_u.table->entries()[entryIndex - 1].value;
        }
    }
}

JSValue* PropertyMap::get(const Identifier& propertyName) const
{
    ASSERT(!propertyName.isNull());

    UString::Rep* rep = propertyName._ustring.rep();

    if (!m_usingTable) {
#if USE_SINGLE_ENTRY
        if (rep == m_singleEntryKey)
            return m_u.singleEntryValue;
#endif
        return 0;
    }

    unsigned i = rep->computedHash();

#if DUMP_PROPERTYMAP_STATS
    ++numProbes;
#endif

    unsigned entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
    if (entryIndex == emptyEntryIndex)
        return 0;

    if (rep == m_u.table->entries()[entryIndex - 1].key)
        return m_u.table->entries()[entryIndex - 1].value;

#if DUMP_PROPERTYMAP_STATS
    ++numCollisions;
#endif

    unsigned k = 1 | doubleHash(rep->computedHash());

    while (1) {
        i += k;

#if DUMP_PROPERTYMAP_STATS
        ++numRehashes;
#endif

        entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
        if (entryIndex == emptyEntryIndex)
            return 0;

        if (rep == m_u.table->entries()[entryIndex - 1].key)
            return m_u.table->entries()[entryIndex - 1].value;
    }
}

JSValue** PropertyMap::getLocation(const Identifier& propertyName)
{
    ASSERT(!propertyName.isNull());

    UString::Rep* rep = propertyName._ustring.rep();

    if (!m_usingTable) {
#if USE_SINGLE_ENTRY
        if (rep == m_singleEntryKey)
            return &m_u.singleEntryValue;
#endif
        return 0;
    }

    unsigned i = rep->computedHash();

#if DUMP_PROPERTYMAP_STATS
    ++numProbes;
#endif

    unsigned entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
    if (entryIndex == emptyEntryIndex)
        return 0;

    if (rep == m_u.table->entries()[entryIndex - 1].key)
        return &m_u.table->entries()[entryIndex - 1].value;

#if DUMP_PROPERTYMAP_STATS
    ++numCollisions;
#endif

    unsigned k = 1 | doubleHash(rep->computedHash());

    while (1) {
        i += k;

#if DUMP_PROPERTYMAP_STATS
        ++numRehashes;
#endif

        entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
        if (entryIndex == emptyEntryIndex)
            return 0;

        if (rep == m_u.table->entries()[entryIndex - 1].key)
            return &m_u.table->entries()[entryIndex - 1].value;
    }
}

JSValue** PropertyMap::getLocation(const Identifier& propertyName, bool& isWriteable)
{
    ASSERT(!propertyName.isNull());

    UString::Rep* rep = propertyName._ustring.rep();

    if (!m_usingTable) {
#if USE_SINGLE_ENTRY
        if (rep == m_singleEntryKey) {
            isWriteable = !(m_singleEntryAttributes & ReadOnly);
            return &m_u.singleEntryValue;
        }
#endif
        return 0;
    }

    unsigned i = rep->computedHash();

#if DUMP_PROPERTYMAP_STATS
    ++numProbes;
#endif

    unsigned entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
    if (entryIndex == emptyEntryIndex)
        return 0;

    if (rep == m_u.table->entries()[entryIndex - 1].key) {
        isWriteable = !(m_u.table->entries()[entryIndex - 1].attributes & ReadOnly);
        return &m_u.table->entries()[entryIndex - 1].value;
    }

#if DUMP_PROPERTYMAP_STATS
    ++numCollisions;
#endif

    unsigned k = 1 | doubleHash(rep->computedHash());

    while (1) {
        i += k;

#if DUMP_PROPERTYMAP_STATS
        ++numRehashes;
#endif

        entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
        if (entryIndex == emptyEntryIndex)
            return 0;

        if (rep == m_u.table->entries()[entryIndex - 1].key) {
            isWriteable = !(m_u.table->entries()[entryIndex - 1].attributes & ReadOnly);
            return &m_u.table->entries()[entryIndex - 1].value;
        }
    }
}

void PropertyMap::put(const Identifier& propertyName, JSValue* value, unsigned attributes, bool checkReadOnly, JSObject* slotBase, PutPropertySlot& slot)
{
    ASSERT(!propertyName.isNull());
    ASSERT(value);

    checkConsistency();

    UString::Rep* rep = propertyName._ustring.rep();

#if USE_SINGLE_ENTRY
    if (!m_usingTable) {
        if (!m_singleEntryKey) {
            rep->ref();
            m_singleEntryKey = rep;
            m_u.singleEntryValue = value;
            m_singleEntryAttributes = static_cast<short>(attributes);
            checkConsistency();
            slot.setNewProperty(slotBase, WTF::notFound);
            return;
        }
        if (rep == m_singleEntryKey && !(checkReadOnly && (m_singleEntryAttributes & ReadOnly))) {
            m_u.singleEntryValue = value;
            return;
        }
    }
#endif

    if (!m_usingTable)
        expand();

    // FIXME: Consider a fast case for tables with no deleted sentinels.

    unsigned i = rep->computedHash();
    unsigned k = 0;
    bool foundDeletedElement = false;
    unsigned deletedElementIndex = 0; // initialize to make the compiler happy

#if DUMP_PROPERTYMAP_STATS
    ++numProbes;
#endif

    while (1) {
        unsigned entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
        if (entryIndex == emptyEntryIndex)
            break;

        if (m_u.table->entries()[entryIndex - 1].key == rep) {
            if (checkReadOnly && (m_u.table->entries()[entryIndex - 1].attributes & ReadOnly))
                return;
            // Put a new value in an existing hash table entry.
            m_u.table->entries()[entryIndex - 1].value = value;
            // Attributes are intentionally not updated.
            slot.setExistingProperty(slotBase, offsetForTableLocation(&m_u.table->entries()[entryIndex - 1].value));
            return;
        } else if (entryIndex == deletedSentinelIndex) {
            // If we find a deleted-element sentinel, remember it for use later.
            if (!foundDeletedElement) {
                foundDeletedElement = true;
                deletedElementIndex = i;
            }
        }

        if (k == 0) {
            k = 1 | doubleHash(rep->computedHash());
#if DUMP_PROPERTYMAP_STATS
            ++numCollisions;
#endif
        }

        i += k;

#if DUMP_PROPERTYMAP_STATS
        ++numRehashes;
#endif
    }

    // Figure out which entry to use.
    unsigned entryIndex = m_u.table->keyCount + m_u.table->deletedSentinelCount + 2;
    if (foundDeletedElement) {
        i = deletedElementIndex;
        --m_u.table->deletedSentinelCount;

        // Since we're not making the table bigger, we can't use the entry one past
        // the end that we were planning on using, so search backwards for the empty
        // slot that we can use. We know it will be there because we did at least one
        // deletion in the past that left an entry empty.
        while (m_u.table->entries()[--entryIndex - 1].key) { }
    }

    // Point the hash bucket at the new entry.
    m_u.table->entryIndicies[i & m_u.table->sizeMask] = entryIndex;

    // Create the new hash table entry.
    rep->ref();
    m_u.table->entries()[entryIndex - 1].key = rep;
    m_u.table->entries()[entryIndex - 1].value = value;
    m_u.table->entries()[entryIndex - 1].attributes = attributes;
    m_u.table->entries()[entryIndex - 1].index = ++m_u.table->lastIndexUsed;
    ++m_u.table->keyCount;

    if ((m_u.table->keyCount + m_u.table->deletedSentinelCount) * 2 >= m_u.table->size)
        expand();

    checkConsistency();
    slot.setNewProperty(slotBase, offsetForTableLocation(&m_u.table->entries()[entryIndex - 1].value));
}

void PropertyMap::insert(const Entry& entry)
{
    ASSERT(m_u.table);

    unsigned i = entry.key->computedHash();
    unsigned k = 0;

#if DUMP_PROPERTYMAP_STATS
    ++numProbes;
#endif

    while (1) {
        unsigned entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
        if (entryIndex == emptyEntryIndex)
            break;

        if (k == 0) {
            k = 1 | doubleHash(entry.key->computedHash());
#if DUMP_PROPERTYMAP_STATS
            ++numCollisions;
#endif
        }

        i += k;

#if DUMP_PROPERTYMAP_STATS
        ++numRehashes;
#endif
    }

    unsigned entryIndex = m_u.table->keyCount + 2;
    m_u.table->entryIndicies[i & m_u.table->sizeMask] = entryIndex;
    m_u.table->entries()[entryIndex - 1] = entry;
    ++m_u.table->keyCount;
}

void PropertyMap::expand()
{
    if (!m_usingTable)
        createTable();
    else
        rehash(m_u.table->size * 2);
}

void PropertyMap::rehash()
{
    ASSERT(m_usingTable);
    ASSERT(m_u.table);
    ASSERT(m_u.table->size);
    rehash(m_u.table->size);
}

void PropertyMap::createTable()
{
    const unsigned newTableSize = 16;

    ASSERT(!m_usingTable);

    checkConsistency();

#if USE_SINGLE_ENTRY
    JSValue* oldSingleEntryValue = m_u.singleEntryValue;
#endif

    m_u.table = static_cast<Table*>(fastZeroedMalloc(Table::allocationSize(newTableSize)));
    m_u.table->size = newTableSize;
    m_u.table->sizeMask = newTableSize - 1;
    m_usingTable = true;

#if USE_SINGLE_ENTRY
    if (m_singleEntryKey) {
        insert(Entry(m_singleEntryKey, oldSingleEntryValue, m_singleEntryAttributes));
        m_singleEntryKey = 0;
    }
#endif

    checkConsistency();
}

void PropertyMap::rehash(unsigned newTableSize)
{
    ASSERT(!m_singleEntryKey);
    ASSERT(m_u.table);
    ASSERT(m_usingTable);

    checkConsistency();

    Table* oldTable = m_u.table;

    m_u.table = static_cast<Table*>(fastZeroedMalloc(Table::allocationSize(newTableSize)));
    m_u.table->size = newTableSize;
    m_u.table->sizeMask = newTableSize - 1;

    unsigned lastIndexUsed = 0;
    unsigned entryCount = oldTable->keyCount + oldTable->deletedSentinelCount;
    for (unsigned i = 1; i <= entryCount; ++i) {
        if (oldTable->entries()[i].key) {
            lastIndexUsed = max(oldTable->entries()[i].index, lastIndexUsed);
            insert(oldTable->entries()[i]);
        }
    }
    m_u.table->lastIndexUsed = lastIndexUsed;

    fastFree(oldTable);

    checkConsistency();
}

void PropertyMap::remove(const Identifier& propertyName)
{
    ASSERT(!propertyName.isNull());

    checkConsistency();

    UString::Rep* rep = propertyName._ustring.rep();

    if (!m_usingTable) {
#if USE_SINGLE_ENTRY
        if (rep == m_singleEntryKey) {
            m_singleEntryKey->deref();
            m_singleEntryKey = 0;
            checkConsistency();
        }
#endif
        return;
    }

#if DUMP_PROPERTYMAP_STATS
    ++numProbes;
    ++numRemoves;
#endif

    // Find the thing to remove.
    unsigned i = rep->computedHash();
    unsigned k = 0;
    unsigned entryIndex;
    UString::Rep* key = 0;
    while (1) {
        entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
        if (entryIndex == emptyEntryIndex)
            return;

        key = m_u.table->entries()[entryIndex - 1].key;
        if (rep == key)
            break;

        if (k == 0) {
            k = 1 | doubleHash(rep->computedHash());
#if DUMP_PROPERTYMAP_STATS
            ++numCollisions;
#endif
        }

        i += k;

#if DUMP_PROPERTYMAP_STATS
        ++numRehashes;
#endif
    }

    // Replace this one element with the deleted sentinel. Also clear out
    // the entry so we can iterate all the entries as needed.
    m_u.table->entryIndicies[i & m_u.table->sizeMask] = deletedSentinelIndex;
    key->deref();
    m_u.table->entries()[entryIndex - 1].key = 0;
    m_u.table->entries()[entryIndex - 1].value = jsUndefined();
    m_u.table->entries()[entryIndex - 1].attributes = 0;
    ASSERT(m_u.table->keyCount >= 1);
    --m_u.table->keyCount;
    ++m_u.table->deletedSentinelCount;

    if (m_u.table->deletedSentinelCount * 4 >= m_u.table->size)
        rehash();

    checkConsistency();
}

void PropertyMap::mark() const
{
    if (!m_usingTable) {
#if USE_SINGLE_ENTRY
        if (m_singleEntryKey) {
            JSValue* v = m_u.singleEntryValue;
            if (!v->marked())
                v->mark();
        }
#endif
        return;
    }

    unsigned entryCount = m_u.table->keyCount + m_u.table->deletedSentinelCount;
    for (unsigned i = 1; i <= entryCount; i++) {
        JSValue* v = m_u.table->entries()[i].value;
        if (!v->marked())
            v->mark();
    }
}

static int comparePropertyMapEntryIndices(const void* a, const void* b)
{
    unsigned ia = static_cast<PropertyMapEntry* const*>(a)[0]->index;
    unsigned ib = static_cast<PropertyMapEntry* const*>(b)[0]->index;
    if (ia < ib)
        return -1;
    if (ia > ib)
        return +1;
    return 0;
}

void PropertyMap::getEnumerablePropertyNames(PropertyNameArray& propertyNames) const
{
    if (!m_usingTable) {
#if USE_SINGLE_ENTRY
        UString::Rep* key = m_singleEntryKey;
        if (key && !(m_singleEntryAttributes & DontEnum))
            propertyNames.add(key);
#endif
        return;
    }

    if (m_u.table->keyCount < tinyMapThreshold) {
        Entry* a[tinyMapThreshold];
        int i = 0;
        unsigned entryCount = m_u.table->keyCount + m_u.table->deletedSentinelCount;
        for (unsigned k = 1; k <= entryCount; k++) {
            if (m_u.table->entries()[k].key && !(m_u.table->entries()[k].attributes & DontEnum)) {
                Entry* value = &m_u.table->entries()[k];
                int j;
                for (j = i - 1; j >= 0 && a[j]->index > value->index; --j)
                    a[j + 1] = a[j];
                a[j + 1] = value;
                ++i;
            }
        }
        if (!propertyNames.size()) {
            for (int k = 0; k < i; ++k)
                propertyNames.addKnownUnique(a[k]->key);
        } else {
            for (int k = 0; k < i; ++k)
                propertyNames.add(a[k]->key);
        }
        return;
    }

    // Allocate a buffer to use to sort the keys.
    Vector<Entry*, smallMapThreshold> sortedEnumerables(m_u.table->keyCount);

    // Get pointers to the enumerable entries in the buffer.
    Entry** p = sortedEnumerables.data();
    unsigned entryCount = m_u.table->keyCount + m_u.table->deletedSentinelCount;
    for (unsigned i = 1; i <= entryCount; i++) {
        if (m_u.table->entries()[i].key && !(m_u.table->entries()[i].attributes & DontEnum))
            *p++ = &m_u.table->entries()[i];
    }

    // Sort the entries by index.
    qsort(sortedEnumerables.data(), p - sortedEnumerables.data(), sizeof(Entry*), comparePropertyMapEntryIndices);

    // Put the keys of the sorted entries into the list.
    for (Entry** q = sortedEnumerables.data(); q != p; ++q)
        propertyNames.add(q[0]->key);
}

#if DO_PROPERTYMAP_CONSTENCY_CHECK

void PropertyMap::checkConsistency()
{
    if (!m_usingTable)
        return;

    ASSERT(m_u.table->size >= 16);
    ASSERT(m_u.table->sizeMask);
    ASSERT(m_u.table->size == m_u.table->sizeMask + 1);
    ASSERT(!(m_u.table->size & m_u.table->sizeMask));

    ASSERT(m_u.table->keyCount <= m_u.table->size / 2);
    ASSERT(m_u.table->deletedSentinelCount <= m_u.table->size / 4);

    ASSERT(m_u.table->keyCount + m_u.table->deletedSentinelCount <= m_u.table->size / 2);

    unsigned indexCount = 0;
    unsigned deletedIndexCount = 0;
    for (unsigned a = 0; a != m_u.table->size; ++a) {
        unsigned entryIndex = m_u.table->entryIndicies[a];
        if (entryIndex == emptyEntryIndex)
            continue;
        if (entryIndex == deletedSentinelIndex) {
            ++deletedIndexCount;
            continue;
        }
        ASSERT(entryIndex > deletedSentinelIndex);
        ASSERT(entryIndex - 1 <= m_u.table->keyCount + m_u.table->deletedSentinelCount);
        ++indexCount;

        for (unsigned b = a + 1; b != m_u.table->size; ++b)
            ASSERT(m_u.table->entryIndicies[b] != entryIndex);
    }
    ASSERT(indexCount == m_u.table->keyCount);
    ASSERT(deletedIndexCount == m_u.table->deletedSentinelCount);

    ASSERT(m_u.table->entries()[0].key == 0);

    unsigned nonEmptyEntryCount = 0;
    for (unsigned c = 1; c <= m_u.table->keyCount + m_u.table->deletedSentinelCount; ++c) {
        UString::Rep* rep = m_u.table->entries()[c].key;
        if (!rep) {
            ASSERT(m_u.table->entries()[c].value->isUndefined());
            continue;
        }
        ++nonEmptyEntryCount;
        unsigned i = rep->computedHash();
        unsigned k = 0;
        unsigned entryIndex;
        while (1) {
            entryIndex = m_u.table->entryIndicies[i & m_u.table->sizeMask];
            ASSERT(entryIndex != emptyEntryIndex);
            if (rep == m_u.table->entries()[entryIndex - 1].key)
                break;
            if (k == 0)
                k = 1 | doubleHash(rep->computedHash());
            i += k;
        }
        ASSERT(entryIndex == c + 1);
    }

    ASSERT(nonEmptyEntryCount == m_u.table->keyCount);
}

#endif // DO_PROPERTYMAP_CONSTENCY_CHECK

} // namespace KJS