Remove profile in CompactHashStore#insertIntoIndex
* I tried making that a node but some Truffle DSL bug prevents using the
  node in PackedHashStoreLibrary.
eregon committed Nov 29, 2023
1 parent 8c9072c commit 8062a74
Showing 2 changed files with 24 additions and 25 deletions.
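
For context before the diff: the compact store keeps key/value pairs appended to a flat Object[] (kvStore) and looks them up through a separate int[] index of (hash code, value position) pairs filled by open addressing with linear probing; insertIntoIndex is that probing loop, and this commit drops the InlinedLoopConditionProfile it used to take. The following is a standalone plain-Java sketch of that layout, not the TruffleRuby sources; the INDEX_SLOT_UNUSED sentinel value (0 here), the fixed capacities, and the two hash-position helpers at the bottom are assumptions made for the sketch.

// Standalone sketch of the compact store layout (not the TruffleRuby sources).
// Assumptions: unused index slots hold 0 (INDEX_SLOT_UNUSED), capacities are fixed
// (no resizing here, so at most 8 entries), and index slots always record the value
// position (keyPos + 1), which is therefore always > 0 when a slot is used.
final class CompactStoreSketch {
    static final int INDEX_SLOT_UNUSED = 0; // assumed sentinel for an empty index slot

    final Object[] kvStore = new Object[16]; // key at keyPos, value at keyPos + 1
    final int[] index = new int[16];         // hash code at pos, value position at pos + 1
    int kvStoreInsertionPos = 0;             // next free key slot in kvStore

    // For promoting from packed to compact (mirrors insertHashKeyValue in the diff)
    void insertHashKeyValue(int hashCode, Object key, Object value) {
        int keyPos = kvStoreInsertionPos;
        insertIntoKv(keyPos, key, value);
        insertIntoIndex(hashCode, keyPos + 1); // the index records the *value* position
    }

    private void insertIntoKv(int keyPos, Object key, Object value) {
        kvStore[keyPos] = key;
        kvStore[keyPos + 1] = value;
        kvStoreInsertionPos = keyPos + 2;
    }

    // Open addressing with linear probing; this is the loop whose profile the commit removes.
    private void insertIntoIndex(int hashCode, int valuePos) {
        int pos = indexPosFromHashCode(hashCode, index.length);
        while (index[pos + 1] > INDEX_SLOT_UNUSED) {    // slot already taken
            pos = incrementIndexPos(pos, index.length); // probe the next slot, wrapping around
        }
        index[pos] = hashCode;
        index[pos + 1] = valuePos;
    }

    // Both helpers below are assumptions for the sketch, not the real implementations.
    private static int indexPosFromHashCode(int hashCode, int indexLength) {
        return (hashCode & (indexLength / 2 - 1)) * 2; // even slot in a power-of-two-sized array
    }

    private static int incrementIndexPos(int pos, int indexLength) {
        return (pos + 2) % indexLength;
    }
}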
CompactHashStore.java

@@ -160,14 +160,6 @@ private static int indexPosToValuePos(int[] index, int indexPos) {
         return index[indexPos + 1];
     }

-    // For promoting from packed to compact
-    public void putHashKeyValue(int hashcode, Object key, Object value) {
-        int pos = kvStoreInsertionPos;
-        SetKvAtNode.insertIntoKv(this, key, value);
-        SetKvAtNode.insertIntoIndex(hashcode, pos + 1, index,
-                InlinedLoopConditionProfile.getUncached(), null);
-    }
-
     @ExportMessage
     Object lookupOrDefault(Frame frame, RubyHash hash, Object key, PEBiFunction defaultNode,
             @Cached @Shared GetIndexPosForKeyNode getIndexPosForKeyNode,
@@ -586,7 +578,7 @@ static boolean keyDoesntExist(
             }

             keyPos = store.kvStoreInsertionPos;
-            insertIntoKv(store, key, value);
+            insertIntoKv(store, keyPos, key, value);

             assert store.index[indexPos + 1] <= 0;
             store.index[indexPos] = keyHash;
@@ -596,43 +588,42 @@ static boolean keyDoesntExist(

             if (indexResizingIsNeeded.profile(node, hash.size >= store.indexGrowthThreshold)) {
                 // Resize the index array after insertion, as it invalidates indexPos
-                resizeIndex(store, node);
+                resizeIndex(store);
             }

             return true;
         }

-        private static void insertIntoIndex(int keyHash, int kvPos, int[] index,
-                InlinedLoopConditionProfile unavailableSlot, Node node) {
-            int pos = indexPosFromHashCode(keyHash, index.length);
+        private static void insertIntoIndex(int hashCode, int valuePos, int[] index) {
+            int pos = indexPosFromHashCode(hashCode, index.length);

-            while (unavailableSlot.profile(node, index[pos + 1] > INDEX_SLOT_UNUSED)) {
+            while (index[pos + 1] > INDEX_SLOT_UNUSED) {
                 pos = incrementIndexPos(pos, index.length);
             }

-            index[pos] = keyHash;
-            index[pos + 1] = kvPos;
+            index[pos] = hashCode;
+            index[pos + 1] = valuePos;
         }

-        private static void insertIntoKv(CompactHashStore store, Object key, Object value) {
-            store.kvStore[store.kvStoreInsertionPos] = key;
-            store.kvStore[store.kvStoreInsertionPos + 1] = value;
-            store.kvStoreInsertionPos += 2;
+        private static void insertIntoKv(CompactHashStore store, int keyPos, Object key, Object value) {
+            store.kvStore[keyPos] = key;
+            store.kvStore[keyPos + 1] = value;
+            store.kvStoreInsertionPos = keyPos + 2;
         }

         @TruffleBoundary
-        private static void resizeIndex(CompactHashStore store, Node node) {
+        private static void resizeIndex(CompactHashStore store) {
             int[] oldIndex = store.index;
             int[] newIndex = new int[2 * oldIndex.length];
             int newIndexCapacity = newIndex.length >> 1;

             int i = 0;
             for (; i < oldIndex.length; i += 2) {
                 int hash = oldIndex[i];
-                int kvPos = oldIndex[i + 1];
+                int valuePos = oldIndex[i + 1];

-                if (kvPos > INDEX_SLOT_UNUSED) {
-                    insertIntoIndex(hash, kvPos, newIndex, InlinedLoopConditionProfile.getUncached(), node);
+                if (valuePos > INDEX_SLOT_UNUSED) {
+                    insertIntoIndex(hash, valuePos, newIndex);
                 }
             }

@@ -645,6 +636,14 @@ private static void resizeKvStore(CompactHashStore store) {
         }
     }

+    /** For promoting from packed to compact */
+    void insertHashKeyValue(int hashCode, Object key, Object value) {
+        int keyPos = kvStoreInsertionPos;
+        int valuePos = keyPos + 1;
+        SetKvAtNode.insertIntoKv(this, keyPos, key, value);
+        SetKvAtNode.insertIntoIndex(hashCode, valuePos, index);
+    }
+
     public static final class CompactHashLiteralNode extends HashLiteralNode {

         @Child HashStoreLibrary hashes;
PackedHashStoreLibrary.java

@@ -158,7 +158,7 @@ private static void promoteToBuckets(RubyHash hash, Object[] store, int size) {
     private static void promoteToCompact(RubyHash hash, Object[] store) {
         CompactHashStore newStore = new CompactHashStore(MAX_ENTRIES);
         for (int n = 0; n < MAX_ENTRIES; n++) {
-            newStore.putHashKeyValue(getHashed(store, n), getKey(store, n), getValue(store, n));
+            newStore.insertHashKeyValue(getHashed(store, n), getKey(store, n), getValue(store, n));
         }
         hash.store = newStore;
         hash.size = MAX_ENTRIES;
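
The second file's change is only the call-site rename: promotion from a packed store walks its entries and feeds each (hash, key, value) triple to the renamed helper. A hypothetical driver for the sketch above, assuming the packed data is a flat array of (hashed, key, value) triples (not the real packed layout):

// Hypothetical promotion mirroring promoteToCompact above; `packed` is assumed to be a
// flat array of (hashed, key, value) triples, and size must stay within the sketch's
// fixed capacity of 8 entries, since the sketch never resizes.
static CompactStoreSketch promoteToCompact(Object[] packed, int size) {
    CompactStoreSketch newStore = new CompactStoreSketch();
    for (int n = 0; n < size; n++) {
        int hashed = (Integer) packed[3 * n];
        Object key = packed[3 * n + 1];
        Object value = packed[3 * n + 2];
        newStore.insertHashKeyValue(hashed, key, value);
    }
    return newStore;
}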
