author      Bad Diode <bd@badd10de.dev>    2021-10-21 15:18:32 +0200
committer   Bad Diode <bd@badd10de.dev>    2021-10-21 15:18:32 +0200
commit      528bcf1eaf7ec6a4441ce792733c9c5a649f61f9 (patch)
tree        691ecdeddd72f2f3ae82bd653d02cc41ac153cd8
parent      8a8b6595ebec509f1b5ae155a38a99347820225c (diff)
download    bdl-528bcf1eaf7ec6a4441ce792733c9c5a649f61f9.tar.gz
            bdl-528bcf1eaf7ec6a4441ce792733c9c5a649f61f9.zip

Make the hash table growable
-rw-r--r--    src/bootstrap/hashtable.h    60
1 file changed, 51 insertions(+), 9 deletions(-)
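For context, a minimal, self-contained sketch of the pattern this commit introduces: check the load factor before every insertion, double the backing array once it crosses the threshold, and re-insert every live entry into the new array. The names below (Table, table_grow, table_insert) and the integer-keyed layout are illustrative only; the real code stores Object pointers through the repository's array_* macros, which are not reproduced here.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define LOAD_THRESHOLD 0.75

// Toy open-addressing table: uint64_t keys, key 0 marks an empty slot.
typedef struct Table {
    uint64_t *keys;
    uint64_t *values;
    size_t size;
    size_t cap;
} Table;

static void table_insert(Table *t, uint64_t key, uint64_t value);

// Double the capacity once the load factor crosses the threshold and
// re-insert every live entry, mirroring what ht_maybe_grow does with the
// repo's dynamic-array macros.
static void
table_grow(Table *t) {
    if ((float)t->size / (float)t->cap < LOAD_THRESHOLD) {
        return;
    }
    size_t old_cap = t->cap;
    uint64_t *old_keys = t->keys;
    uint64_t *old_values = t->values;
    t->cap *= 2;
    t->size = 0;
    t->keys = calloc(t->cap, sizeof(uint64_t));
    t->values = calloc(t->cap, sizeof(uint64_t));
    for (size_t i = 0; i < old_cap; i++) {
        if (old_keys[i] != 0) {
            table_insert(t, old_keys[i], old_values[i]);
        }
    }
    free(old_keys);
    free(old_values);
}

// Linear probing that wraps at cap - 1, as in the patched probe loop.
static void
table_insert(Table *t, uint64_t key, uint64_t value) {
    table_grow(t);
    size_t pos = key % t->cap;   // Stand-in for hashing an Object.
    while (t->keys[pos] != 0 && t->keys[pos] != key) {
        pos = (pos == t->cap - 1) ? 0 : pos + 1;
    }
    if (t->keys[pos] == 0) {
        t->size++;   // Only new keys count towards the load factor.
    }
    t->keys[pos] = key;
    t->values[pos] = value;
}

int
main(void) {
    Table t = { calloc(2, sizeof(uint64_t)), calloc(2, sizeof(uint64_t)), 0, 2 };
    for (uint64_t k = 1; k <= 100; k++) {
        table_insert(&t, k, k * k);
    }
    assert(t.size == 100);   // The table grew well past its starting capacity of 2.
    free(t.keys);
    free(t.values);
    return 0;
}

With a starting capacity of 2 and a 0.75 threshold (the same constants the patch sets), the first growth in this toy happens on the third distinct insertion, because the check runs before the new pair is written.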
diff --git a/src/bootstrap/hashtable.h b/src/bootstrap/hashtable.h
index baffec0..59f0660 100644
--- a/src/bootstrap/hashtable.h
+++ b/src/bootstrap/hashtable.h
@@ -5,8 +5,8 @@
 #include "objects.h"
 
 // Minimum table capacity.
-#define HT_MIN_CAP 4
-#define HT_MIN_SHIFT 2
+#define HT_MIN_CAP 2
+#define HT_MIN_SHIFT 1
 
 // Adjust the load factor threshold at which the table will grow on insertion.
 #define HT_LOAD_THRESHOLD 0.75
@@ -44,9 +44,9 @@ ht_hash(const HashTable *table, const Object *key) {
     return 0;
 }
 
-static inline size_t
+static inline float
 ht_load_factor(const HashTable *table) {
-    return array_size(table->pairs) / array_cap(table->pairs);
+    return (float)array_size(table->pairs) / (float)array_cap(table->pairs);
 }
 
 HashTable *
@@ -54,7 +54,6 @@ ht_init(void) {
     HashTable *table = malloc(sizeof(HashTable));
     table->pairs = NULL;
     array_init(table->pairs, HT_MIN_CAP);
-    // Clear the table ensuring all references point to NULL.
     for (size_t i = 0; i < array_cap(table->pairs); i++) {
         table->pairs[i] = (HashTablePair){NULL, NULL};
     }
@@ -63,8 +62,7 @@ ht_init(void) {
 }
 
 void
-ht_insert(HashTable *table, const Object *key, const Object *value) {
-    // TODO: Grow if needed.
+_ht_insert(HashTable *table, const Object *key, const Object *value) {
     size_t position = ht_hash(table, key);
     size_t probe_position = position;
 
@@ -73,12 +71,13 @@ ht_insert(HashTable *table, const Object *key, const Object *value) {
     HashTablePair *pairs = table->pairs;
     while (true) {
         if (pairs[probe_position].key == NULL) {
+            array_head(pairs)->size++;
             break;
         }
         if (obj_eq(pairs[probe_position].key, key)) {
             break;
         }
-        if (probe_position == array_cap(pairs)) {
+        if (probe_position == array_cap(pairs) - 1) {
             probe_position = 0;
         } else {
             probe_position++;
@@ -86,6 +85,37 @@ ht_insert(HashTable *table, const Object *key, const Object *value) {
     }
     pairs[probe_position].key = (Object *)key;
     pairs[probe_position].value = (Object *)value;
+}
+
+void
+ht_maybe_grow(HashTable *table) {
+    HashTablePair *pairs = table->pairs;
+    if (ht_load_factor(table) < HT_LOAD_THRESHOLD) {
+        return;
+    }
+
+    // Create a new array with 2x capacity.
+    table->pairs = NULL;
+    array_init(table->pairs, array_cap(pairs) * 2);
+    for (size_t i = 0; i < array_cap(table->pairs); i++) {
+        table->pairs[i] = (HashTablePair){NULL, NULL};
+    }
+
+    // Hash everything in the table for the new array capacity.
+    for (size_t i = 0; i < array_cap(pairs); i++) {
+        if (pairs[i].key != NULL) {
+            _ht_insert(table, pairs[i].key, pairs[i].value);
+        }
+    }
+
+    // Free the old array.
+    array_free(pairs);
+}
+
+void
+ht_insert(HashTable *table, const Object *key, const Object *value) {
+    ht_maybe_grow(table);
+    _ht_insert(table, key, value);
     return;
 }
 
@@ -104,7 +134,7 @@ ht_lookup(const HashTable *table, const Object *key) {
         if (obj_eq(pairs[probe_position].key, key)) {
             break;
         }
-        if (probe_position == array_cap(pairs)) {
+        if (probe_position == array_cap(pairs) - 1) {
             probe_position = 0;
         } else {
             probe_position++;
@@ -130,6 +160,18 @@ ht_debug(HashTable *table) {
     }
 }
 
+void
+ht_free(HashTable *table) {
+    if (table == NULL) {
+        return;
+    }
+    if (table->pairs == NULL) {
+        return;
+    }
+    array_free(table->pairs);
+    free(table);
+}
+
 // // Use Fibonacci hashing to map a hash to a value in range of the table.
 // static inline
 // uint64_t _fibonacci_hash(uint64_t hash, size_t shift_amount) {
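The trailing context above is the start of a commented-out Fibonacci hashing helper. For reference, a sketch of the usual form of that technique follows; since _fibonacci_hash is truncated here, the multiplier and the direction of the shift are assumptions based on the standard formulation (multiply by 2^64/phi and keep the top log2(capacity) bits), not the repository's exact signature.

#include <stddef.h>
#include <stdint.h>

// 2^64 divided by the golden ratio, the usual Fibonacci-hashing multiplier
// (11400714819323198485 in decimal).
#define FIB_MULTIPLIER 0x9E3779B97F4A7C15llu

// Map a 64-bit hash to an index in a table of (1 << index_bits) slots.
// index_bits must be between 1 and 63.
static inline uint64_t
fibonacci_hash(uint64_t hash, size_t index_bits) {
    return (hash * FIB_MULTIPLIER) >> (64 - index_bits);
}

For example, fibonacci_hash(h, 1) yields 0 or 1, which would line up with the new HT_MIN_SHIFT of 1 and HT_MIN_CAP of 2 if the stored shift is log2 of the capacity; that correspondence is an inference, not something the truncated helper confirms.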