Created November 7, 2012 11:20
Standard Hash Table Implementation
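The gist does not include HashMap.h, so the class interface has to be inferred from the implementation below. The following is one plausible reconstruction: the member names, function signatures, and Node layout follow the .cpp exactly, while the HashFunction typedef and the values of INITIAL_BUCKET_COUNT and LOAD_THRESHOLD are assumptions and may differ from the original header.

// HashMap.h (not part of the gist; reconstructed sketch)
#ifndef HASHMAP_H
#define HASHMAP_H

#include <string>

class HashMap {
public:
    // Assumed signature: a hash function maps a key to an unsigned int.
    typedef unsigned int (*HashFunction)(const std::string&);

    HashMap(HashFunction hashFunction);
    HashMap(const HashMap& hm);
    ~HashMap();
    HashMap& operator=(const HashMap& hm);

    void add(const std::string& key, const std::string& value);
    void remove(const std::string& key);
    bool contains(const std::string& key) const;
    std::string value(const std::string& key) const;
    unsigned int size() const;
    unsigned int bucketCount() const;
    double loadFactor() const;

private:
    // Each bucket is a singly linked chain of key/value nodes.
    struct Node {
        std::string key;
        std::string value;
        Node* next;
    };

    // Placeholder values; the real header defines the actual constants.
    static const unsigned int INITIAL_BUCKET_COUNT = 10;
    static constexpr double LOAD_THRESHOLD = 0.8;

    void rehash();
    unsigned int compress(unsigned int i) const;

    Node** mapArray;     // array of bucket heads
    HashFunction hashFn; // user-supplied hash function
    unsigned int mapSz;  // number of stored key/value pairs
    unsigned int mapCap; // number of buckets
};

#endif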
/**
 * Gio Borje
 * 41894135
 */
#include "HashMap.h"

using namespace std;

/**
 * Initializes the node with the specified parameters
 */
#define INITIALIZE_NODE(node, p_key, p_val, p_next) \
    (node)->key = p_key; \
    (node)->value = p_val; \
    (node)->next = p_next;

/**
 * Links a node to a bucket (acting as a stack)
 */
#define LINK_NODE_TO_BUCKET(node, bucketHead) \
    (node)->next = (bucketHead); \
    (bucketHead) = (node);

/**
 * Unlinks a node by replacing it with its next pointer
 */
#define UNLINK_NODE(node) \
    Node* next = (node)->next; \
    delete (node); \
    (node) = next;

/**
 * Unlinks the next node following the specified pointer
 */
#define UNLINK_NEXT_NODE(node) \
    Node* next = nullptr; \
    if ((node)->next->next != nullptr) { \
        next = (node)->next->next; \
    } \
    delete (node)->next; \
    (node)->next = next;

/**
 * Initializes the bucket array to nullptr
 */
#define INITIALIZE_BUCKET_ARRAY(bucketArray, bucketArraySize) \
    for (size_t i = 0; i < (bucketArraySize); ++i) { \
        (bucketArray)[i] = nullptr; \
    }

/**
 * Deallocates an entire bucket given its head
 */
#define UNLINK_BUCKET(bucketHead) { \
    Node* current = (bucketHead); \
    Node* next; \
    while (current != nullptr) { \
        next = current->next; \
        delete current; \
        current = next; \
    } \
}

HashMap::HashMap(HashFunction hashFunction)
    : mapArray(new Node*[INITIAL_BUCKET_COUNT]), hashFn(hashFunction),
      mapSz(0), mapCap(INITIAL_BUCKET_COUNT) {
    INITIALIZE_BUCKET_ARRAY(mapArray, INITIAL_BUCKET_COUNT);
}

HashMap::HashMap(const HashMap& hm)
    : mapArray(new Node*[hm.mapCap]), hashFn(hm.hashFn),
      mapSz(0), mapCap(hm.mapCap) {
    INITIALIZE_BUCKET_ARRAY(mapArray, mapCap);
    // Deep-copy every chain from the source map; add() restores mapSz
    for (size_t i = 0; i < hm.mapCap; ++i)
        for (const Node* u = hm.mapArray[i]; u != nullptr; u = u->next)
            add(u->key, u->value);
}

HashMap::~HashMap() {
    for (size_t i = 0; i < mapCap; ++i)
        UNLINK_BUCKET(mapArray[i]);
    delete[] mapArray;
}

HashMap& HashMap::operator=(const HashMap& hm) {
    if (this != &hm) {
        // Release the existing buckets before copying
        for (size_t i = 0; i < mapCap; ++i)
            UNLINK_BUCKET(mapArray[i]);
        delete[] mapArray;
        hashFn = hm.hashFn;
        mapSz = 0;
        mapCap = hm.mapCap;
        mapArray = new Node*[mapCap];
        INITIALIZE_BUCKET_ARRAY(mapArray, mapCap);
        // Deep-copy every chain from the source map; add() restores mapSz
        for (size_t i = 0; i < hm.mapCap; ++i)
            for (const Node* u = hm.mapArray[i]; u != nullptr; u = u->next)
                add(u->key, u->value);
    }
    return *this;
}

/**
 * Adds a key/value pair by prepending a node to its bucket
 */
void HashMap::add(const string& key, const string& value) {
    Node* v = new Node;
    INITIALIZE_NODE(v, key, value, nullptr);
    Node*& u = mapArray[compress(hashFn(key))];
    LINK_NODE_TO_BUCKET(v, u);
    mapSz++;
    rehash();
}

/**
 * Removes the node matching the key, if present
 */
void HashMap::remove(const string& key) {
    if (mapSz == 0)
        return;
    if (contains(key)) {
        Node*& bucketHead = mapArray[compress(hashFn(key))];
        if (bucketHead->key.compare(key) == 0) {
            UNLINK_NODE(bucketHead);
        } else {
            Node* u = bucketHead;
            while (u->next != nullptr) {
                if (u->next->key.compare(key) == 0) {
                    UNLINK_NEXT_NODE(u);
                    break;
                }
                u = u->next;
            }
        }
        mapSz--;
    }
}

/**
 * Returns true if the key exists in its bucket's chain
 */
bool HashMap::contains(const string& key) const {
    unsigned int index = compress(hashFn(key));
    const Node* u = mapArray[index];
    while (u != nullptr) {
        if (u->key.compare(key) == 0)
            return true;
        u = u->next;
    }
    return false;
}

/**
 * Returns the value mapped to the key, or an empty string if absent
 */
string HashMap::value(const string& key) const {
    const Node* u = mapArray[compress(hashFn(key))];
    while (u != nullptr) {
        if (u->key.compare(key) == 0)
            return u->value;
        u = u->next;
    }
    return string();
}

unsigned int HashMap::size() const {
    return mapSz;
}

unsigned int HashMap::bucketCount() const {
    return mapCap;
}

double HashMap::loadFactor() const {
    return mapSz / static_cast<double>(mapCap);
}

/**
 * Grows the bucket array and re-inserts every node once the
 * load factor reaches LOAD_THRESHOLD
 */
void HashMap::rehash() {
    if (loadFactor() < LOAD_THRESHOLD)
        return;
    size_t oldMapCap = mapCap;
    mapCap <<= 2;
    Node** newArray = new Node*[mapCap];
    INITIALIZE_BUCKET_ARRAY(newArray, mapCap);
    const Node* u;
    for (size_t i = 0; i < oldMapCap; ++i) {
        u = mapArray[i];
        while (u != nullptr) {
            Node* v = new Node;
            INITIALIZE_NODE(v, u->key, u->value, nullptr);
            Node*& bucket = newArray[compress(hashFn(u->key))];
            LINK_NODE_TO_BUCKET(v, bucket);
            u = u->next;
        }
    }
    // Free the old chains and the old bucket array before swapping in the new one
    for (size_t i = 0; i < oldMapCap; ++i)
        UNLINK_BUCKET(mapArray[i]);
    delete[] mapArray;
    mapArray = newArray;
}

/**
 * Compresses a hash code into a bucket index
 */
unsigned int HashMap::compress(unsigned int i) const {
    return i % mapCap;
}
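For reference, here is a small driver showing how the class might be exercised; it is not part of the gist. The simpleHash function and the expected-output comments are illustrative only and assume the HashMap.h sketch given above.

// main.cpp (illustrative only, not part of the gist)
#include <iostream>
#include <string>
#include "HashMap.h"

// A simple polynomial hash; any function matching HashFunction works.
unsigned int simpleHash(const std::string& key) {
    unsigned int h = 0;
    for (std::string::size_type i = 0; i < key.size(); ++i)
        h = h * 31 + static_cast<unsigned char>(key[i]);
    return h;
}

int main() {
    HashMap map(simpleHash);
    map.add("alice", "reading");
    map.add("bob", "swimming");

    std::cout << map.value("alice") << std::endl;     // prints "reading"
    std::cout << map.contains("carol") << std::endl;  // prints 0
    std::cout << map.size() << std::endl;             // prints 2

    map.remove("bob");
    std::cout << map.size() << std::endl;             // prints 1
    return 0;
}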