




Sponsored
Sponsored
This method employs an array of linked lists (chaining) to handle collisions. Each index represents a hash code, and if multiple keys hash to the same index, we store them in a linked list at that index. This helps us efficiently manage key collisions.
Time Complexity: For each operation (put, get, remove), it is O(n) in the worst case, where n is the number of keys stored at a particular index.
Space Complexity: O(N) where N is the number of distinct keys inserted into the hash map.
#include <stdio.h>
#include <stdlib.h>
/* Number of buckets; every key hashes to an index in [0, MAX_SIZE). */
#define MAX_SIZE 1000

/* One key/value pair in a bucket's collision chain (singly linked). */
typedef struct Node {
    int key, value;
    struct Node* next;
} Node;

/* Bucket array. File-scope objects are zero-initialized in C, so every
 * chain starts out empty (NULL) without any explicit setup. */
Node* hashMap[MAX_SIZE];
13unsigned int hashFunction(int key) {
14    return key % MAX_SIZE;
15}
16
17Node* createNode(int key, int value) {
18    Node* newNode = (Node*)malloc(sizeof(Node));
19    newNode->key = key;
20    newNode->value = value;
21    newNode->next = NULL;
22    return newNode;
23}
24
25void myHashMapPut(int key, int value) {
26    int hash = hashFunction(key);
27    Node* current = hashMap[hash];
28    while (current != NULL) {
29        if (current->key == key) {
30            current->value = value;
31            return;
32        }
33        current = current->next;
34    }
35    Node* newNode = createNode(key, value);
36    newNode->next = hashMap[hash];
37    hashMap[hash] = newNode;
38}
39
40int myHashMapGet(int key) {
41    int hash = hashFunction(key);
42    Node* current = hashMap[hash];
43    while (current != NULL) {
44        if (current->key == key) {
45            return current->value;
46        }
47        current = current->next;
48    }
49    return -1;
50}
51
52void myHashMapRemove(int key) {
53    int hash = hashFunction(key);
54    Node* current = hashMap[hash];
55    Node* prev = NULL;
56    while (current != NULL) {
57        if (current->key == key) {
58            if (prev != NULL) {
59                prev->next = current->next;
60            } else {
61                hashMap[hash] = current->next;
62            }
63            free(current);
64            return;
65        }
66        prev = current;
67        current = current->next;
68    }
69}The above C code initializes a simple hash table with separate chaining using linked lists. Each bucket of our hash table is a linked list to handle collisions.
hashFunction computes the bucket index for a given key. createNode initializes a new linked-list node. myHashMapPut inserts or updates a key-value pair. myHashMapGet retrieves the value for a given key, or returns -1 if the key is not found. myHashMapRemove deletes a key and its associated value.

Double hashing avoids the clustering problem by using two hash functions: the first chooses the starting slot, and if a collision occurs, the second determines the step size for probing.
Time Complexity: O(1) on average for put, get, and remove thanks to effective hashing; O(n) in the worst case.
Space Complexity: O(N) for storing entries with keys and values.
using System;

/// <summary>
/// Integer-keyed hash map using open addressing with double hashing:
/// PrimaryHash picks the starting slot, SecondaryHash supplies the probe
/// step. Removal uses tombstones (the <c>deleted</c> array) so that
/// probe chains for other keys remain intact, and every probe loop is
/// bounded by the table size so operations always terminate.
/// </summary>
public class MyHashMap {
    // Prime capacity: every step in 1..size-1 is coprime with it, so a
    // probe sequence visits all slots before repeating. (With the original
    // composite 1000, steps sharing a factor with it skipped slots.)
    private const int size = 1009;
    private int[][] map;       // map[i] = { key, value } for slot i
    private bool[] occupied;   // slot holds a live entry
    private bool[] deleted;    // tombstone: entry removed; probing must continue past it

    public MyHashMap() {
        map = new int[size][];
        occupied = new bool[size];
        deleted = new bool[size];
        for (int i = 0; i < size; i++) {
            map[i] = new int[] { -1, -1 };
        }
    }

    // Starting slot. Normalized so negative keys (for which C#'s % is
    // negative) still map into [0, size).
    private int PrimaryHash(int key) {
        return ((key % size) + size) % size;
    }

    // Probe step in 1..size-1 — never 0, so probing always advances;
    // normalized for negative keys like PrimaryHash.
    private int SecondaryHash(int key) {
        return 1 + ((key % (size - 1)) + (size - 1)) % (size - 1);
    }

    /// <summary>Insert key→value, or overwrite if the key is present.</summary>
    public void Put(int key, int value) {
        int index = PrimaryHash(key);
        int step = SecondaryHash(key);
        int firstFree = -1;  // first tombstone seen: reuse it if the key is absent
        for (int probes = 0; probes < size; probes++) {
            if (occupied[index]) {
                if (map[index][0] == key) {  // key present: update in place
                    map[index][1] = value;
                    return;
                }
            } else if (deleted[index]) {
                if (firstFree == -1) firstFree = index;
            } else {
                // Truly empty slot: the key is not in the table. Prefer an
                // earlier tombstone to keep probe chains short.
                int target = (firstFree != -1) ? firstFree : index;
                map[target][0] = key;
                map[target][1] = value;
                occupied[target] = true;
                deleted[target] = false;
                return;
            }
            index = (index + step) % size;
        }
        if (firstFree != -1) {  // whole cycle scanned: fall back to a tombstone slot
            map[firstFree][0] = key;
            map[firstFree][1] = value;
            occupied[firstFree] = true;
            deleted[firstFree] = false;
        }
        // else: table completely full — insert is dropped, as in any fixed-capacity scheme.
    }

    /// <summary>Return the value for key, or -1 if the key is absent.</summary>
    public int Get(int key) {
        int index = PrimaryHash(key);
        int step = SecondaryHash(key);
        for (int probes = 0; probes < size; probes++) {
            if (!occupied[index] && !deleted[index]) return -1;  // empty slot ends the chain
            if (occupied[index] && map[index][0] == key) return map[index][1];
            index = (index + step) % size;  // tombstone or other key: keep probing
        }
        return -1;
    }

    /// <summary>Remove key and its value; no-op if the key is absent.</summary>
    public void Remove(int key) {
        int index = PrimaryHash(key);
        int step = SecondaryHash(key);
        for (int probes = 0; probes < size; probes++) {
            if (!occupied[index] && !deleted[index]) return;  // empty slot: key absent
            if (occupied[index] && map[index][0] == key) {
                occupied[index] = false;
                deleted[index] = true;  // tombstone keeps colliding keys reachable
                map[index][0] = -1;
                return;
            }
            index = (index + step) % size;
        }
    }
}

This C# implementation uses double hashing: a primary hash selects the starting bucket and a secondary hash sets the probe step, reducing the clustering that plagues linear probing; tombstones preserve probe chains across removals.