Skip to content

Instantly share code, notes, and snippets.

@Wizmann
Last active October 23, 2025 08:47
Show Gist options
  • Save Wizmann/bcee8db9a757ed91ac5bfd94b3f2d3b8 to your computer and use it in GitHub Desktop.
// g++ -std=c++17 -Wall -Wextra -pthread -I./oneTBB/include test_header_only.cpp -o test_header_only
#include <atomic>
#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <vector>
// Disable OneTBB advanced features for header-only mode
#define TBB_USE_EXCEPTIONS 0
#define TBB_USE_ASSERT 0
#define TBB_USE_PROFILING_TOOLS 0
#define TBB_USE_DEBUG 0
// Enable preview features
#define TBB_PREVIEW_CONCURRENT_LRU_CACHE 1
// Test control macros - set to 1 to enable, 0 to disable
#define TEST_CONCURRENT_VECTOR 1
#define TEST_CONCURRENT_QUEUE 1
#define TEST_CONCURRENT_MAP 1
#define TEST_CONCURRENT_SET 1
#define TEST_CONCURRENT_PRIORITY_QUEUE 1
#define TEST_CONCURRENT_HASH_MAP 1
#define TEST_CONCURRENT_LRU_CACHE 1
// Include OneTBB concurrent data structures
#if TEST_CONCURRENT_VECTOR
#include "oneTBB/include/oneapi/tbb/concurrent_vector.h"
#endif
#if TEST_CONCURRENT_QUEUE
#include "oneTBB/include/oneapi/tbb/concurrent_queue.h"
#endif
#if TEST_CONCURRENT_MAP
#include "oneTBB/include/oneapi/tbb/concurrent_unordered_map.h"
#endif
#if TEST_CONCURRENT_SET
#include "oneTBB/include/oneapi/tbb/concurrent_unordered_set.h"
#endif
#if TEST_CONCURRENT_PRIORITY_QUEUE
#include "oneTBB/include/oneapi/tbb/concurrent_priority_queue.h"
#endif
#if TEST_CONCURRENT_HASH_MAP
#include "oneTBB/include/oneapi/tbb/concurrent_hash_map.h"
#endif
#if TEST_CONCURRENT_LRU_CACHE
#include "oneTBB/include/oneapi/tbb/concurrent_lru_cache.h"
#endif
// Minimal implementation for header-only mode.
//
// These entry points are normally supplied by the compiled TBB runtime
// (libtbb).  Providing them here lets the concurrent containers link
// without the library.  NOTE(review): the signatures must match the
// declarations in oneTBB's detail headers exactly — verify against the
// oneTBB version in use.
namespace tbb {
namespace detail {
namespace r1 {
// Stub for the runtime exception thrower: with TBB_USE_EXCEPTIONS=0 we
// only log the condition and return, letting the caller continue.
void throw_exception(tbb::detail::d0::exception_id id) {
    std::cerr << "TBB Warning: Exception condition (ID: " << static_cast<int>(id) << "), continuing..." << std::endl;
    // Don't terminate, let operation continue
}
// Cache-aligned allocation.  std::aligned_alloc (C++17, <cstdlib>)
// requires the size to be a multiple of the alignment, so round up first.
void* cache_aligned_allocate(size_t size) {
    constexpr size_t cache_line_size = 64; // Typical cache line size
    size_t aligned_size = (size + cache_line_size - 1) & ~(cache_line_size - 1);
    // A zero-byte request rounds to 0, for which std::aligned_alloc's
    // result is implementation-defined; hand out one cache line instead.
    if (aligned_size == 0) {
        aligned_size = cache_line_size;
    }
    return std::aligned_alloc(cache_line_size, aligned_size);
}
void cache_aligned_deallocate(void* ptr) {
    std::free(ptr);
}
// Plain heap allocation pair backing tbb_allocator.
void* allocate_memory(size_t size) {
    return std::malloc(size);
}
void deallocate_memory(void* ptr) {
    std::free(ptr);
}
} // namespace r1
} // namespace detail
} // namespace tbb
// Type aliases using standard allocators
// NOTE(review): std::allocator is substituted for TBB's own allocators so
// the containers lean only on the r1 stubs defined above, not the full
// TBB runtime.  concurrent_hash_map and concurrent_lru_cache do not take
// an allocator parameter here and use their defaults.
#if TEST_CONCURRENT_VECTOR
// Thread-safe growable array of int.
using SafeVector = tbb::concurrent_vector<int, std::allocator<int>>;
#endif
#if TEST_CONCURRENT_QUEUE
// Thread-safe unbounded FIFO queue of int.
using SafeQueue = tbb::concurrent_queue<int, std::allocator<int>>;
#endif
#if TEST_CONCURRENT_MAP
// Thread-safe hash map: int -> std::string.
using SafeMap = tbb::concurrent_unordered_map<int, std::string, std::hash<int>, std::equal_to<int>, std::allocator<std::pair<const int, std::string>>>;
#endif
#if TEST_CONCURRENT_SET
// Thread-safe hash set of int.
using SafeSet = tbb::concurrent_unordered_set<int, std::hash<int>, std::equal_to<int>, std::allocator<int>>;
#endif
#if TEST_CONCURRENT_PRIORITY_QUEUE
// Thread-safe priority queue of int ordered by std::less.
using SafePriorityQueue = tbb::concurrent_priority_queue<int, std::less<int>, std::allocator<int>>;
#endif
#if TEST_CONCURRENT_HASH_MAP
// Thread-safe hash map with accessor-based per-element locking.
using SafeHashMap = tbb::concurrent_hash_map<int, std::string>;
#endif
#if TEST_CONCURRENT_LRU_CACHE
// Thread-safe LRU cache: int key -> lazily computed std::string value.
using SafeLRUCache = tbb::concurrent_lru_cache<int, std::string>;
#endif
#if TEST_CONCURRENT_VECTOR
// Exercises tbb::concurrent_vector: single-threaded basics followed by
// concurrent push_back from several worker threads.
void test_concurrent_vector() {
    std::cout << "\n=== Testing concurrent_vector ===" << std::endl;
    SafeVector vec;
    // Seed a few elements single-threaded.
    for (int seed : {1, 2, 3}) {
        vec.push_back(seed);
    }
    std::cout << "Vector size: " << vec.size() << std::endl;
    std::cout << "Vector elements: ";
    for (size_t idx = 0; idx < vec.size(); ++idx) {
        std::cout << vec[idx] << " ";
    }
    std::cout << std::endl;
    // Concurrent appends: each worker pushes a disjoint range of values.
    const int num_threads = 4;
    const int items_per_thread = 10;
    std::vector<std::thread> workers;
    workers.reserve(num_threads);
    for (int tid = 0; tid < num_threads; ++tid) {
        workers.emplace_back([&vec, tid, items_per_thread]() {
            for (int j = 0; j < items_per_thread; ++j) {
                vec.push_back(tid * 100 + j);
            }
        });
    }
    for (auto& worker : workers) {
        worker.join();
    }
    std::cout << "After multi-threading, vector size: " << vec.size() << std::endl;
    std::cout << "Expected size: " << (3 + num_threads * items_per_thread) << std::endl;
}
#endif
#if TEST_CONCURRENT_QUEUE
// Exercises tbb::concurrent_queue: basic push/try_pop, then a
// producer/consumer stress test.
//
// Fix: the consumer termination condition previously compared against a
// hard-coded 10; it now uses num_producers * items_per_producer, so
// editing either constant can no longer hang the consumers or stop them
// early.  Runtime behavior with the current constants is unchanged.
void test_concurrent_queue() {
    std::cout << "\n=== Testing concurrent_queue ===" << std::endl;
    SafeQueue queue;
    // Basic operations test
    queue.push(10);
    queue.push(20);
    queue.push(30);
    std::cout << "Added 3 elements to queue" << std::endl;
    // Pop one element back
    int value;
    if (queue.try_pop(value)) {
        std::cout << "Popped: " << value << std::endl;
    }
    // Multi-threading test - producers and consumers
    const int num_producers = 2;
    const int num_consumers = 2;
    const int items_per_producer = 5;
    // Total items the consumers must drain during the multi-threaded phase.
    const int total_items = num_producers * items_per_producer;
    std::atomic<int> consumed_count{0};
    std::vector<std::thread> threads;
    // Producer threads
    for (int p = 0; p < num_producers; ++p) {
        threads.emplace_back([&queue, p, items_per_producer]() {
            for (int i = 0; i < items_per_producer; ++i) {
                queue.push(p * 1000 + i);
            }
        });
    }
    // Consumer threads: spin (with yield) until the expected total has been
    // consumed across all consumers.
    for (int c = 0; c < num_consumers; ++c) {
        threads.emplace_back([&queue, &consumed_count, total_items]() {
            int val;
            while (consumed_count.load() < total_items) {
                if (queue.try_pop(val)) {
                    consumed_count.fetch_add(1);
                }
                std::this_thread::yield();
            }
        });
    }
    for (auto& thread : threads) {
        thread.join();
    }
    std::cout << "Consumed items: " << consumed_count.load() << std::endl;
    // Check if queue is empty.  NOTE(review): the basic test above left two
    // elements behind, so this is expected to report "No".
    int remaining;
    bool has_remaining = queue.try_pop(remaining);
    std::cout << "Queue empty: " << (has_remaining ? "No" : "Yes") << std::endl;
}
#endif
#if TEST_CONCURRENT_MAP
// Exercises tbb::concurrent_unordered_map: single-threaded insert/find,
// then concurrent inserts with disjoint key ranges per thread.
void test_concurrent_map() {
    std::cout << "\n=== Testing concurrent_unordered_map ===" << std::endl;
    SafeMap map;
    // Seed a few entries single-threaded.
    map.insert({1, "one"});
    map.insert({2, "two"});
    map.insert({3, "three"});
    std::cout << "Map size: " << map.size() << std::endl;
    // Lookup one of the seeded keys.
    auto found = map.find(2);
    if (found != map.end()) {
        std::cout << "Found key 2: " << found->second << std::endl;
    }
    // Concurrent inserts: key = tid * 100 + j keeps ranges disjoint.
    const int num_threads = 4;
    const int items_per_thread = 5;
    std::vector<std::thread> workers;
    workers.reserve(num_threads);
    for (int tid = 0; tid < num_threads; ++tid) {
        workers.emplace_back([&map, tid, items_per_thread]() {
            for (int j = 0; j < items_per_thread; ++j) {
                const int key = tid * 100 + j;
                map.insert({key, "thread_" + std::to_string(tid) + "_item_" + std::to_string(j)});
            }
        });
    }
    for (auto& worker : workers) {
        worker.join();
    }
    std::cout << "After multi-threading, map size: " << map.size() << std::endl;
    std::cout << "Expected size: " << (3 + num_threads * items_per_thread) << std::endl;
}
#endif
#if TEST_CONCURRENT_SET
void test_concurrent_set() {
std::cout << "\n=== Testing concurrent_unordered_set ===" << std::endl;
SafeSet set;
// Basic operations test
set.insert(100);
set.insert(200);
set.insert(300);
std::cout << "Set size: " << set.size() << std::endl;
// Find elements
auto it = set.find(200);
if (it != set.end()) {
std::cout << "Found element: " << *it << std::endl;
}
// Multi-threading test
const int num_threads = 4;
const int items_per_thread = 5;
std::vector<std::thread> threads;
for (int t = 0; t < num_threads; ++t) {
threads.emplace_back([&set, t, items_per_thread]() {
for (int i = 0; i < items_per_thread; ++i) {
set.insert(t * 1000 + i);
}
});
}
for (auto& thread : threads) {
thread.join();
}
std::cout << "After multi-threading, set size: " << set.size() << std::endl;
std::cout << "Expected size: " << (3 + num_threads * items_per_thread) << std::endl;
}
#endif
#if TEST_CONCURRENT_PRIORITY_QUEUE
// Exercises tbb::concurrent_priority_queue: out-of-order pushes popped
// back in priority order, then concurrent pushes from several threads.
void test_concurrent_priority_queue() {
    std::cout << "\n=== Testing concurrent_priority_queue ===" << std::endl;
    SafePriorityQueue pq;
    // Push values out of order; try_pop should return them in priority order.
    for (int seed : {30, 10, 20, 40}) {
        pq.push(seed);
    }
    std::cout << "Added 4 elements to priority queue: 30, 10, 20, 40" << std::endl;
    std::cout << "Size: " << pq.size() << std::endl;
    int popped;
    std::cout << "Popping elements: ";
    while (pq.try_pop(popped)) {
        std::cout << popped << " ";
    }
    std::cout << std::endl;
    // Concurrent pushes: value = tid * 10 + j keeps ranges disjoint.
    const int num_threads = 4;
    const int items_per_thread = 5;
    std::vector<std::thread> workers;
    workers.reserve(num_threads);
    for (int tid = 0; tid < num_threads; ++tid) {
        workers.emplace_back([&pq, tid, items_per_thread]() {
            for (int j = 0; j < items_per_thread; ++j) {
                pq.push(tid * 10 + j);
            }
        });
    }
    for (auto& worker : workers) {
        worker.join();
    }
    std::cout << "After multi-threading, priority queue size: " << pq.size() << std::endl;
    std::cout << "Expected size: " << (num_threads * items_per_thread) << std::endl;
    // Drain the queue so it is empty on exit.
    while (pq.try_pop(popped)) {
        // Just consume all elements
    }
}
#endif
#if TEST_CONCURRENT_HASH_MAP
// Exercises tbb::concurrent_hash_map: accessor-based inserts and a
// const_accessor lookup, then concurrent inserts from several threads.
void test_concurrent_hash_map() {
    std::cout << "\n=== Testing concurrent_hash_map ===" << std::endl;
    SafeHashMap hmap;
    // Helper: insert a key and, if it was newly inserted, set its value.
    // The accessor's scope bounds the element lock.
    auto put = [&hmap](int key, const std::string& val) {
        SafeHashMap::accessor a;
        if (hmap.insert(a, key)) {
            a->second = val;
        }
    };
    put(1, "one");
    put(2, "two");
    put(3, "three");
    std::cout << "Hash map size: " << hmap.size() << std::endl;
    // Read-side lookup via const_accessor.
    {
        SafeHashMap::const_accessor ca;
        if (hmap.find(ca, 2)) {
            std::cout << "Found key 2: " << ca->second << std::endl;
        }
    }
    // Concurrent inserts: key = tid * 100 + j keeps ranges disjoint.
    const int num_threads = 4;
    const int items_per_thread = 5;
    std::vector<std::thread> workers;
    workers.reserve(num_threads);
    for (int tid = 0; tid < num_threads; ++tid) {
        workers.emplace_back([&hmap, tid, items_per_thread]() {
            for (int j = 0; j < items_per_thread; ++j) {
                const int key = tid * 100 + j;
                SafeHashMap::accessor a;
                if (hmap.insert(a, key)) {
                    a->second = "thread_" + std::to_string(tid) + "_item_" + std::to_string(j);
                }
            }
        });
    }
    for (auto& worker : workers) {
        worker.join();
    }
    std::cout << "After multi-threading, hash map size: " << hmap.size() << std::endl;
    std::cout << "Expected size: " << (3 + num_threads * items_per_thread) << std::endl;
}
#endif
#if TEST_CONCURRENT_LRU_CACHE
// Exercises tbb::concurrent_lru_cache: handle-based access where misses
// invoke the value function, followed by concurrent accesses.
void test_concurrent_lru_cache() {
    std::cout << "\n=== Testing concurrent_lru_cache ===" << std::endl;
    // Capacity-5 cache; on a miss the value function fabricates "value_<key>".
    SafeLRUCache cache([](int key) -> std::string {
        return "value_" + std::to_string(key);
    }, 5);
    std::cout << "Testing LRU cache with capacity 5" << std::endl;
    // First access of each key goes through the value function.
    auto h1 = cache[1];
    auto h2 = cache[2];
    auto h3 = cache[3];
    std::cout << "Inserted keys 1, 2, 3" << std::endl;
    std::cout << "Value for key 1: " << h1.value() << std::endl;
    std::cout << "Value for key 2: " << h2.value() << std::endl;
    std::cout << "Value for key 3: " << h3.value() << std::endl;
    // Concurrent accesses: key = tid * 10 + j keeps ranges disjoint.
    const int num_threads = 3;
    const int items_per_thread = 3;
    std::vector<std::thread> workers;
    workers.reserve(num_threads);
    std::atomic<int> access_count{0};
    for (int tid = 0; tid < num_threads; ++tid) {
        workers.emplace_back([&cache, &access_count, tid, items_per_thread]() {
            for (int j = 0; j < items_per_thread; ++j) {
                const int key = tid * 10 + j;
                // The handle is held only for this iteration.
                auto handle = cache[key];
                access_count.fetch_add(1);
            }
        });
    }
    for (auto& worker : workers) {
        worker.join();
    }
    std::cout << "After multi-threading, total cache accesses: " << access_count.load() << std::endl;
    std::cout << "Expected accesses: " << (num_threads * items_per_thread) << std::endl;
}
#endif
// Entry point: prints which container tests are compiled in, runs each
// enabled test in sequence, and returns 0 on success / 1 if a test
// escapes with a std::exception.
int main() {
std::cout << "Testing OneTBB concurrent data structures in header-only mode..." << std::endl;
std::cout << "================================================================" << std::endl;
// Display which tests are enabled
std::cout << "Test configuration:" << std::endl;
std::cout << "- concurrent_vector: " << (TEST_CONCURRENT_VECTOR ? "ENABLED" : "DISABLED") << std::endl;
std::cout << "- concurrent_queue: " << (TEST_CONCURRENT_QUEUE ? "ENABLED" : "DISABLED") << std::endl;
std::cout << "- concurrent_unordered_map: " << (TEST_CONCURRENT_MAP ? "ENABLED" : "DISABLED") << std::endl;
std::cout << "- concurrent_unordered_set: " << (TEST_CONCURRENT_SET ? "ENABLED" : "DISABLED") << std::endl;
std::cout << "- concurrent_priority_queue: " << (TEST_CONCURRENT_PRIORITY_QUEUE ? "ENABLED" : "DISABLED") << std::endl;
std::cout << "- concurrent_hash_map: " << (TEST_CONCURRENT_HASH_MAP ? "ENABLED" : "DISABLED") << std::endl;
std::cout << "- concurrent_lru_cache: " << (TEST_CONCURRENT_LRU_CACHE ? "ENABLED" : "DISABLED") << std::endl;
// NOTE(review): TBB_USE_EXCEPTIONS is 0 and the r1::throw_exception stub
// never throws, so this try/catch guards only exceptions originating from
// the standard library (e.g. std::system_error from std::thread).
try {
#if TEST_CONCURRENT_VECTOR
test_concurrent_vector();
#endif
#if TEST_CONCURRENT_QUEUE
test_concurrent_queue();
#endif
#if TEST_CONCURRENT_MAP
test_concurrent_map();
#endif
#if TEST_CONCURRENT_SET
test_concurrent_set();
#endif
#if TEST_CONCURRENT_PRIORITY_QUEUE
test_concurrent_priority_queue();
#endif
#if TEST_CONCURRENT_HASH_MAP
test_concurrent_hash_map();
#endif
#if TEST_CONCURRENT_LRU_CACHE
test_concurrent_lru_cache();
#endif
// Per-test summary lines, one for each container compiled in.
std::cout << "\n================================================================" << std::endl;
std::cout << "All enabled tests completed successfully!" << std::endl;
#if TEST_CONCURRENT_VECTOR
std::cout << "✓ concurrent_vector: Thread-safe dynamic array" << std::endl;
#endif
#if TEST_CONCURRENT_QUEUE
std::cout << "✓ concurrent_queue: Thread-safe FIFO queue" << std::endl;
#endif
#if TEST_CONCURRENT_MAP
std::cout << "✓ concurrent_unordered_map: Thread-safe hash map" << std::endl;
#endif
#if TEST_CONCURRENT_SET
std::cout << "✓ concurrent_unordered_set: Thread-safe hash set" << std::endl;
#endif
#if TEST_CONCURRENT_PRIORITY_QUEUE
std::cout << "✓ concurrent_priority_queue: Thread-safe priority queue" << std::endl;
#endif
#if TEST_CONCURRENT_HASH_MAP
std::cout << "✓ concurrent_hash_map: Thread-safe hash map with read-write locking" << std::endl;
#endif
#if TEST_CONCURRENT_LRU_CACHE
std::cout << "✓ concurrent_lru_cache: Thread-safe LRU cache" << std::endl;
#endif
} catch (const std::exception& e) {
std::cout << "Test failed with exception: " << e.what() << std::endl;
return 1;
}
return 0;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment