author     Dimitris <dimstav23@gmail.com>  2025-07-15 15:11:36 +0200
committer  GitHub <noreply@github.com>     2025-07-15 15:11:36 +0200
commit     73e505f04d17eba36c41fce7b48bc4d6884b8fd0 (patch)
tree       44b5f4627309a48d6f22b54bb2ad9a2976e8601b /archive/2025
parent     ca92e7ad181a02890496872012ecc6c1d08b1658 (diff)
parent     d8c365681a41961ebe2daea5701a4d56f5400d1d (diff)
Merge pull request #6 from chriskari/upload-artifacts
Add bsc_karidas
Diffstat (limited to 'archive/2025')
-rw-r--r--  archive/2025/README.md | 1
-rw-r--r--  archive/2025/summer/bsc_karidas/.gitignore | 2
-rw-r--r--  archive/2025/summer/bsc_karidas/.gitmodules | 3
-rw-r--r--  archive/2025/summer/bsc_karidas/CMakeLists.txt | 9
-rw-r--r--  archive/2025/summer/bsc_karidas/README.md | 155
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/BenchmarkUtils.cpp | 263
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/BenchmarkUtils.hpp | 79
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/validation/batch_size.cpp | 226
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/validation/concurrency.cpp | 235
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/validation/encryption_compression_usage.cpp | 248
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/validation/file_rotation.cpp | 270
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/validation/queue_capacity.cpp | 225
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/validation/scaling_concurrency.cpp | 248
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/workloads/compression_ratio.cpp | 62
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/workloads/diverse_filepaths.cpp | 248
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/workloads/large_batches.cpp | 102
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/workloads/main.cpp | 103
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/workloads/multi_producer_small_batches.cpp | 102
-rw-r--r--  archive/2025/summer/bsc_karidas/benchmarks/workloads/single_entry_appends.cpp | 102
-rw-r--r--  archive/2025/summer/bsc_karidas/cmake/benchmarks.cmake | 37
-rw-r--r--  archive/2025/summer/bsc_karidas/cmake/dependencies.cmake | 12
-rw-r--r--  archive/2025/summer/bsc_karidas/cmake/examples.cmake | 16
-rw-r--r--  archive/2025/summer/bsc_karidas/cmake/library.cmake | 23
-rw-r--r--  archive/2025/summer/bsc_karidas/cmake/project_settings.cmake | 15
-rw-r--r--  archive/2025/summer/bsc_karidas/cmake/testing.cmake | 41
-rw-r--r--  archive/2025/summer/bsc_karidas/examples/main.cpp | 68
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/.gitignore | 33
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/CMakeLists.txt | 75
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/LICENSE.md | 62
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/README.md | 533
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/blockingconcurrentqueue.h | 582
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/blockingconcurrentqueue.cpp | 40
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/concurrentqueue.cpp | 39
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/concurrentqueue.h | 41
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/concurrentqueue.h | 3747
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/concurrentqueueConfig.cmake.in | 3
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/internal/concurrentqueue_internal_debug.h | 87
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/lightweightsemaphore.h | 427
-rw-r--r--  archive/2025/summer/bsc_karidas/external/concurrentqueue/samples.md | 375
-rw-r--r--  archive/2025/summer/bsc_karidas/include/BufferQueue.hpp | 49
-rw-r--r--  archive/2025/summer/bsc_karidas/include/Compression.hpp | 17
-rw-r--r--  archive/2025/summer/bsc_karidas/include/Config.hpp | 28
-rw-r--r--  archive/2025/summer/bsc_karidas/include/Crypto.hpp | 33
-rw-r--r--  archive/2025/summer/bsc_karidas/include/LogEntry.hpp | 61
-rw-r--r--  archive/2025/summer/bsc_karidas/include/Logger.hpp | 59
-rw-r--r--  archive/2025/summer/bsc_karidas/include/LoggingManager.hpp | 53
-rw-r--r--  archive/2025/summer/bsc_karidas/include/QueueItem.hpp | 25
-rw-r--r--  archive/2025/summer/bsc_karidas/include/SegmentedStorage.hpp | 144
-rw-r--r--  archive/2025/summer/bsc_karidas/include/Writer.hpp | 40
-rw-r--r--  archive/2025/summer/bsc_karidas/shell.nix | 12
-rw-r--r--  archive/2025/summer/bsc_karidas/src/BufferQueue.cpp | 131
-rw-r--r--  archive/2025/summer/bsc_karidas/src/Compression.cpp | 109
-rw-r--r--  archive/2025/summer/bsc_karidas/src/Crypto.cpp | 211
-rw-r--r--  archive/2025/summer/bsc_karidas/src/LogEntry.cpp | 375
-rw-r--r--  archive/2025/summer/bsc_karidas/src/Logger.cpp | 139
-rw-r--r--  archive/2025/summer/bsc_karidas/src/LoggingManager.cpp | 145
-rw-r--r--  archive/2025/summer/bsc_karidas/src/SegmentedStorage.cpp | 303
-rw-r--r--  archive/2025/summer/bsc_karidas/src/Writer.cpp | 103
-rw-r--r--  archive/2025/summer/bsc_karidas/tests/integration/test_CompressionCrypto.cpp | 91
-rw-r--r--  archive/2025/summer/bsc_karidas/tests/integration/test_WriterQueue.cpp | 117
-rw-r--r--  archive/2025/summer/bsc_karidas/tests/unit/test_BufferQueue.cpp | 920
-rw-r--r--  archive/2025/summer/bsc_karidas/tests/unit/test_Compression.cpp | 133
-rw-r--r--  archive/2025/summer/bsc_karidas/tests/unit/test_Crypto.cpp | 328
-rw-r--r--  archive/2025/summer/bsc_karidas/tests/unit/test_LogEntry.cpp | 196
-rw-r--r--  archive/2025/summer/bsc_karidas/tests/unit/test_Logger.cpp | 294
-rw-r--r--  archive/2025/summer/bsc_karidas/tests/unit/test_SegmentedStorage.cpp | 637
-rw-r--r--  archive/2025/summer/bsc_karidas/tests/unit/test_Writer.cpp | 108
67 files changed, 13800 insertions(+), 0 deletions(-)
diff --git a/archive/2025/README.md b/archive/2025/README.md
index 20de5ee6d..2b08f8b4e 100644
--- a/archive/2025/README.md
+++ b/archive/2025/README.md
@@ -7,6 +7,7 @@
 | Berkay Eren Ürün                  | LLM-OS: Orchestrating Edge Inference with Priority Scheduling and Adaptive KV Cache Management | MA | Teofil Bodea                          | [Source](/archive/2025/summer/msc_berkay_eren_ueruen)
 | Anders Choi                  | Airlift: A Binary Lifter Based on a Machine-Readable Architecture Specification | MA | Martin Fink                          | [Source](/archive/2025/summer/msc_choi)
 | Dominik Kreutzer                  | vDPDK: A Para-Virtualized DPDK Device Model for vMux                                      | MA   | Peter Okelmann, Masanori Misono          | [Source](/archive/2025/summer/msc_kreutzer)                   |
+| Christian Karidas | Tamperproof Logging System for GDPR-compliant Key-Value Stores | BA | Dimitrios Stavrakakis | [Source](/archive/2025/summer/bsc_karidas/) |
 
 ## Winter semester
 
diff --git a/archive/2025/summer/bsc_karidas/.gitignore b/archive/2025/summer/bsc_karidas/.gitignore
new file mode 100644
index 000000000..7c60cf8b8
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/.gitignore
@@ -0,0 +1,2 @@
+/build/
+.vscode/
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/.gitmodules b/archive/2025/summer/bsc_karidas/.gitmodules
new file mode 100644
index 000000000..1a38b8d27
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "external/concurrentqueue"]
+	path = external/concurrentqueue
+	url = https://github.com/cameron314/concurrentqueue.git
diff --git a/archive/2025/summer/bsc_karidas/CMakeLists.txt b/archive/2025/summer/bsc_karidas/CMakeLists.txt
new file mode 100644
index 000000000..3db4a5f9e
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/CMakeLists.txt
@@ -0,0 +1,9 @@
+cmake_minimum_required(VERSION 3.10)
+project(GDPR_Logging)
+
+include(cmake/project_settings.cmake)
+include(cmake/dependencies.cmake)
+include(cmake/library.cmake)
+include(cmake/testing.cmake)
+include(cmake/benchmarks.cmake)
+include(cmake/examples.cmake)
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/README.md b/archive/2025/summer/bsc_karidas/README.md
new file mode 100644
index 000000000..0677c32fb
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/README.md
@@ -0,0 +1,155 @@
+# Secure and Tamperproof Logging System for GDPR-compliant Key-Value Stores
+
+## Overview
+
+This bachelor thesis presents a **secure**, **tamper-evident**, **performant** and **modular** logging system for GDPR compliance.
+Its impact lies in:
+
+- Enabling verifiable audit trails with minimal integration effort
+- Supporting GDPR accountability with high performance
+- Laying the groundwork for future improvements (e.g. key management, export)
+
+**Key features** include:
+
+- **Asynchronous batch logging** to minimize client-side latency.
+- **Lock-free, multi-threaded architecture** for high concurrent throughput.
+- **Compression before encryption** to reduce I/O overhead and storage costs.
+- **Authenticated encryption (AES-GCM)** to ensure confidentiality and integrity.
+- **Immutable, append-only storage** for compliance and auditability.
+- **Future-proof design** prepared for secure export and verification support.
+
+## Setup and Usage
+
+### Prerequisites
+
+- C++17 compatible compiler (GCC 9+ or Clang 10+)
+- CMake 3.15 or higher
+- Git (for submodule management)
+
+### Dependencies
+Make sure the following libraries are available on your system:
+- OpenSSL - For cryptographic operations (AES-GCM encryption)
+- ZLIB - For compression functionality
+- Google Test (GTest) - For running unit and integration tests
+
+### Building the System
+
+1. **Clone the repository with submodules:**
+   ```bash
+   git clone --recursive <repository-url>
+   ```
+
+2. **If not cloned with `--recursive`, initialize submodules manually:**
+   ```bash
+   git submodule update --init --recursive
+   ```
+
+3. **Configure and build:**
+   ```bash
+   mkdir build
+   cd build
+   cmake ..
+   make -j$(nproc)
+   ```
+
+### Running the System
+
+A simple usage example is provided in `/examples/main.cpp`, demonstrating how to integrate and use the logging system:
+```bash
+# Run the usage example
+./logging_example
+```
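+
+For orientation, the sketch below shows roughly what such an integration looks like. It is only a sketch: it assumes the `LoggingConfig`/`LoggingManager`/`LogEntry` API as used by the benchmarks in this repository, so exact field names and enum values may differ from the headers in `include/`.
+
+```cpp
+#include "LoggingManager.hpp"
+#include "LogEntry.hpp"
+
+#include <cstdint>
+#include <optional>
+#include <vector>
+
+int main()
+{
+    // Sketch only: field names follow the benchmark sources, not necessarily include/Config.hpp.
+    LoggingConfig config;
+    config.basePath = "./logs";
+    config.baseFilename = "default";
+    config.useEncryption = true;
+    config.compressionLevel = 4;
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+
+    // One producer token per producing thread.
+    auto token = loggingManager.createProducerToken();
+
+    std::vector<LogEntry> batch;
+    batch.emplace_back(LogEntry::ActionType::READ,            // hypothetical enum value
+                       "user/user_1/profile",                 // data location
+                       "controller_1", "processor_1",         // controller / processor ids
+                       "user_1",                              // data subject id
+                       std::vector<uint8_t>{'p', 'a', 'y'});  // payload
+
+    // std::nullopt routes the batch to the default segment file.
+    loggingManager.appendBatch(batch, token, std::nullopt);
+
+    loggingManager.stop(); // drains queued entries before shutdown
+    return 0;
+}
+```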
+
+#### Running Tests
+```bash
+# Run all tests
+ctest
+
+# Or run specific tests
+./test_<component>
+```
+
+### Development Environment (Optional)
+
+A reproducible environment is provided using Nix:
+```bash
+nix-shell
+```
+
+## System Workflow
+
+1. **Log Entry Submission**: When a database proxy intercepts a request to the underlying database involving personal data, it generates a structured log entry containing metadata such as operation type, key identifier, and timestamp. This entry is submitted to the logging API.
+2. **Enqueuing**: Log entries are immediately enqueued into a thread-safe buffer, allowing the calling process to proceed without blocking on disk I/O or encryption tasks.
+3. **Batch Processing**: Dedicated writer threads continuously monitor the queue, dequeueing entries in bulk for optimized batch processing. Batched entries undergo serialization, compression and authenticated encryption (AES-GCM) for both confidentiality and integrity.
+4. **Persistent Storage**: Encrypted batches are concurrently written to append-only segment files. When a segment reaches its configured size limit, a new segment is automatically created.
+5. **Export and Verification** _(planned)_: Closed segments can be exported for audit purposes. The export process involves decryption, decompression, and verification of batch-level integrity using authentication tags and cryptographic chaining.
+
+## Design Details
+
+### Concurrent Thread-Safe Buffer Queue
+
+The buffer queue is a lock-free, high-throughput structure composed of multiple single-producer, multi-consumer (SPMC) sub-queues. Each producer thread is assigned its own sub-queue, eliminating contention and maximizing cache locality. Writer threads use round-robin scanning with consumer tokens to drain entries fairly and efficiently. The queue supports both blocking and batch-based enqueue/dequeue operations, enabling smooth operation under load and predictable performance in concurrent environments. This component is built upon [moodycamel's ConcurrentQueue](https://github.com/cameron314/concurrentqueue), a well-known C++ queue library designed for high-performance multi-threaded scenarios, adapted to meet this system's blocking-enqueue requirements.
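+
+The sub-queue/token mechanics are easiest to see on the upstream API itself. The following minimal sketch uses moodycamel's queue directly (not this project's `BufferQueue` wrapper) to illustrate token-scoped bulk enqueue and bulk dequeue; the blocking and backpressure behaviour added on top by this system is not shown.
+
+```cpp
+#include "concurrentqueue.h" // from external/concurrentqueue
+
+#include <cstddef>
+#include <cstdio>
+
+int main()
+{
+    moodycamel::ConcurrentQueue<int> queue;
+
+    // A ProducerToken pins this producer to its own internal sub-queue,
+    // so concurrent producers do not contend on shared state.
+    moodycamel::ProducerToken producerToken(queue);
+    int items[4] = {1, 2, 3, 4};
+    queue.enqueue_bulk(producerToken, items, 4);
+
+    // A ConsumerToken lets a consumer drain the producer sub-queues
+    // efficiently, rotating between them much like the round-robin
+    // scanning described above.
+    moodycamel::ConsumerToken consumerToken(queue);
+    int out[4];
+    std::size_t n = queue.try_dequeue_bulk(consumerToken, out, 4);
+    std::printf("dequeued %zu items\n", n);
+    return 0;
+}
+```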
+
+### Writer Thread
+
+Writer threads asynchronously consume entries from the buffer, group them by destination, and apply a multi-stage processing pipeline: serialization, compression, authenticated encryption (AES-GCM), and persistent write. Each writer operates independently and coordinates concurrent access to log files using atomic file offset reservations, thus minimizing synchronization overhead.
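+
+The stage ordering matters: compressing before AES-GCM is what keeps the storage footprint small, because ciphertext is effectively incompressible. The condensed sketch below illustrates the compress-then-encrypt step using zlib and OpenSSL's EVP interface directly; it is not the project's `Compression`/`Crypto` code, and key/IV management is deliberately simplified.
+
+```cpp
+#include <openssl/evp.h>
+#include <openssl/rand.h>
+#include <zlib.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <vector>
+
+// Compress a serialized batch, then encrypt it with AES-256-GCM.
+// Returns iv || ciphertext || 16-byte tag. Error handling omitted.
+std::vector<uint8_t> compressAndEncrypt(const std::vector<uint8_t> &plain,
+                                        const uint8_t key[32], int level)
+{
+    // 1) zlib compression of the serialized batch
+    uLongf compressedLen = compressBound(plain.size());
+    std::vector<uint8_t> compressed(compressedLen);
+    compress2(compressed.data(), &compressedLen, plain.data(), plain.size(), level);
+    compressed.resize(compressedLen);
+
+    // 2) AES-256-GCM authenticated encryption of the compressed bytes
+    uint8_t iv[12];
+    RAND_bytes(iv, sizeof(iv));
+
+    std::vector<uint8_t> out(sizeof(iv) + compressed.size() + 16);
+    std::copy(iv, iv + sizeof(iv), out.begin());
+
+    EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
+    EVP_EncryptInit_ex(ctx, EVP_aes_256_gcm(), nullptr, key, iv);
+
+    int len = 0;
+    EVP_EncryptUpdate(ctx, out.data() + sizeof(iv), &len,
+                      compressed.data(), static_cast<int>(compressed.size()));
+    int total = len;
+    EVP_EncryptFinal_ex(ctx, out.data() + sizeof(iv) + total, &len);
+    total += len;
+
+    // Append the GCM tag that later allows integrity verification.
+    EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, out.data() + sizeof(iv) + total);
+    EVP_CIPHER_CTX_free(ctx);
+
+    out.resize(sizeof(iv) + total + 16);
+    return out;
+}
+```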
+
+### Segmented Storage
+
+The segmented storage component provides append-only, immutable log files with support for concurrent writers. Files are rotated once a configurable size threshold is reached, and access is optimized via an LRU-based file descriptor cache. Threads reserve byte ranges atomically before writing, ensuring data consistency without locking. This design supports scalable audit logging while balancing durability, performance, and resource usage.
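+
+The lock-free write path reduces to reserving a byte range with an atomic fetch-and-add and then issuing a positional write at the reserved offset, so concurrent writers never serialize on a file lock. A minimal sketch of that idea follows; the actual `SegmentedStorage` additionally handles segment rotation, retries, and the descriptor cache.
+
+```cpp
+#include <unistd.h> // pwrite
+
+#include <atomic>
+#include <cstddef>
+
+// One shared write offset per open segment file.
+struct Segment
+{
+    int fd = -1;
+    std::atomic<std::size_t> writeOffset{0};
+};
+
+// Reserve a disjoint [offset, offset + len) range, then write into it.
+// No lock is needed: fetch_add hands every caller a unique range.
+ssize_t appendToSegment(Segment &segment, const void *data, std::size_t len)
+{
+    std::size_t offset = segment.writeOffset.fetch_add(len, std::memory_order_relaxed);
+    // Rotation check (segment full -> switch to a new file) omitted here.
+    return pwrite(segment.fd, data, len, static_cast<off_t>(offset));
+}
+```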
+
+## Benchmarks
+
+To evaluate performance under realistic heavy-load conditions, the system was benchmarked on the following hardware:
+
+- **CPU**: 2× Intel Xeon Gold 6236 (32 cores, 64 threads)
+- **Memory**: 320 GiB DDR4-3200 ECC
+- **Storage**: Intel S4510 SSD (960 GB)
+- **OS**: NixOS 24.11, ZFS filesystem
+- **Execution Model**: NUMA-optimized (pinned to a single node)
+
+### Scalability benchmark
+
+To evaluate parallel scalability, a proportional-load benchmark was conducted where the number of producer and writer threads was scaled together from 1 to 16, resulting in an input data volume that grew linearly with thread count.
+
+#### Configuration Highlights
+
+- **Thread scaling**: 1–16 producers and 1–16 writers
+- **Entries per Producer**: 2,000,000
+- **Entry size**: ~4 KiB
+- **Total data at 16× scale**: ~125 GiB
+- **Writer Batch Size**: 2048 entries
+- **Producer Batch Size**: 4096 entries
+- **Queue Capacity**: 2,000,000 entries
+- **Encryption**: Enabled
+- **Compression**: Level 4 (balanced-fast)
+
+#### Results Summary
+
+The system was executed on a single NUMA node with **16 physical cores**. At **16 total threads** (8 producers + 8 writers), the system achieved **95% scaling efficiency**, demonstrating near-ideal parallelism. Even beyond this point—up to **32 total threads**—the system continued to deliver strong throughput gains by leveraging hyperthreading, reaching approximately **80% scaling efficiency** at full utilization. This shows the system maintains solid performance even under increased CPU contention.
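+
+_Scaling efficiency here is presumably throughput per thread relative to the single-threaded baseline, i.e. `efficiency = T_N / (N × T_1)`; 95% at 16 threads therefore corresponds to roughly 15.2× the single-thread throughput._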
+
+### Main benchmark
+
+#### Workload Configuration
+
+- **Producers**: 16 asynchronous log producers
+- **Entries per Producer**: 2,000,000
+- **Entry Size**: ~4 KiB
+- **Total Input Volume**: ~125 GiB
+- **Writer Batch Size**: 2048 entries
+- **Producer Batch Size**: 4096 entries
+- **Queue Capacity**: 2,000,000 entries
+- **Encryption**: Enabled
+- **Compression**: Level 1, fast
+
+#### Results
+
+| **Metric**                  | **Value**                                    |
+| --------------------------- | -------------------------------------------- |
+| **Execution Time**          | 59.95 seconds                                |
+| **Throughput (Entries)**    | 533,711 entries/sec                          |
+| **Throughput (Data)**       | 2.08 GiB/sec                                 |
+| **Latency**                 | Median: 54.7 ms, Avg: 55.9 ms, Max: 182.7 ms |
+| **Write Amplification**     | 0.109                                        |
+| **Final Storage Footprint** | 13.62 GiB for 124.6 GiB input                |
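+
+_Write amplification is reported as bytes persisted divided by bytes submitted (13.62 GiB / 124.6 GiB ≈ 0.109); a value below 1 means the compression savings more than offset the encryption and batch-framing overhead._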
+
+These results demonstrate the system’s ability to sustain high-throughput logging with low latency and low storage overhead, even under encryption and compression.
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/BenchmarkUtils.cpp b/archive/2025/summer/bsc_karidas/benchmarks/BenchmarkUtils.cpp
new file mode 100644
index 000000000..070d0672c
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/BenchmarkUtils.cpp
@@ -0,0 +1,263 @@
+#include "BenchmarkUtils.hpp"
+
+LatencyCollector appendLogEntries(LoggingManager &loggingManager, const std::vector<BatchWithDestination> &batches)
+{
+    LatencyCollector localCollector;
+    // Pre-allocate to avoid reallocations during measurement
+    localCollector.reserve(batches.size());
+
+    auto token = loggingManager.createProducerToken();
+
+    for (const auto &batchWithDest : batches)
+    {
+        // Measure latency for each appendBatch call
+        auto startTime = std::chrono::high_resolution_clock::now();
+
+        bool success = loggingManager.appendBatch(batchWithDest.first, token, batchWithDest.second);
+
+        auto endTime = std::chrono::high_resolution_clock::now();
+        auto latency = std::chrono::duration_cast<std::chrono::nanoseconds>(endTime - startTime);
+
+        // Record the latency measurement in thread-local collector
+        localCollector.addMeasurement(latency);
+
+        if (!success)
+        {
+            std::cerr << "Failed to append batch of " << batchWithDest.first.size() << " entries to "
+                      << (batchWithDest.second ? *batchWithDest.second : "default") << std::endl;
+        }
+    }
+
+    return localCollector;
+}
+
+void cleanupLogDirectory(const std::string &logDir)
+{
+    try
+    {
+        if (std::filesystem::exists(logDir))
+        {
+            std::filesystem::remove_all(logDir);
+        }
+    }
+    catch (const std::exception &e)
+    {
+        std::cerr << "Error cleaning log directory: " << e.what() << std::endl;
+    }
+}
+
+size_t calculateTotalDataSize(const std::vector<BatchWithDestination> &batches, int numProducers)
+{
+    size_t totalSize = 0;
+
+    for (const auto &batchWithDest : batches)
+    {
+        for (const auto &entry : batchWithDest.first)
+        {
+            totalSize += entry.serialize().size();
+        }
+    }
+
+    return totalSize * numProducers;
+}
+
+size_t calculateDirectorySize(const std::string &dirPath)
+{
+    size_t totalSize = 0;
+    for (const auto &entry : std::filesystem::recursive_directory_iterator(dirPath))
+    {
+        if (entry.is_regular_file())
+        {
+            totalSize += std::filesystem::file_size(entry.path());
+        }
+    }
+    return totalSize;
+}
+
+std::vector<BatchWithDestination> generateBatches(
+    int numEntries,
+    int numSpecificFiles,
+    int batchSize,
+    int payloadSize)
+{
+    std::vector<BatchWithDestination> batches;
+
+    // Generate specific filenames
+    std::vector<std::string> specificFilenames;
+    for (int i = 0; i < numSpecificFiles; i++)
+    {
+        specificFilenames.push_back("specific_log_file" + std::to_string(i + 1) + ".log");
+    }
+
+    int totalChoices = numSpecificFiles + 1; // +1 for default (std::nullopt)
+    int generated = 0;
+    int destinationIndex = 0;
+
+    // Random number generation setup
+    std::random_device rd;
+    std::mt19937 rng(rd());
+
+    // Define pools similar to compression_ratio.cpp
+    std::vector<std::string> userIds;
+    for (int i = 1; i <= 1000; ++i)
+    {
+        userIds.push_back("user_" + std::to_string(i));
+    }
+
+    std::vector<std::string> attributes = {
+        "profile", "settings", "history", "preferences", "contacts",
+        "messages", "photos", "documents", "videos", "audio"};
+
+    std::vector<std::string> controllerIds;
+    for (int i = 1; i <= 10; ++i)
+    {
+        controllerIds.push_back("controller_" + std::to_string(i));
+    }
+
+    std::vector<std::string> processorIds;
+    for (int i = 1; i <= 20; ++i)
+    {
+        processorIds.push_back("processor_" + std::to_string(i));
+    }
+
+    std::vector<std::string> wordList = {
+        "the", "data", //"to", "and", "user","is", "in", "for", "of", "access",
+        //"system", "time", "log", "with", "on", "from", "request", "error", "file", "server",
+        //"update", "status", "by", "at", "process", "information", "new", "this", "connection", "failed",
+        //"success", "operation", "id", "network", "event", "application", "check", "value", "into", "service",
+        //"query", "response", "get", "set", "action", "report", "now", "client", "device", "start"
+    };
+
+    // Zipfian distribution for payload words
+    std::vector<double> weights;
+    for (size_t k = 0; k < wordList.size(); ++k)
+    {
+        weights.push_back(1.0 / (k + 1.0));
+    }
+    std::discrete_distribution<size_t> wordDist(weights.begin(), weights.end());
+
+    // Generate power-of-2 sizes for variable payload
+    std::vector<size_t> powerOf2Sizes;
+    int minPowerOf2 = 5; // 2^5 = 32
+    int maxPowerOf2 = static_cast<int>(std::log2(payloadSize));
+    for (int power = minPowerOf2; power <= maxPowerOf2; power++)
+    {
+        powerOf2Sizes.push_back(1 << power); // 2^power
+    }
+
+    // Distributions for random selections
+    std::uniform_int_distribution<int> actionDist(0, 3); // CREATE, READ, UPDATE, DELETE
+    std::uniform_int_distribution<size_t> userDist(0, userIds.size() - 1);
+    std::uniform_int_distribution<size_t> attrDist(0, attributes.size() - 1);
+    std::uniform_int_distribution<size_t> controllerDist(0, controllerIds.size() - 1);
+    std::uniform_int_distribution<size_t> processorDist(0, processorIds.size() - 1);
+    std::uniform_int_distribution<size_t> powerOf2SizeDist(0, powerOf2Sizes.size() - 1);
+
+    while (generated < numEntries)
+    {
+        int currentBatchSize = std::min(batchSize, numEntries - generated);
+
+        // Assign destination in round-robin manner
+        std::optional<std::string> targetFilename = std::nullopt;
+        if (destinationIndex % totalChoices > 0)
+        {
+            targetFilename = specificFilenames[(destinationIndex % totalChoices) - 1];
+        }
+
+        // Generate the batch
+        std::vector<LogEntry> batch;
+        batch.reserve(currentBatchSize);
+        for (int i = 0; i < currentBatchSize; i++)
+        {
+            // Generate realistic log entry
+            auto action = static_cast<LogEntry::ActionType>(actionDist(rng));
+            std::string user_id = userIds[userDist(rng)];
+            std::string attribute = attributes[attrDist(rng)];
+            std::string dataLocation = "user/" + user_id + "/" + attribute;
+            std::string dataSubjectId = user_id;
+            std::string dataControllerId = controllerIds[controllerDist(rng)];
+            std::string dataProcessorId = processorIds[processorDist(rng)];
+
+            // Determine targetSize
+            size_t targetSize = static_cast<size_t>(payloadSize);
+
+            // Build payload
+            std::string payloadStr;
+            while (payloadStr.size() < targetSize)
+            {
+                if (!payloadStr.empty())
+                    payloadStr += " ";
+                size_t wordIndex = wordDist(rng);
+                payloadStr += wordList[wordIndex];
+            }
+            if (payloadStr.size() > targetSize)
+            {
+                payloadStr = payloadStr.substr(0, targetSize);
+            }
+            std::vector<uint8_t> payload(payloadStr.begin(), payloadStr.end());
+
+            LogEntry entry(action,
+                           dataLocation,
+                           dataControllerId,
+                           dataProcessorId,
+                           dataSubjectId,
+                           std::move(payload));
+            batch.push_back(std::move(entry));
+        }
+
+        batches.push_back({std::move(batch), targetFilename});
+        generated += currentBatchSize;
+        destinationIndex++; // Move to the next destination
+    }
+
+    return batches;
+}
+
+LatencyStats calculateLatencyStats(const LatencyCollector &collector)
+{
+    const auto &latencies = collector.getMeasurements();
+
+    if (latencies.empty())
+    {
+        return {0.0, 0.0, 0.0, 0};
+    }
+
+    // Convert to milliseconds for easier reading
+    std::vector<double> latenciesMs;
+    latenciesMs.reserve(latencies.size());
+    for (const auto &lat : latencies)
+    {
+        latenciesMs.push_back(static_cast<double>(lat.count()) / 1e6); // ns to ms
+    }
+
+    // Sort for percentile calculations
+    std::sort(latenciesMs.begin(), latenciesMs.end());
+
+    LatencyStats stats;
+    stats.count = latenciesMs.size();
+    stats.maxMs = latenciesMs.back();
+    stats.avgMs = std::accumulate(latenciesMs.begin(), latenciesMs.end(), 0.0) / latenciesMs.size();
+
+    // Median
+    size_t medianIdx = latenciesMs.size() / 2;
+    if (latenciesMs.size() % 2 == 0)
+    {
+        stats.medianMs = (latenciesMs[medianIdx - 1] + latenciesMs[medianIdx]) / 2.0;
+    }
+    else
+    {
+        stats.medianMs = latenciesMs[medianIdx];
+    }
+
+    return stats;
+}
+
+void printLatencyStats(const LatencyStats &stats)
+{
+    std::cout << "============== Latency Statistics ==============" << std::endl;
+    std::cout << "Total append operations: " << stats.count << std::endl;
+    std::cout << "Max latency: " << stats.maxMs << " ms" << std::endl;
+    std::cout << "Average latency: " << stats.avgMs << " ms" << std::endl;
+    std::cout << "Median latency: " << stats.medianMs << " ms" << std::endl;
+    std::cout << "===============================================" << std::endl;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/BenchmarkUtils.hpp b/archive/2025/summer/bsc_karidas/benchmarks/BenchmarkUtils.hpp
new file mode 100644
index 000000000..ec94d89ce
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/BenchmarkUtils.hpp
@@ -0,0 +1,79 @@
+#ifndef BENCHMARK_UTILS_HPP
+#define BENCHMARK_UTILS_HPP
+
+#include "LoggingManager.hpp"
+#include <vector>
+#include <string>
+#include <optional>
+#include <filesystem>
+#include <iostream>
+#include <chrono>
+#include <algorithm>
+#include <random>
+#include <numeric>
+
+using BatchWithDestination = std::pair<std::vector<LogEntry>, std::optional<std::string>>;
+
+class LatencyCollector
+{
+private:
+    std::vector<std::chrono::nanoseconds> latencies;
+
+public:
+    void addMeasurement(std::chrono::nanoseconds latency)
+    {
+        latencies.push_back(latency);
+    }
+
+    void reserve(size_t capacity)
+    {
+        latencies.reserve(capacity);
+    }
+
+    const std::vector<std::chrono::nanoseconds> &getMeasurements() const
+    {
+        return latencies;
+    }
+
+    void clear()
+    {
+        latencies.clear();
+    }
+
+    // Merge another collector's measurements into this one
+    void merge(const LatencyCollector &other)
+    {
+        const auto &otherLatencies = other.getMeasurements();
+        latencies.insert(latencies.end(), otherLatencies.begin(), otherLatencies.end());
+    }
+};
+
+struct LatencyStats
+{
+    double maxMs;
+    double avgMs;
+    double medianMs;
+    size_t count;
+};
+
+// Function to calculate statistics from a merged collector
+LatencyStats calculateLatencyStats(const LatencyCollector &collector);
+
+// Modified to return latency measurements instead of using global state
+LatencyCollector appendLogEntries(LoggingManager &loggingManager, const std::vector<BatchWithDestination> &batches);
+
+void cleanupLogDirectory(const std::string &logDir);
+
+size_t calculateTotalDataSize(const std::vector<BatchWithDestination> &batches, int numProducers);
+
+size_t calculateDirectorySize(const std::string &dirPath);
+
+std::vector<BatchWithDestination> generateBatches(
+    int numEntries,
+    int numSpecificFiles,
+    int batchSize,
+    int payloadSize);
+
+void printLatencyStats(const LatencyStats &stats);
+
+#endif
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/validation/batch_size.cpp b/archive/2025/summer/bsc_karidas/benchmarks/validation/batch_size.cpp
new file mode 100644
index 000000000..e5f2a44e6
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/validation/batch_size.cpp
@@ -0,0 +1,226 @@
+#include "BenchmarkUtils.hpp"
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <fstream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <iomanip>
+#include <filesystem>
+
+struct BenchmarkResult
+{
+    double elapsedSeconds;
+    double throughputEntries;
+    double logicalThroughputGiB;
+    double physicalThroughputGiB;
+    double writeAmplification;
+    LatencyStats latencyStats;
+};
+
+BenchmarkResult runBatchSizeBenchmark(const LoggingConfig &baseConfig, int writerBatchSize, int numProducerThreads,
+                                      int entriesPerProducer, int numSpecificFiles, int producerBatchSize, int payloadSize)
+{
+    LoggingConfig config = baseConfig;
+    config.basePath = "./logs/batch_" + std::to_string(writerBatchSize);
+    config.batchSize = writerBatchSize;
+
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "Generating batches with pre-determined destinations for all threads...";
+    std::vector<BatchWithDestination> batches = generateBatches(entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+    std::cout << " Done." << std::endl;
+
+    size_t totalDataSizeBytes = calculateTotalDataSize(batches, numProducerThreads);
+    double totalDataSizeGiB = static_cast<double>(totalDataSizeBytes) / (1024 * 1024 * 1024);
+    std::cout << "Total data to be written: " << totalDataSizeBytes << " bytes ("
+              << totalDataSizeGiB << " GiB)" << std::endl;
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    // Each future now returns a LatencyCollector with thread-local measurements
+    std::vector<std::future<LatencyCollector>> futures;
+    for (int i = 0; i < numProducerThreads; i++)
+    {
+        futures.push_back(std::async(
+            std::launch::async,
+            appendLogEntries,
+            std::ref(loggingManager),
+            std::ref(batches)));
+    }
+
+    // Collect latency measurements from all threads
+    LatencyCollector masterCollector;
+    for (auto &future : futures)
+    {
+        LatencyCollector threadCollector = future.get();
+        masterCollector.merge(threadCollector);
+    }
+
+    loggingManager.stop();
+    auto endTime = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = endTime - startTime;
+
+    size_t finalStorageSize = calculateDirectorySize(config.basePath);
+    double writeAmplification = static_cast<double>(finalStorageSize) / totalDataSizeBytes;
+
+    double elapsedSeconds = elapsed.count();
+    const size_t totalEntries = numProducerThreads * entriesPerProducer;
+    double throughputEntries = totalEntries / elapsedSeconds;
+    double logicalThroughputGiB = totalDataSizeGiB / elapsedSeconds;
+    double physicalThroughputGiB = static_cast<double>(finalStorageSize) / (1024.0 * 1024.0 * 1024.0 * elapsedSeconds);
+
+    // Calculate latency statistics from merged measurements
+    LatencyStats latencyStats = calculateLatencyStats(masterCollector);
+
+    cleanupLogDirectory(config.basePath);
+
+    return BenchmarkResult{
+        elapsedSeconds,
+        throughputEntries,
+        logicalThroughputGiB,
+        physicalThroughputGiB,
+        writeAmplification,
+        latencyStats};
+}
+
+// Write CSV header
+void writeCSVHeader(std::ofstream &csvFile)
+{
+    csvFile << "batch_size,elapsed_seconds,throughput_entries_per_sec,logical_throughput_gib_per_sec,"
+            << "physical_throughput_gib_per_sec,relative_performance,write_amplification,"
+            << "avg_latency_ms,median_latency_ms,max_latency_ms,latency_count\n";
+}
+
+// Write a single result row to CSV
+void writeCSVRow(std::ofstream &csvFile, int batchSize, const BenchmarkResult &result, double relativePerf)
+{
+    csvFile << batchSize << ","
+            << std::fixed << std::setprecision(6) << result.elapsedSeconds << ","
+            << std::fixed << std::setprecision(2) << result.throughputEntries << ","
+            << std::fixed << std::setprecision(6) << result.logicalThroughputGiB << ","
+            << std::fixed << std::setprecision(6) << result.physicalThroughputGiB << ","
+            << std::fixed << std::setprecision(6) << relativePerf << ","
+            << std::fixed << std::setprecision(8) << result.writeAmplification << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.avgMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.medianMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.maxMs << ","
+            << result.latencyStats.count << "\n";
+}
+
+void runBatchSizeComparison(const LoggingConfig &baseConfig, const std::vector<int> &batchSizes,
+                            int numProducerThreads, int entriesPerProducer,
+                            int numSpecificFiles, int producerBatchSize, int payloadSize,
+                            const std::string &csvFilename = "batch_size_benchmark.csv")
+{
+    std::vector<BenchmarkResult> results;
+
+    // Open CSV file for writing
+    std::ofstream csvFile(csvFilename);
+    if (!csvFile.is_open())
+    {
+        std::cerr << "Error: Could not open CSV file " << csvFilename << " for writing." << std::endl;
+        return;
+    }
+
+    writeCSVHeader(csvFile);
+
+    std::cout << "Running batch size benchmark with " << batchSizes.size() << " data points..." << std::endl;
+    std::cout << "Results will be saved to: " << csvFilename << std::endl;
+
+    for (size_t i = 0; i < batchSizes.size(); i++)
+    {
+        int batchSize = batchSizes[i];
+        std::cout << "\nProgress: " << (i + 1) << "/" << batchSizes.size()
+                  << " - Running benchmark with writer batch size: " << batchSize << "..." << std::endl;
+
+        BenchmarkResult result = runBatchSizeBenchmark(
+            baseConfig, batchSize, numProducerThreads,
+            entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+
+        results.push_back(result);
+
+        // Calculate relative performance (using first result as baseline)
+        double relativePerf = results.size() > 1 ? result.throughputEntries / results[0].throughputEntries : 1.0;
+
+        // Write result to CSV immediately
+        writeCSVRow(csvFile, batchSize, result, relativePerf);
+        csvFile.flush(); // Ensure data is written in case of early termination
+
+        // Print progress summary
+        std::cout << "  Completed: " << std::fixed << std::setprecision(2)
+                  << result.throughputEntries << " entries/s, "
+                  << std::fixed << std::setprecision(3) << result.logicalThroughputGiB << " GiB/s" << std::endl;
+
+        // Small delay between runs
+        std::this_thread::sleep_for(std::chrono::seconds(1));
+    }
+
+    csvFile.close();
+    std::cout << "\nBenchmark completed! Results saved to " << csvFilename << std::endl;
+
+    std::cout << "\n=========== WRITER BATCH SIZE BENCHMARK SUMMARY ===========" << std::endl;
+    std::cout << std::left << std::setw(12) << "Batch Size"
+              << std::setw(15) << "Time (sec)"
+              << std::setw(20) << "Throughput (entries/s)"
+              << std::setw(15) << "Logical (GiB/s)"
+              << std::setw(15) << "Physical (GiB/s)"
+              << std::setw(12) << "Rel. Perf"
+              << std::setw(15) << "Write Amp."
+              << std::setw(12) << "Avg Lat(ms)" << std::endl;
+    std::cout << "--------------------------------------------------------------------------------------------------------------------------------" << std::endl;
+
+    for (size_t i = 0; i < batchSizes.size(); i++)
+    {
+        double relativePerf = results[i].throughputEntries / results[0].throughputEntries;
+        std::cout << std::left << std::setw(12) << batchSizes[i]
+                  << std::setw(15) << std::fixed << std::setprecision(2) << results[i].elapsedSeconds
+                  << std::setw(20) << std::fixed << std::setprecision(2) << results[i].throughputEntries
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].logicalThroughputGiB
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].physicalThroughputGiB
+                  << std::setw(12) << std::fixed << std::setprecision(2) << relativePerf
+                  << std::setw(15) << std::fixed << std::setprecision(4) << results[i].writeAmplification
+                  << std::setw(12) << std::fixed << std::setprecision(3) << results[i].latencyStats.avgMs << std::endl;
+    }
+    std::cout << "======================================================================================================================================" << std::endl;
+}
+
+int main()
+{
+    // System parameters
+    LoggingConfig baseConfig;
+    baseConfig.baseFilename = "default";
+    baseConfig.maxSegmentSize = 500 * 1024 * 1024; // 500 MB
+    baseConfig.maxAttempts = 5;
+    baseConfig.baseRetryDelay = std::chrono::milliseconds(1);
+    baseConfig.queueCapacity = 2000000;
+    baseConfig.maxExplicitProducers = 16;
+    baseConfig.numWriterThreads = 16;
+    baseConfig.appendTimeout = std::chrono::minutes(2);
+    baseConfig.useEncryption = true;
+    baseConfig.compressionLevel = 4;
+    baseConfig.maxOpenFiles = 512;
+    // Benchmark parameters
+    const int numSpecificFiles = 256;
+    const int producerBatchSize = 4096;
+    const int numProducers = 16;
+    const int entriesPerProducer = 2000000;
+    const int payloadSize = 4096;
+
+    std::vector<int> batchSizes = {1, 4, 8, 16, 32, 64, 96, 128, 256, 512, 768, 1024, 1536, 2048, 4096, 8192, 16384, 32768, 65536, 131072};
+
+    runBatchSizeComparison(baseConfig,
+                           batchSizes,
+                           numProducers,
+                           entriesPerProducer,
+                           numSpecificFiles,
+                           producerBatchSize,
+                           payloadSize,
+                           "batch_size_benchmark_results.csv");
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/validation/concurrency.cpp b/archive/2025/summer/bsc_karidas/benchmarks/validation/concurrency.cpp
new file mode 100644
index 000000000..428fd40e7
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/validation/concurrency.cpp
@@ -0,0 +1,235 @@
+#include "BenchmarkUtils.hpp"
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <fstream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <iomanip>
+#include <filesystem>
+
+struct BenchmarkResult
+{
+    double executionTime;
+    double throughputEntries;
+    double logicalThroughputGiB;
+    double physicalThroughputGiB;
+    size_t inputDataSizeBytes;
+    size_t outputDataSizeBytes;
+    double writeAmplification;
+    LatencyStats latencyStats;
+};
+
+BenchmarkResult runBenchmark(const LoggingConfig &baseConfig, int numWriterThreads, int numProducerThreads,
+                             int entriesPerProducer, int numSpecificFiles, int producerBatchSize, int payloadSize)
+{
+    LoggingConfig config = baseConfig;
+    config.basePath = "./logs_writers";
+    config.numWriterThreads = numWriterThreads;
+
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "Generating batches with pre-determined destinations for all threads...";
+    std::vector<BatchWithDestination> batches = generateBatches(entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+    std::cout << " Done." << std::endl;
+
+    size_t totalDataSizeBytes = calculateTotalDataSize(batches, numProducerThreads);
+    double totalDataSizeGiB = static_cast<double>(totalDataSizeBytes) / (1024 * 1024 * 1024);
+    std::cout << "Total data to be written: " << totalDataSizeBytes << " bytes ("
+              << totalDataSizeGiB << " GiB)" << std::endl;
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    // Each future now returns a LatencyCollector with thread-local measurements
+    std::vector<std::future<LatencyCollector>> futures;
+    for (int i = 0; i < numProducerThreads; i++)
+    {
+        futures.push_back(std::async(
+            std::launch::async,
+            appendLogEntries,
+            std::ref(loggingManager),
+            std::ref(batches)));
+    }
+
+    // Collect latency measurements from all threads
+    LatencyCollector masterCollector;
+    for (auto &future : futures)
+    {
+        LatencyCollector threadCollector = future.get();
+        masterCollector.merge(threadCollector);
+    }
+
+    loggingManager.stop();
+    auto endTime = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = endTime - startTime;
+
+    size_t finalStorageSize = calculateDirectorySize(config.basePath);
+    double writeAmplification = static_cast<double>(finalStorageSize) / totalDataSizeBytes;
+
+    double elapsedSeconds = elapsed.count();
+    const size_t totalEntries = numProducerThreads * entriesPerProducer;
+    double throughputEntries = totalEntries / elapsedSeconds;
+    double logicalThroughputGiB = totalDataSizeGiB / elapsedSeconds;
+    double physicalThroughputGiB = static_cast<double>(finalStorageSize) / (1024.0 * 1024.0 * 1024.0 * elapsedSeconds);
+
+    // Calculate latency statistics from merged measurements
+    LatencyStats latencyStats = calculateLatencyStats(masterCollector);
+
+    cleanupLogDirectory(config.basePath);
+
+    return BenchmarkResult{
+        elapsedSeconds,
+        throughputEntries,
+        logicalThroughputGiB,
+        physicalThroughputGiB,
+        totalDataSizeBytes,
+        finalStorageSize,
+        writeAmplification,
+        latencyStats};
+}
+
+// Write CSV header
+void writeCSVHeader(std::ofstream &csvFile)
+{
+    csvFile << "writer_threads,execution_time_seconds,throughput_entries_per_sec,logical_throughput_gib_per_sec,"
+            << "physical_throughput_gib_per_sec,input_data_size_bytes,output_data_size_bytes,speedup_vs_baseline,"
+            << "write_amplification,avg_latency_ms,median_latency_ms,max_latency_ms,latency_count\n";
+}
+
+// Write a single result row to CSV
+void writeCSVRow(std::ofstream &csvFile, int writerThreads, const BenchmarkResult &result, double speedup)
+{
+    csvFile << writerThreads << ","
+            << std::fixed << std::setprecision(6) << result.executionTime << ","
+            << std::fixed << std::setprecision(2) << result.throughputEntries << ","
+            << std::fixed << std::setprecision(6) << result.logicalThroughputGiB << ","
+            << std::fixed << std::setprecision(6) << result.physicalThroughputGiB << ","
+            << result.inputDataSizeBytes << ","
+            << result.outputDataSizeBytes << ","
+            << std::fixed << std::setprecision(6) << speedup << ","
+            << std::fixed << std::setprecision(8) << result.writeAmplification << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.avgMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.medianMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.maxMs << ","
+            << result.latencyStats.count << "\n";
+}
+
+void runConcurrencyBenchmark(const LoggingConfig &baseConfig, const std::vector<int> &writerThreadCounts,
+                             int numProducerThreads, int entriesPerProducer,
+                             int numSpecificFiles, int producerBatchSize, int payloadSize,
+                             const std::string &csvFilename = "concurrency_benchmark.csv")
+{
+    std::vector<BenchmarkResult> results;
+
+    // Open CSV file for writing
+    std::ofstream csvFile(csvFilename);
+    if (!csvFile.is_open())
+    {
+        std::cerr << "Error: Could not open CSV file " << csvFilename << " for writing." << std::endl;
+        return;
+    }
+
+    writeCSVHeader(csvFile);
+
+    std::cout << "Running concurrency benchmark with " << writerThreadCounts.size() << " data points..." << std::endl;
+    std::cout << "Results will be saved to: " << csvFilename << std::endl;
+
+    for (size_t i = 0; i < writerThreadCounts.size(); i++)
+    {
+        int writerCount = writerThreadCounts[i];
+        std::cout << "\nProgress: " << (i + 1) << "/" << writerThreadCounts.size()
+                  << " - Running benchmark with " << writerCount << " writer thread(s)..." << std::endl;
+
+        BenchmarkResult result = runBenchmark(baseConfig, writerCount, numProducerThreads, entriesPerProducer,
+                                              numSpecificFiles, producerBatchSize, payloadSize);
+
+        results.push_back(result);
+
+        // Calculate speedup (using first result as baseline)
+        double speedup = results.size() > 1 ? result.throughputEntries / results[0].throughputEntries : 1.0;
+
+        // Write result to CSV immediately
+        writeCSVRow(csvFile, writerCount, result, speedup);
+        csvFile.flush(); // Ensure data is written in case of early termination
+
+        // Print progress summary
+        std::cout << "  Completed: " << std::fixed << std::setprecision(2)
+                  << result.throughputEntries << " entries/s, "
+                  << std::fixed << std::setprecision(3) << result.logicalThroughputGiB << " GiB/s, "
+                  << std::fixed << std::setprecision(2) << speedup << "x speedup" << std::endl;
+    }
+
+    csvFile.close();
+    std::cout << "\nBenchmark completed! Results saved to " << csvFilename << std::endl;
+
+    // Still print summary table to console for immediate review
+    std::cout << "\n=================== CONCURRENCY BENCHMARK SUMMARY ===================" << std::endl;
+    std::cout << std::left << std::setw(20) << "Writer Threads"
+              << std::setw(15) << "Time (sec)"
+              << std::setw(20) << "Throughput (ent/s)"
+              << std::setw(15) << "Logical (GiB/s)"
+              << std::setw(15) << "Physical (GiB/s)"
+              << std::setw(20) << "Input Size (bytes)"
+              << std::setw(20) << "Storage Size (bytes)"
+              << std::setw(15) << "Write Amp."
+              << std::setw(12) << "Speedup vs. 1"
+              << std::setw(12) << "Avg Lat(ms)" << std::endl;
+    std::cout << "--------------------------------------------------------------------------------------------------------------------------------" << std::endl;
+
+    double baselineThroughputEntries = results[0].throughputEntries;
+
+    for (size_t i = 0; i < writerThreadCounts.size(); i++)
+    {
+        double speedup = results[i].throughputEntries / baselineThroughputEntries;
+        std::cout << std::left << std::setw(20) << writerThreadCounts[i]
+                  << std::setw(15) << std::fixed << std::setprecision(2) << results[i].executionTime
+                  << std::setw(20) << std::fixed << std::setprecision(2) << results[i].throughputEntries
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].logicalThroughputGiB
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].physicalThroughputGiB
+                  << std::setw(20) << results[i].inputDataSizeBytes
+                  << std::setw(20) << results[i].outputDataSizeBytes
+                  << std::setw(15) << std::fixed << std::setprecision(4) << results[i].writeAmplification
+                  << std::setw(12) << std::fixed << std::setprecision(2) << speedup
+                  << std::setw(12) << std::fixed << std::setprecision(3) << results[i].latencyStats.avgMs << std::endl;
+    }
+    std::cout << "================================================================================================================================" << std::endl;
+}
+
+int main()
+{
+    // system parameters
+    LoggingConfig baseConfig;
+    baseConfig.baseFilename = "default";
+    baseConfig.maxSegmentSize = 50 * 1024 * 1024; // 50 MB
+    baseConfig.maxAttempts = 5;
+    baseConfig.baseRetryDelay = std::chrono::milliseconds(1);
+    baseConfig.queueCapacity = 3000000;
+    baseConfig.maxExplicitProducers = 16;
+    baseConfig.batchSize = 8192;
+    baseConfig.appendTimeout = std::chrono::minutes(5);
+    baseConfig.useEncryption = true;
+    baseConfig.compressionLevel = 9;
+    // benchmark parameters
+    const int numSpecificFiles = 256;
+    const int producerBatchSize = 512;
+    const int numProducers = 16;
+    const int entriesPerProducer = 2000000;
+    const int payloadSize = 2048;
+
+    std::vector<int> writerThreadCounts = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+
+    runConcurrencyBenchmark(baseConfig,
+                            writerThreadCounts,
+                            numProducers,
+                            entriesPerProducer,
+                            numSpecificFiles,
+                            producerBatchSize,
+                            payloadSize,
+                            "concurrency_benchmark_results.csv");
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/validation/encryption_compression_usage.cpp b/archive/2025/summer/bsc_karidas/benchmarks/validation/encryption_compression_usage.cpp
new file mode 100644
index 000000000..34e0cb929
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/validation/encryption_compression_usage.cpp
@@ -0,0 +1,248 @@
+#include "BenchmarkUtils.hpp"
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <fstream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <iomanip>
+#include <filesystem>
+
+struct BenchmarkResult
+{
+    bool useEncryption;
+    int compressionLevel;
+    double executionTime;
+    size_t totalEntries;
+    double throughputEntries;
+    size_t totalDataSizeBytes;
+    size_t finalStorageSize;
+    double logicalThroughputGiB;
+    double physicalThroughputGiB;
+    double writeAmplification;
+    LatencyStats latencyStats;
+};
+
+BenchmarkResult runBenchmark(const LoggingConfig &baseConfig, bool useEncryption, int compressionLevel,
+                             const std::vector<BatchWithDestination> &batches,
+                             int numProducerThreads, int entriesPerProducer)
+{
+    LoggingConfig config = baseConfig;
+    config.basePath = "./encryption_compression_usage";
+    config.useEncryption = useEncryption;
+    config.compressionLevel = compressionLevel;
+
+    cleanupLogDirectory(config.basePath);
+
+    size_t totalDataSizeBytes = calculateTotalDataSize(batches, numProducerThreads);
+    double totalDataSizeGiB = static_cast<double>(totalDataSizeBytes) / (1024 * 1024 * 1024);
+    std::cout << "Benchmark with Encryption: " << (useEncryption ? "Enabled" : "Disabled")
+              << ", Compression: " << (compressionLevel != 0 ? "Enabled" : "Disabled")
+              << " - Total data to be written: " << totalDataSizeBytes
+              << " bytes (" << totalDataSizeGiB << " GiB)" << std::endl;
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    // Each future now returns a LatencyCollector with thread-local measurements
+    std::vector<std::future<LatencyCollector>> futures;
+    for (int i = 0; i < numProducerThreads; i++)
+    {
+        futures.push_back(std::async(
+            std::launch::async,
+            appendLogEntries,
+            std::ref(loggingManager),
+            std::ref(batches)));
+    }
+
+    // Collect latency measurements from all threads
+    LatencyCollector masterCollector;
+    for (auto &future : futures)
+    {
+        LatencyCollector threadCollector = future.get();
+        masterCollector.merge(threadCollector);
+    }
+
+    loggingManager.stop();
+    auto endTime = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = endTime - startTime;
+
+    size_t finalStorageSize = calculateDirectorySize(config.basePath);
+    double writeAmplification = static_cast<double>(finalStorageSize) / totalDataSizeBytes;
+
+    double elapsedSeconds = elapsed.count();
+    const size_t totalEntries = numProducerThreads * entriesPerProducer;
+    double throughputEntries = totalEntries / elapsedSeconds;
+    double logicalThroughputGiB = totalDataSizeGiB / elapsedSeconds;
+    double physicalThroughputGiB = static_cast<double>(finalStorageSize) / (1024.0 * 1024.0 * 1024.0 * elapsedSeconds);
+
+    // Calculate latency statistics from merged measurements
+    LatencyStats latencyStats = calculateLatencyStats(masterCollector);
+
+    cleanupLogDirectory(config.basePath);
+
+    return BenchmarkResult{
+        useEncryption,
+        compressionLevel,
+        elapsedSeconds,
+        totalEntries,
+        throughputEntries,
+        totalDataSizeBytes,
+        finalStorageSize,
+        logicalThroughputGiB,
+        physicalThroughputGiB,
+        writeAmplification,
+        latencyStats};
+}
+
+// Write CSV header
+void writeCSVHeader(std::ofstream &csvFile)
+{
+    csvFile << "encryption_enabled,compression_level,execution_time_seconds,total_entries,"
+            << "throughput_entries_per_sec,total_data_size_bytes,final_storage_size_bytes,logical_throughput_gib_per_sec,"
+            << "physical_throughput_gib_per_sec,write_amplification,avg_latency_ms,median_latency_ms,"
+            << "max_latency_ms,latency_count\n";
+}
+
+// Write a single result row to CSV
+void writeCSVRow(std::ofstream &csvFile, const BenchmarkResult &result)
+{
+    csvFile << (result.useEncryption ? "true" : "false") << ","
+            << result.compressionLevel << ","
+            << std::fixed << std::setprecision(6) << result.executionTime << ","
+            << result.totalEntries << ","
+            << std::fixed << std::setprecision(2) << result.throughputEntries << ","
+            << result.totalDataSizeBytes << ","
+            << result.finalStorageSize << ","
+            << std::fixed << std::setprecision(6) << result.logicalThroughputGiB << ","
+            << std::fixed << std::setprecision(6) << result.physicalThroughputGiB << ","
+            << std::fixed << std::setprecision(8) << result.writeAmplification << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.avgMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.medianMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.maxMs << ","
+            << result.latencyStats.count << "\n";
+}
+
+void runEncryptionCompressionBenchmark(const LoggingConfig &baseConfig,
+                                       const std::vector<bool> &encryptionSettings,
+                                       const std::vector<int> &compressionLevels,
+                                       const std::vector<BatchWithDestination> &batches,
+                                       int numProducers, int entriesPerProducer,
+                                       const std::string &csvFilename = "encryption_compression_benchmark.csv")
+{
+    std::vector<BenchmarkResult> results;
+
+    // Open CSV file for writing
+    std::ofstream csvFile(csvFilename);
+    if (!csvFile.is_open())
+    {
+        std::cerr << "Error: Could not open CSV file " << csvFilename << " for writing." << std::endl;
+        return;
+    }
+
+    writeCSVHeader(csvFile);
+
+    int totalCombinations = encryptionSettings.size() * compressionLevels.size();
+    std::cout << "Running encryption/compression benchmark with " << totalCombinations << " configurations..." << std::endl;
+    std::cout << "Results will be saved to: " << csvFilename << std::endl;
+
+    int currentTest = 0;
+    for (bool useEncryption : encryptionSettings)
+    {
+        for (int compressionLevel : compressionLevels)
+        {
+            currentTest++;
+            std::cout << "\nProgress: " << currentTest << "/" << totalCombinations
+                      << " - Testing Encryption: " << (useEncryption ? "Enabled" : "Disabled")
+                      << ", Compression: " << compressionLevel << "..." << std::endl;
+
+            BenchmarkResult result = runBenchmark(baseConfig, useEncryption, compressionLevel, batches, numProducers, entriesPerProducer);
+            results.push_back(result);
+
+            // Write result to CSV immediately
+            writeCSVRow(csvFile, result);
+            csvFile.flush(); // Ensure data is written in case of early termination
+
+            // Print progress summary
+            std::cout << "  Completed: " << std::fixed << std::setprecision(2)
+                      << result.throughputEntries << " entries/s, "
+                      << std::fixed << std::setprecision(3) << result.logicalThroughputGiB << " GiB/s, "
+                      << "write amp: " << std::fixed << std::setprecision(3) << result.writeAmplification << std::endl;
+        }
+    }
+
+    csvFile.close();
+    std::cout << "\nBenchmark completed! Results saved to " << csvFilename << std::endl;
+
+    // Still print summary table to console for immediate review
+    std::cout << "\n============== ENCRYPTION/COMPRESSION LEVEL BENCHMARK SUMMARY ==============" << std::endl;
+    std::cout << std::left << std::setw(12) << "Encryption"
+              << std::setw(15) << "Comp. Level"
+              << std::setw(15) << "Exec. Time (s)"
+              << std::setw(20) << "Input Size (bytes)"
+              << std::setw(20) << "Storage Size (bytes)"
+              << std::setw(12) << "Write Amp."
+              << std::setw(20) << "Throughput (ent/s)"
+              << std::setw(15) << "Logical (GiB/s)"
+              << std::setw(15) << "Physical (GiB/s)"
+              << std::setw(12) << "Avg Lat(ms)" << std::endl;
+    std::cout << "--------------------------------------------------------------------------------------------------------------------------------" << std::endl;
+
+    // Display results for each configuration
+    for (const auto &result : results)
+    {
+        std::cout << std::left << std::setw(12) << (result.useEncryption ? "True" : "False")
+                  << std::setw(15) << result.compressionLevel
+                  << std::fixed << std::setprecision(2) << std::setw(15) << result.executionTime
+                  << std::setw(20) << result.totalDataSizeBytes
+                  << std::setw(20) << result.finalStorageSize
+                  << std::fixed << std::setprecision(3) << std::setw(12) << result.writeAmplification
+                  << std::fixed << std::setprecision(2) << std::setw(20) << result.throughputEntries
+                  << std::fixed << std::setprecision(3) << std::setw(15) << result.logicalThroughputGiB
+                  << std::fixed << std::setprecision(3) << std::setw(15) << result.physicalThroughputGiB
+                  << std::fixed << std::setprecision(3) << std::setw(12) << result.latencyStats.avgMs << std::endl;
+    }
+
+    std::cout << "================================================================================================================================" << std::endl;
+}
+
+int main()
+{
+    // system parameters
+    LoggingConfig baseConfig;
+    baseConfig.baseFilename = "default";
+    baseConfig.maxSegmentSize = 50 * 1024 * 1024; // 50 MB
+    baseConfig.maxAttempts = 10;
+    baseConfig.baseRetryDelay = std::chrono::milliseconds(2);
+    baseConfig.queueCapacity = 3000000;
+    baseConfig.maxExplicitProducers = 96;
+    baseConfig.batchSize = 8192;
+    baseConfig.numWriterThreads = 64;
+    baseConfig.appendTimeout = std::chrono::minutes(2);
+    // benchmark parameters
+    const int numSpecificFiles = 256;
+    const int producerBatchSize = 512;
+    const int numProducers = 96;
+    const int entriesPerProducer = 260000;
+    const int payloadSize = 4096;
+
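+    // Sweep compression levels 0-9 with encryption off and on (2 x 10 = 20 configurations)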
+    const std::vector<int> compressionLevels = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+    const std::vector<bool> encryptionSettings = {false, true};
+
+    std::cout << "Generating batches with pre-determined destinations for all threads...";
+    std::vector<BatchWithDestination> batches = generateBatches(entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+    std::cout << " Done." << std::endl;
+
+    runEncryptionCompressionBenchmark(baseConfig,
+                                      encryptionSettings,
+                                      compressionLevels,
+                                      batches,
+                                      numProducers,
+                                      entriesPerProducer,
+                                      "encryption_compression_benchmark_results.csv");
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/validation/file_rotation.cpp b/archive/2025/summer/bsc_karidas/benchmarks/validation/file_rotation.cpp
new file mode 100644
index 000000000..9132180c6
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/validation/file_rotation.cpp
@@ -0,0 +1,270 @@
+#include "BenchmarkUtils.hpp"
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <fstream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <iomanip>
+#include <filesystem>
+
+struct BenchmarkResult
+{
+    double elapsedSeconds;
+    double throughputEntries;
+    double logicalThroughputGiB;
+    double physicalThroughputGiB;
+    int fileCount;
+    double writeAmplification;
+    LatencyStats latencyStats;
+};
+
+int countLogFiles(const std::string &basePath)
+{
+    int count = 0;
+    for (const auto &entry : std::filesystem::directory_iterator(basePath))
+    {
+        if (entry.is_regular_file() && entry.path().extension() == ".log")
+        {
+            count++;
+        }
+    }
+    return count;
+}
+
+BenchmarkResult runFileRotationBenchmark(
+    const LoggingConfig &baseConfig,
+    int maxSegmentSizeMB,
+    int numProducerThreads,
+    int entriesPerProducer,
+    int numSpecificFiles,
+    int producerBatchSize,
+    int payloadSize)
+{
+    std::string logDir = "./logs/rotation_" + std::to_string(maxSegmentSizeMB) + "mb";
+
+    cleanupLogDirectory(logDir);
+
+    LoggingConfig config = baseConfig;
+    config.basePath = logDir;
+    config.maxSegmentSize = static_cast<size_t>(maxSegmentSizeMB) * 1024 * 1024;
+    std::cout << "Configured max segment size: " << config.maxSegmentSize << " bytes" << std::endl;
+
+    std::cout << "Generating batches with pre-determined destinations for all threads...";
+    std::vector<BatchWithDestination> batches = generateBatches(entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+    std::cout << " Done." << std::endl;
+
+    size_t totalDataSizeBytes = calculateTotalDataSize(batches, numProducerThreads);
+    double totalDataSizeGiB = static_cast<double>(totalDataSizeBytes) / (1024 * 1024 * 1024);
+
+    std::cout << "Total data to be written: " << totalDataSizeBytes << " bytes ("
+              << totalDataSizeGiB << " GiB)" << std::endl;
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    // Each future now returns a LatencyCollector with thread-local measurements
+    std::vector<std::future<LatencyCollector>> futures;
+    for (int i = 0; i < numProducerThreads; i++)
+    {
+        futures.push_back(std::async(
+            std::launch::async,
+            appendLogEntries,
+            std::ref(loggingManager),
+            std::ref(batches)));
+    }
+
+    // Collect latency measurements from all threads
+    LatencyCollector masterCollector;
+    for (auto &future : futures)
+    {
+        LatencyCollector threadCollector = future.get();
+        masterCollector.merge(threadCollector);
+    }
+
+    loggingManager.stop();
+    auto endTime = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = endTime - startTime;
+
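+    // Write amplification relates bytes on disk to bytes submitted by producers;
+    // logical throughput uses the submitted bytes, physical throughput the on-disk bytes.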
+    size_t finalStorageSize = calculateDirectorySize(logDir);
+    double writeAmplification = static_cast<double>(finalStorageSize) / totalDataSizeBytes;
+
+    double elapsedSeconds = elapsed.count();
+    const size_t totalEntries = numProducerThreads * entriesPerProducer;
+    double throughputEntries = totalEntries / elapsedSeconds;
+    double logicalThroughputGiB = totalDataSizeGiB / elapsedSeconds;
+    double physicalThroughputGiB = static_cast<double>(finalStorageSize) / (1024.0 * 1024.0 * 1024.0 * elapsedSeconds);
+    int fileCount = countLogFiles(logDir);
+
+    // Calculate latency statistics from merged measurements
+    LatencyStats latencyStats = calculateLatencyStats(masterCollector);
+
+    cleanupLogDirectory(logDir);
+
+    return BenchmarkResult{
+        elapsedSeconds,
+        throughputEntries,
+        logicalThroughputGiB,
+        physicalThroughputGiB,
+        fileCount,
+        writeAmplification,
+        latencyStats};
+}
+
+// Write CSV header
+void writeCSVHeader(std::ofstream &csvFile)
+{
+    csvFile << "segment_size_mb,elapsed_seconds,throughput_entries_per_sec,logical_throughput_gib_per_sec,"
+            << "physical_throughput_gib_per_sec,file_count,relative_performance,write_amplification,"
+            << "avg_latency_ms,median_latency_ms,max_latency_ms,latency_count\n";
+}
+
+// Write a single result row to CSV
+void writeCSVRow(std::ofstream &csvFile, int segmentSizeMB, const BenchmarkResult &result, double relativePerf)
+{
+    csvFile << segmentSizeMB << ","
+            << std::fixed << std::setprecision(6) << result.elapsedSeconds << ","
+            << std::fixed << std::setprecision(2) << result.throughputEntries << ","
+            << std::fixed << std::setprecision(6) << result.logicalThroughputGiB << ","
+            << std::fixed << std::setprecision(6) << result.physicalThroughputGiB << ","
+            << result.fileCount << ","
+            << std::fixed << std::setprecision(6) << relativePerf << ","
+            << std::fixed << std::setprecision(8) << result.writeAmplification << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.avgMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.medianMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.maxMs << ","
+            << result.latencyStats.count << "\n";
+}
+
+void runFileRotationComparison(
+    const LoggingConfig &baseConfig,
+    const std::vector<int> &segmentSizesMB,
+    int numProducerThreads,
+    int entriesPerProducer,
+    int numSpecificFiles,
+    int producerBatchSize,
+    int payloadSize,
+    const std::string &csvFilename = "file_rotation_benchmark.csv")
+{
+    std::vector<BenchmarkResult> results;
+
+    // Open CSV file for writing
+    std::ofstream csvFile(csvFilename);
+    if (!csvFile.is_open())
+    {
+        std::cerr << "Error: Could not open CSV file " << csvFilename << " for writing." << std::endl;
+        return;
+    }
+
+    writeCSVHeader(csvFile);
+
+    std::cout << "Running file rotation benchmark with " << segmentSizesMB.size() << " data points..." << std::endl;
+    std::cout << "Results will be saved to: " << csvFilename << std::endl;
+
+    for (size_t i = 0; i < segmentSizesMB.size(); i++)
+    {
+        int segmentSize = segmentSizesMB[i];
+        std::cout << "\nProgress: " << (i + 1) << "/" << segmentSizesMB.size()
+                  << " - Running benchmark with segment size: " << segmentSize << " MB..." << std::endl;
+
+        BenchmarkResult result = runFileRotationBenchmark(
+            baseConfig,
+            segmentSize,
+            numProducerThreads,
+            entriesPerProducer,
+            numSpecificFiles,
+            producerBatchSize,
+            payloadSize);
+
+        results.push_back(result);
+
+        // Calculate relative performance (using first result as baseline)
+        double relativePerf = results.size() > 1 ? result.throughputEntries / results[0].throughputEntries : 1.0;
+
+        // Write result to CSV immediately
+        writeCSVRow(csvFile, segmentSize, result, relativePerf);
+        csvFile.flush(); // Ensure data is written in case of early termination
+
+        // Print progress summary
+        std::cout << "  Completed: " << std::fixed << std::setprecision(2)
+                  << result.throughputEntries << " entries/s, "
+                  << std::fixed << std::setprecision(3) << result.logicalThroughputGiB << " GiB/s, "
+                  << result.fileCount << " files created" << std::endl;
+
+        // Add a small delay between runs
+        std::this_thread::sleep_for(std::chrono::seconds(5));
+    }
+
+    csvFile.close();
+    std::cout << "\nBenchmark completed! Results saved to " << csvFilename << std::endl;
+
+    // Still print summary table to console for immediate review
+    std::cout << "\n========================== FILE ROTATION BENCHMARK SUMMARY ==========================" << std::endl;
+    std::cout << std::left << std::setw(20) << "Segment Size (MB)"
+              << std::setw(15) << "Time (sec)"
+              << std::setw(20) << "Throughput (ent/s)"
+              << std::setw(15) << "Logical (GiB/s)"
+              << std::setw(15) << "Physical (GiB/s)"
+              << std::setw(15) << "Files Created"
+              << std::setw(15) << "Write Amp."
+              << std::setw(12) << "Rel. Perf"
+              << std::setw(12) << "Avg Lat(ms)" << std::endl;
+    std::cout << "--------------------------------------------------------------------------------------------------------------------------------" << std::endl;
+
+    // Use the first segment size as the baseline for relative performance
+    double baselineThroughput = results[0].throughputEntries;
+
+    for (size_t i = 0; i < segmentSizesMB.size(); i++)
+    {
+        double relativePerf = results[i].throughputEntries / baselineThroughput;
+        std::cout << std::left << std::setw(20) << segmentSizesMB[i]
+                  << std::setw(15) << std::fixed << std::setprecision(2) << results[i].elapsedSeconds
+                  << std::setw(20) << std::fixed << std::setprecision(2) << results[i].throughputEntries
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].logicalThroughputGiB
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].physicalThroughputGiB
+                  << std::setw(15) << results[i].fileCount
+                  << std::setw(15) << std::fixed << std::setprecision(4) << results[i].writeAmplification
+                  << std::setw(12) << std::fixed << std::setprecision(2) << relativePerf
+                  << std::setw(12) << std::fixed << std::setprecision(3) << results[i].latencyStats.avgMs << std::endl;
+    }
+    std::cout << "================================================================================================================================" << std::endl;
+}
+
+int main()
+{
+    // system parameters
+    LoggingConfig baseConfig;
+    baseConfig.baseFilename = "default";
+    baseConfig.maxAttempts = 5;
+    baseConfig.baseRetryDelay = std::chrono::milliseconds(1);
+    baseConfig.queueCapacity = 3000000;
+    baseConfig.maxExplicitProducers = 32;
+    baseConfig.batchSize = 8192;
+    baseConfig.numWriterThreads = 64;
+    baseConfig.appendTimeout = std::chrono::minutes(2);
+    baseConfig.useEncryption = false;
+    baseConfig.compressionLevel = 0;
+    // benchmark parameters
+    const int numSpecificFiles = 0;
+    const int producerBatchSize = 1024;
+    const int numProducers = 32;
+    const int entriesPerProducer = 1000000;
+    const int payloadSize = 256;
+
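+    // Segment sizes sweep from 8000 MB down to 10 MB, so file rotation becomes increasingly frequent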
+    std::vector<int> segmentSizesMB = {8000, 6000, 4000, 3000, 2000, 1500, 1000, 800, 650, 500, 350, 250, 150, 100, 85, 70, 55, 40, 25, 10};
+
+    runFileRotationComparison(
+        baseConfig,
+        segmentSizesMB,
+        numProducers,
+        entriesPerProducer,
+        numSpecificFiles,
+        producerBatchSize,
+        payloadSize,
+        "file_rotation_benchmark_results.csv");
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/validation/queue_capacity.cpp b/archive/2025/summer/bsc_karidas/benchmarks/validation/queue_capacity.cpp
new file mode 100644
index 000000000..c3588e3e4
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/validation/queue_capacity.cpp
@@ -0,0 +1,225 @@
+#include "BenchmarkUtils.hpp"
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <fstream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <iomanip>
+#include <filesystem>
+
+struct BenchmarkResult
+{
+    double elapsedSeconds;
+    double throughputEntries;
+    double logicalThroughputGiB;
+    double physicalThroughputGiB;
+    double writeAmplification;
+    LatencyStats latencyStats;
+};
+
+BenchmarkResult runQueueCapacityBenchmark(const LoggingConfig &config, int numProducerThreads,
+                                          int entriesPerProducer, int numSpecificFiles, int producerBatchSize, int payloadSize)
+{
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "Generating batches with pre-determined destinations for all threads...";
+    std::vector<BatchWithDestination> batches = generateBatches(entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+    std::cout << " Done." << std::endl;
+
+    size_t totalDataSizeBytes = calculateTotalDataSize(batches, numProducerThreads);
+    double totalDataSizeGiB = static_cast<double>(totalDataSizeBytes) / (1024 * 1024 * 1024);
+
+    std::cout << "Total data to be written: " << totalDataSizeBytes << " bytes ("
+              << totalDataSizeGiB << " GiB)" << std::endl;
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    // Each future now returns a LatencyCollector with thread-local measurements
+    std::vector<std::future<LatencyCollector>> futures;
+    for (int i = 0; i < numProducerThreads; i++)
+    {
+        futures.push_back(std::async(
+            std::launch::async,
+            appendLogEntries,
+            std::ref(loggingManager),
+            std::ref(batches)));
+    }
+
+    // Collect latency measurements from all threads
+    LatencyCollector masterCollector;
+    for (auto &future : futures)
+    {
+        LatencyCollector threadCollector = future.get();
+        masterCollector.merge(threadCollector);
+    }
+
+    loggingManager.stop();
+    auto endTime = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = endTime - startTime;
+
+    size_t finalStorageSize = calculateDirectorySize(config.basePath);
+    double writeAmplification = static_cast<double>(finalStorageSize) / totalDataSizeBytes;
+
+    double elapsedSeconds = elapsed.count();
+    const size_t totalEntries = numProducerThreads * entriesPerProducer;
+    double throughputEntries = totalEntries / elapsedSeconds;
+    double logicalThroughputGiB = totalDataSizeGiB / elapsedSeconds;
+    double physicalThroughputGiB = static_cast<double>(finalStorageSize) / (1024.0 * 1024.0 * 1024.0 * elapsedSeconds);
+
+    // Calculate latency statistics from merged measurements
+    LatencyStats latencyStats = calculateLatencyStats(masterCollector);
+
+    cleanupLogDirectory(config.basePath);
+
+    return BenchmarkResult{
+        elapsedSeconds,
+        throughputEntries,
+        logicalThroughputGiB,
+        physicalThroughputGiB,
+        writeAmplification,
+        latencyStats};
+}
+
+// Write CSV header
+void writeCSVHeader(std::ofstream &csvFile)
+{
+    csvFile << "queue_capacity,elapsed_seconds,throughput_entries_per_sec,logical_throughput_gib_per_sec,"
+            << "physical_throughput_gib_per_sec,relative_performance,write_amplification,"
+            << "avg_latency_ms,median_latency_ms,max_latency_ms,latency_count\n";
+}
+
+// Write a single result row to CSV
+void writeCSVRow(std::ofstream &csvFile, int queueCapacity, const BenchmarkResult &result, double relativePerf)
+{
+    csvFile << queueCapacity << ","
+            << std::fixed << std::setprecision(6) << result.elapsedSeconds << ","
+            << std::fixed << std::setprecision(2) << result.throughputEntries << ","
+            << std::fixed << std::setprecision(6) << result.logicalThroughputGiB << ","
+            << std::fixed << std::setprecision(6) << result.physicalThroughputGiB << ","
+            << std::fixed << std::setprecision(6) << relativePerf << ","
+            << std::fixed << std::setprecision(8) << result.writeAmplification << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.avgMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.medianMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.maxMs << ","
+            << result.latencyStats.count << "\n";
+}
+
+void runQueueCapacityComparison(const LoggingConfig &baseConfig, const std::vector<int> &queueSizes,
+                                int numProducerThreads,
+                                int entriesPerProducer, int numSpecificFiles, int producerBatchSize, int payloadSize,
+                                const std::string &csvFilename = "queue_capacity_benchmark.csv")
+{
+    std::vector<BenchmarkResult> results;
+
+    // Open CSV file for writing
+    std::ofstream csvFile(csvFilename);
+    if (!csvFile.is_open())
+    {
+        std::cerr << "Error: Could not open CSV file " << csvFilename << " for writing." << std::endl;
+        return;
+    }
+
+    writeCSVHeader(csvFile);
+
+    std::cout << "Running queue capacity benchmark with " << queueSizes.size() << " data points..." << std::endl;
+    std::cout << "Results will be saved to: " << csvFilename << std::endl;
+
+    for (size_t i = 0; i < queueSizes.size(); i++)
+    {
+        int queueSize = queueSizes[i];
+        std::cout << "\nProgress: " << (i + 1) << "/" << queueSizes.size()
+                  << " - Running benchmark with queue capacity: " << queueSize << "..." << std::endl;
+
+        LoggingConfig runConfig = baseConfig;
+        runConfig.queueCapacity = queueSize;
+        runConfig.basePath = "./logs/queue_" + std::to_string(queueSize);
+
+        BenchmarkResult result = runQueueCapacityBenchmark(
+            runConfig, numProducerThreads,
+            entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+
+        results.push_back(result);
+
+        // Calculate relative performance (using first result as baseline)
+        double relativePerf = results.size() > 1 ? result.throughputEntries / results[0].throughputEntries : 1.0;
+
+        // Write result to CSV immediately
+        writeCSVRow(csvFile, queueSize, result, relativePerf);
+        csvFile.flush(); // Ensure data is written in case of early termination
+
+        // Print progress summary
+        std::cout << "  Completed: " << std::fixed << std::setprecision(2)
+                  << result.throughputEntries << " entries/s, "
+                  << std::fixed << std::setprecision(3) << result.logicalThroughputGiB << " GiB/s" << std::endl;
+
+        // Add a small delay between runs
+        std::this_thread::sleep_for(std::chrono::seconds(5));
+    }
+
+    csvFile.close();
+    std::cout << "\nBenchmark completed! Results saved to " << csvFilename << std::endl;
+
+    std::cout << "\n=========== QUEUE CAPACITY BENCHMARK SUMMARY ===========" << std::endl;
+    std::cout << std::left << std::setw(15) << "Queue Capacity"
+              << std::setw(15) << "Time (sec)"
+              << std::setw(20) << "Throughput (ent/s)"
+              << std::setw(15) << "Logical (GiB/s)"
+              << std::setw(15) << "Physical (GiB/s)"
+              << std::setw(15) << "Write Amp."
+              << std::setw(12) << "Rel. Perf"
+              << std::setw(12) << "Avg Lat(ms)" << std::endl;
+    std::cout << "--------------------------------------------------------------------------------------------------------------------------------" << std::endl;
+
+    for (size_t i = 0; i < queueSizes.size(); i++)
+    {
+        double relativePerf = results[i].throughputEntries / results[0].throughputEntries; // Relative to smallest queue
+        std::cout << std::left << std::setw(15) << queueSizes[i]
+                  << std::setw(15) << std::fixed << std::setprecision(2) << results[i].elapsedSeconds
+                  << std::setw(20) << std::fixed << std::setprecision(2) << results[i].throughputEntries
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].logicalThroughputGiB
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].physicalThroughputGiB
+                  << std::setw(15) << std::fixed << std::setprecision(4) << results[i].writeAmplification
+                  << std::setw(12) << std::fixed << std::setprecision(2) << relativePerf
+                  << std::setw(12) << std::fixed << std::setprecision(3) << results[i].latencyStats.avgMs << std::endl;
+    }
+    std::cout << "================================================================================================================================" << std::endl;
+}
+
+int main()
+{
+    // system parameters
+    LoggingConfig baseConfig;
+    baseConfig.baseFilename = "default";
+    baseConfig.maxSegmentSize = 50 * 1024 * 1024; // 50 MB
+    baseConfig.maxAttempts = 5;
+    baseConfig.baseRetryDelay = std::chrono::milliseconds(1);
+    baseConfig.batchSize = 8192;
+    baseConfig.maxExplicitProducers = 32;
+    baseConfig.numWriterThreads = 32;
+    baseConfig.appendTimeout = std::chrono::minutes(2);
+    baseConfig.useEncryption = true;
+    baseConfig.compressionLevel = 9;
+    baseConfig.maxOpenFiles = 512;
+    // benchmark parameters
+    const int numSpecificFiles = 256;
+    const int producerBatchSize = 2048;
+    const int numProducers = 32;
+    const int entriesPerProducer = 2000000;
+    const int payloadSize = 2048;
+
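+    // Queue capacities sweep powers of two from 8 Ki to 32 Mi entries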
+    std::vector<int> queueSizes = {8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432};
+    runQueueCapacityComparison(baseConfig, queueSizes,
+                               numProducers,
+                               entriesPerProducer,
+                               numSpecificFiles,
+                               producerBatchSize,
+                               payloadSize,
+                               "queue_capacity_benchmark_results.csv");
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/validation/scaling_concurrency.cpp b/archive/2025/summer/bsc_karidas/benchmarks/validation/scaling_concurrency.cpp
new file mode 100644
index 000000000..ae14aa969
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/validation/scaling_concurrency.cpp
@@ -0,0 +1,248 @@
+#include "BenchmarkUtils.hpp"
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <fstream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <iomanip>
+#include <filesystem>
+
+struct BenchmarkResult
+{
+    double executionTime;
+    double throughputEntries;
+    double logicalThroughputGiB;
+    double physicalThroughputGiB;
+    size_t inputDataSizeBytes;
+    size_t outputDataSizeBytes;
+    double writeAmplification;
+    LatencyStats latencyStats;
+};
+
+BenchmarkResult runBenchmark(const LoggingConfig &baseConfig, int numWriterThreads, int numProducerThreads,
+                             int entriesPerProducer, int numSpecificFiles, int producerBatchSize, int payloadSize)
+{
+    LoggingConfig config = baseConfig;
+    config.basePath = "./logs_writers";
+    config.numWriterThreads = numWriterThreads;
+    config.maxExplicitProducers = numProducerThreads;
+
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "Generating batches with pre-determined destinations for all threads...";
+    std::vector<BatchWithDestination> batches = generateBatches(entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+    std::cout << " Done." << std::endl;
+
+    size_t totalDataSizeBytes = calculateTotalDataSize(batches, numProducerThreads);
+    double totalDataSizeGiB = static_cast<double>(totalDataSizeBytes) / (1024 * 1024 * 1024);
+    std::cout << "Total data to be written: " << totalDataSizeBytes << " bytes ("
+              << totalDataSizeGiB << " GiB)" << std::endl;
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    // Each future now returns a LatencyCollector with thread-local measurements
+    std::vector<std::future<LatencyCollector>> futures;
+    for (int i = 0; i < numProducerThreads; i++)
+    {
+        futures.push_back(std::async(
+            std::launch::async,
+            appendLogEntries,
+            std::ref(loggingManager),
+            std::ref(batches)));
+    }
+
+    // Collect latency measurements from all threads
+    LatencyCollector masterCollector;
+    for (auto &future : futures)
+    {
+        LatencyCollector threadCollector = future.get();
+        masterCollector.merge(threadCollector);
+    }
+
+    loggingManager.stop();
+    auto endTime = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = endTime - startTime;
+
+    size_t finalStorageSize = calculateDirectorySize(config.basePath);
+    double writeAmplification = static_cast<double>(finalStorageSize) / totalDataSizeBytes;
+
+    double elapsedSeconds = elapsed.count();
+    const size_t totalEntries = numProducerThreads * entriesPerProducer;
+    double throughputEntries = totalEntries / elapsedSeconds;
+    double logicalThroughputGiB = totalDataSizeGiB / elapsedSeconds;
+    double physicalThroughputGiB = static_cast<double>(finalStorageSize) / (1024.0 * 1024.0 * 1024.0 * elapsedSeconds);
+
+    // Calculate latency statistics from merged measurements
+    LatencyStats latencyStats = calculateLatencyStats(masterCollector);
+
+    cleanupLogDirectory(config.basePath);
+
+    return BenchmarkResult{
+        elapsedSeconds,
+        throughputEntries,
+        logicalThroughputGiB,
+        physicalThroughputGiB,
+        totalDataSizeBytes,
+        finalStorageSize,
+        writeAmplification,
+        latencyStats};
+}
+
+// Write CSV header
+void writeCSVHeader(std::ofstream &csvFile)
+{
+    csvFile << "writer_threads,producer_threads,execution_time_seconds,throughput_entries_per_sec,logical_throughput_gib_per_sec,"
+            << "physical_throughput_gib_per_sec,input_data_size_bytes,output_data_size_bytes,scaling_efficiency,"
+            << "write_amplification,avg_latency_ms,median_latency_ms,max_latency_ms,latency_count\n";
+}
+
+// Write a single result row to CSV
+void writeCSVRow(std::ofstream &csvFile, int writerThreads, int producerThreads, const BenchmarkResult &result, double scalingEfficiency)
+{
+    csvFile << writerThreads << ","
+            << producerThreads << ","
+            << std::fixed << std::setprecision(6) << result.executionTime << ","
+            << std::fixed << std::setprecision(2) << result.throughputEntries << ","
+            << std::fixed << std::setprecision(6) << result.logicalThroughputGiB << ","
+            << std::fixed << std::setprecision(6) << result.physicalThroughputGiB << ","
+            << result.inputDataSizeBytes << ","
+            << result.outputDataSizeBytes << ","
+            << std::fixed << std::setprecision(6) << scalingEfficiency << ","
+            << std::fixed << std::setprecision(8) << result.writeAmplification << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.avgMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.medianMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.maxMs << ","
+            << result.latencyStats.count << "\n";
+}
+
+void runScalabilityBenchmark(const LoggingConfig &baseConfig, const std::vector<int> &writerThreadCounts,
+                             int baseProducerThreads, int baseEntriesPerProducer,
+                             int numSpecificFiles, int producerBatchSize, int payloadSize,
+                             const std::string &csvFilename = "scaling_concurrency_benchmark.csv")
+{
+    std::vector<BenchmarkResult> results;
+    std::vector<int> producerThreadCounts;
+
+    // Open CSV file for writing
+    std::ofstream csvFile(csvFilename);
+    if (!csvFile.is_open())
+    {
+        std::cerr << "Error: Could not open CSV file " << csvFilename << " for writing." << std::endl;
+        return;
+    }
+
+    writeCSVHeader(csvFile);
+
+    std::cout << "Running scaling concurrency benchmark with " << writerThreadCounts.size() << " data points..." << std::endl;
+    std::cout << "Results will be saved to: " << csvFilename << std::endl;
+
+    for (size_t i = 0; i < writerThreadCounts.size(); i++)
+    {
+        int writerCount = writerThreadCounts[i];
+        std::cout << "\nProgress: " << (i + 1) << "/" << writerThreadCounts.size()
+                  << " - Running scalability benchmark with " << writerCount << " writer thread(s)..." << std::endl;
+
+        // Scale producer threads with the writer count, keeping entries per producer constant
+        int scaledProducers = baseProducerThreads * writerCount;
+        int entriesPerProducer = baseEntriesPerProducer;
+        producerThreadCounts.push_back(scaledProducers);
+
+        std::cout << "Scaled workload: " << scaledProducers << " producers, "
+                  << entriesPerProducer << " entries per producer" << std::endl;
+
+        BenchmarkResult result = runBenchmark(baseConfig, writerCount, scaledProducers, entriesPerProducer,
+                                              numSpecificFiles, producerBatchSize, payloadSize);
+
+        results.push_back(result);
+
+        // Calculate scaling efficiency (normalized by expected linear scaling)
+        double scalingEfficiency = results.size() > 1 ? (result.throughputEntries / results[0].throughputEntries) / writerCount : 1.0;
+
+        // Write result to CSV immediately
+        writeCSVRow(csvFile, writerCount, scaledProducers, result, scalingEfficiency);
+        csvFile.flush(); // Ensure data is written in case of early termination
+
+        // Print progress summary
+        std::cout << "  Completed: " << std::fixed << std::setprecision(2)
+                  << result.throughputEntries << " entries/s, "
+                  << std::fixed << std::setprecision(3) << result.logicalThroughputGiB << " GiB/s, "
+                  << std::fixed << std::setprecision(2) << scalingEfficiency << " scaling efficiency" << std::endl;
+    }
+
+    csvFile.close();
+    std::cout << "\nBenchmark completed! Results saved to " << csvFilename << std::endl;
+
+    // Still print summary table to console for immediate review
+    std::cout << "\n=================== SCALABILITY BENCHMARK SUMMARY ===================" << std::endl;
+    std::cout << std::left << std::setw(20) << "Writer Threads"
+              << std::setw(20) << "Producer Threads"
+              << std::setw(15) << "Time (sec)"
+              << std::setw(20) << "Throughput (ent/s)"
+              << std::setw(15) << "Logical (GiB/s)"
+              << std::setw(15) << "Physical (GiB/s)"
+              << std::setw(20) << "Input Size (bytes)"
+              << std::setw(20) << "Storage Size (bytes)"
+              << std::setw(15) << "Write Amp."
+              << std::setw(12) << "Rel. Perf."
+              << std::setw(12) << "Avg Lat(ms)" << std::endl;
+    std::cout << "--------------------------------------------------------------------------------------------------------------------------------" << std::endl;
+
+    double baselineThroughput = results[0].throughputEntries;
+
+    for (size_t i = 0; i < writerThreadCounts.size(); i++)
+    {
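+        // Normalize by writer count: 1.0 means perfect linear scaling relative to the first (single-writer) configuration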
+        double relativePerformance = results[i].throughputEntries / (baselineThroughput * writerThreadCounts[i]);
+
+        std::cout << std::left << std::setw(20) << writerThreadCounts[i]
+                  << std::setw(20) << producerThreadCounts[i]
+                  << std::setw(15) << std::fixed << std::setprecision(2) << results[i].executionTime
+                  << std::setw(20) << std::fixed << std::setprecision(2) << results[i].throughputEntries
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].logicalThroughputGiB
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].physicalThroughputGiB
+                  << std::setw(20) << results[i].inputDataSizeBytes
+                  << std::setw(20) << results[i].outputDataSizeBytes
+                  << std::setw(15) << std::fixed << std::setprecision(4) << results[i].writeAmplification
+                  << std::setw(12) << std::fixed << std::setprecision(2) << relativePerformance
+                  << std::setw(12) << std::fixed << std::setprecision(3) << results[i].latencyStats.avgMs << std::endl;
+    }
+    std::cout << "================================================================================================================================" << std::endl;
+}
+
+int main()
+{
+    // system parameters
+    LoggingConfig baseConfig;
+    baseConfig.baseFilename = "default";
+    baseConfig.maxSegmentSize = 250 * 1024 * 1024; // 250 MB
+    baseConfig.maxAttempts = 5;
+    baseConfig.baseRetryDelay = std::chrono::milliseconds(1);
+    baseConfig.queueCapacity = 3000000;
+    baseConfig.batchSize = 8192;
+    baseConfig.appendTimeout = std::chrono::minutes(5);
+    baseConfig.useEncryption = true;
+    baseConfig.compressionLevel = 9;
+    // benchmark parameters
+    const int numSpecificFiles = 256;
+    const int producerBatchSize = 512;
+    const int baseProducerThreads = 1;
+    const int baseEntriesPerProducer = 4000000;
+    const int payloadSize = 2048;
+
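+    // Writer thread counts from 1 to 64; producer threads scale proportionally inside runScalabilityBenchmark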
+    std::vector<int> writerThreadCounts = {1, 2, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64};
+
+    runScalabilityBenchmark(baseConfig,
+                            writerThreadCounts,
+                            baseProducerThreads,
+                            baseEntriesPerProducer,
+                            numSpecificFiles,
+                            producerBatchSize,
+                            payloadSize,
+                            "scaling_concurrency_benchmark_results.csv");
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/workloads/compression_ratio.cpp b/archive/2025/summer/bsc_karidas/benchmarks/workloads/compression_ratio.cpp
new file mode 100644
index 000000000..a8ba49433
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/workloads/compression_ratio.cpp
@@ -0,0 +1,62 @@
+#include "BenchmarkUtils.hpp"
+#include "Compression.hpp"
+#include "LogEntry.hpp"
+#include <chrono>
+#include <cstdint>
+#include <iomanip>
+#include <iostream>
+#include <random>
+#include <vector>
+
+struct Result
+{
+    int level;
+    size_t uncompressedSize;
+    size_t compressedSize;
+    double compressionRatio;
+    long long durationMs;
+};
+
+int main()
+{
+    constexpr size_t batchSize = 1000;
+    const std::vector<int> compressionLevels = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+    std::vector<Result> results;
+
+    for (int level : compressionLevels)
+    {
+        // Generate one batch with batchSize entries, no specific destinations
+        std::vector<BatchWithDestination> batches = generateBatches(batchSize, 0, batchSize, 4096);
+        std::vector<LogEntry> entries = std::move(batches[0].first);
+
+        // Serialize the entries
+        std::vector<uint8_t> serializedEntries = LogEntry::serializeBatch(std::move(entries));
+        size_t uncompressedSize = serializedEntries.size();
+
+        // Measure compression time
+        auto start = std::chrono::high_resolution_clock::now();
+        std::vector<uint8_t> compressed = Compression::compress(std::move(serializedEntries), level);
+        auto end = std::chrono::high_resolution_clock::now();
+
+        size_t compressedSize = compressed.size();
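+        // Compression ratio is uncompressed/compressed bytes, so larger values mean better compression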
+        double compressionRatio = static_cast<double>(uncompressedSize) / compressedSize;
+        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
+
+        results.push_back({level, uncompressedSize, compressedSize, compressionRatio, duration});
+    }
+
+    // Print results
+    std::cout << std::fixed << std::setprecision(2);
+    std::cout << "Level | Uncompressed (B) | Compressed (B) | Ratio | Time (ms)\n";
+    std::cout << "------|------------------|----------------|-------|----------\n";
+    for (const auto &r : results)
+    {
+        std::cout << std::setw(5) << r.level << " | "
+                  << std::setw(16) << r.uncompressedSize << " | "
+                  << std::setw(14) << r.compressedSize << " | "
+                  << std::setw(5) << r.compressionRatio << " | "
+                  << std::setw(9) << r.durationMs << "\n";
+    }
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/workloads/diverse_filepaths.cpp b/archive/2025/summer/bsc_karidas/benchmarks/workloads/diverse_filepaths.cpp
new file mode 100644
index 000000000..c5fda742d
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/workloads/diverse_filepaths.cpp
@@ -0,0 +1,248 @@
+#include "BenchmarkUtils.hpp"
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <fstream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <iomanip>
+#include <filesystem>
+
+struct BenchmarkResult
+{
+    double elapsedSeconds;
+    double throughputEntries;
+    double logicalThroughputGiB;
+    double physicalThroughputGiB;
+    double writeAmplification;
+    LatencyStats latencyStats;
+};
+
+BenchmarkResult runFilepathDiversityBenchmark(const LoggingConfig &config, int numSpecificFiles, int numProducerThreads,
+                                              int entriesPerProducer, int producerBatchSize, int payloadSize)
+{
+    LoggingConfig runConfig = config;
+    runConfig.basePath = "./logs/files_" + std::to_string(numSpecificFiles);
+
+    cleanupLogDirectory(runConfig.basePath);
+
+    std::cout << "Generating batches with " << numSpecificFiles << " specific files for all threads...";
+    std::vector<BatchWithDestination> batches = generateBatches(entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+    std::cout << " Done." << std::endl;
+    size_t totalDataSizeBytes = calculateTotalDataSize(batches, numProducerThreads);
+    double totalDataSizeGiB = static_cast<double>(totalDataSizeBytes) / (1024 * 1024 * 1024);
+    std::cout << "Total data to be written: " << totalDataSizeBytes << " bytes ("
+              << totalDataSizeGiB << " GiB)" << std::endl;
+
+    LoggingManager loggingManager(runConfig);
+    loggingManager.start();
+
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    // Each future now returns a LatencyCollector with thread-local measurements
+    std::vector<std::future<LatencyCollector>> futures;
+    for (int i = 0; i < numProducerThreads; i++)
+    {
+        futures.push_back(std::async(
+            std::launch::async,
+            appendLogEntries,
+            std::ref(loggingManager),
+            std::ref(batches)));
+    }
+
+    // Collect latency measurements from all threads
+    LatencyCollector masterCollector;
+    for (auto &future : futures)
+    {
+        LatencyCollector threadCollector = future.get();
+        masterCollector.merge(threadCollector);
+    }
+
+    loggingManager.stop();
+    auto endTime = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = endTime - startTime;
+
+    size_t finalStorageSize = calculateDirectorySize(runConfig.basePath);
+    double writeAmplification = static_cast<double>(finalStorageSize) / totalDataSizeBytes;
+
+    double elapsedSeconds = elapsed.count();
+    const size_t totalEntries = numProducerThreads * entriesPerProducer;
+    double throughputEntries = totalEntries / elapsedSeconds;
+    double logicalThroughputGiB = totalDataSizeGiB / elapsedSeconds;
+    double physicalThroughputGiB = static_cast<double>(finalStorageSize) / (1024.0 * 1024.0 * 1024.0 * elapsedSeconds);
+
+    // Calculate latency statistics from merged measurements
+    LatencyStats latencyStats = calculateLatencyStats(masterCollector);
+
+    cleanupLogDirectory(runConfig.basePath);
+
+    return BenchmarkResult{
+        elapsedSeconds,
+        throughputEntries,
+        logicalThroughputGiB,
+        physicalThroughputGiB,
+        writeAmplification,
+        latencyStats};
+}
+
+// Write CSV header
+void writeCSVHeader(std::ofstream &csvFile)
+{
+    csvFile << "num_specific_files,configuration_description,elapsed_seconds,throughput_entries_per_sec,logical_throughput_gib_per_sec,"
+            << "physical_throughput_gib_per_sec,relative_performance,write_amplification,"
+            << "avg_latency_ms,median_latency_ms,max_latency_ms,latency_count\n";
+}
+
+// Write a single result row to CSV
+void writeCSVRow(std::ofstream &csvFile, int numSpecificFiles, const std::string &description, const BenchmarkResult &result, double relativePerf)
+{
+    csvFile << numSpecificFiles << ","
+            << "\"" << description << "\"," // Quote the description in case it contains commas
+            << std::fixed << std::setprecision(6) << result.elapsedSeconds << ","
+            << std::fixed << std::setprecision(2) << result.throughputEntries << ","
+            << std::fixed << std::setprecision(6) << result.logicalThroughputGiB << ","
+            << std::fixed << std::setprecision(6) << result.physicalThroughputGiB << ","
+            << std::fixed << std::setprecision(6) << relativePerf << ","
+            << std::fixed << std::setprecision(8) << result.writeAmplification << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.avgMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.medianMs << ","
+            << std::fixed << std::setprecision(6) << result.latencyStats.maxMs << ","
+            << result.latencyStats.count << "\n";
+}
+
+void runFilepathDiversityComparison(const LoggingConfig &config, const std::vector<int> &numFilesVariants,
+                                    int numProducerThreads, int entriesPerProducer, int producerBatchSize, int payloadSize,
+                                    const std::string &csvFilename = "diverse_filepaths_benchmark.csv")
+{
+    std::vector<BenchmarkResult> results;
+    std::vector<std::string> descriptions;
+
+    // Generate descriptions for each file count variant
+    for (int fileCount : numFilesVariants)
+    {
+        if (fileCount == 0)
+        {
+            descriptions.push_back("Default file only");
+        }
+        else if (fileCount == 1)
+        {
+            descriptions.push_back("1 specific file");
+        }
+        else
+        {
+            descriptions.push_back(std::to_string(fileCount) + " specific files");
+        }
+    }
+
+    // Open CSV file for writing
+    std::ofstream csvFile(csvFilename);
+    if (!csvFile.is_open())
+    {
+        std::cerr << "Error: Could not open CSV file " << csvFilename << " for writing." << std::endl;
+        return;
+    }
+
+    writeCSVHeader(csvFile);
+
+    std::cout << "Running filepath diversity benchmark with " << numFilesVariants.size() << " data points..." << std::endl;
+    std::cout << "Results will be saved to: " << csvFilename << std::endl;
+
+    for (size_t i = 0; i < numFilesVariants.size(); i++)
+    {
+        int fileCount = numFilesVariants[i];
+        std::cout << "\nProgress: " << (i + 1) << "/" << numFilesVariants.size()
+                  << " - Running benchmark with " << descriptions[i] << "..." << std::endl;
+
+        BenchmarkResult result = runFilepathDiversityBenchmark(
+            config,
+            fileCount,
+            numProducerThreads, entriesPerProducer, producerBatchSize, payloadSize);
+
+        results.push_back(result);
+
+        // Calculate relative performance (using first result as baseline)
+        double relativePerf = results.size() > 1 ? result.throughputEntries / results[0].throughputEntries : 1.0;
+
+        // Write result to CSV immediately
+        writeCSVRow(csvFile, fileCount, descriptions[i], result, relativePerf);
+        csvFile.flush(); // Ensure data is written in case of early termination
+
+        // Print progress summary
+        std::cout << "  Completed: " << std::fixed << std::setprecision(2)
+                  << result.throughputEntries << " entries/s, "
+                  << std::fixed << std::setprecision(3) << result.logicalThroughputGiB << " GiB/s, "
+                  << std::fixed << std::setprecision(2) << relativePerf << "x relative performance" << std::endl;
+
+        // Add a small delay between runs
+        std::this_thread::sleep_for(std::chrono::seconds(5));
+    }
+
+    csvFile.close();
+    std::cout << "\nBenchmark completed! Results saved to " << csvFilename << std::endl;
+
+    // Still print summary table to console for immediate review
+    std::cout << "\n=========== FILEPATH DIVERSITY BENCHMARK SUMMARY ===========" << std::endl;
+    std::cout << std::left << std::setw(25) << "Configuration"
+              << std::setw(15) << "Time (sec)"
+              << std::setw(20) << "Throughput (ent/s)"
+              << std::setw(15) << "Logical (GiB/s)"
+              << std::setw(15) << "Physical (GiB/s)"
+              << std::setw(15) << "Write Amp."
+              << std::setw(12) << "Rel. Perf"
+              << std::setw(12) << "Avg Lat(ms)" << std::endl;
+    std::cout << "--------------------------------------------------------------------------------------------------------------------------------" << std::endl;
+
+    // Calculate base throughput for relative performance
+    double baseThroughputEntries = results[0].throughputEntries;
+
+    for (size_t i = 0; i < numFilesVariants.size(); i++)
+    {
+        double relativePerf = results[i].throughputEntries / baseThroughputEntries;
+        std::cout << std::left << std::setw(25) << descriptions[i]
+                  << std::setw(15) << std::fixed << std::setprecision(2) << results[i].elapsedSeconds
+                  << std::setw(20) << std::fixed << std::setprecision(2) << results[i].throughputEntries
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].logicalThroughputGiB
+                  << std::setw(15) << std::fixed << std::setprecision(3) << results[i].physicalThroughputGiB
+                  << std::setw(15) << std::fixed << std::setprecision(4) << results[i].writeAmplification
+                  << std::setw(12) << std::fixed << std::setprecision(2) << relativePerf
+                  << std::setw(12) << std::fixed << std::setprecision(3) << results[i].latencyStats.avgMs << std::endl;
+    }
+    std::cout << "======================================================================================================================================" << std::endl;
+}
+
+int main()
+{
+    // system parameters
+    LoggingConfig config;
+    config.baseFilename = "default";
+    config.maxSegmentSize = static_cast<size_t>(1000) * 1024 * 1024; // 1000 MiB (~1 GB)
+    config.maxAttempts = 10;
+    config.baseRetryDelay = std::chrono::milliseconds(2);
+    config.queueCapacity = 3000000;
+    config.maxExplicitProducers = 32;
+    config.batchSize = 8192;
+    config.numWriterThreads = 64;
+    config.appendTimeout = std::chrono::minutes(2);
+    config.useEncryption = true;
+    config.compressionLevel = 9;
+    config.maxOpenFiles = 256;
+    // benchmark parameters
+    const int producerBatchSize = 8192;
+    const int numProducers = 32;
+    const int entriesPerProducer = 2000000;
+    const int payloadSize = 2048;
+
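+    // Destination-file variants range from the single default file (0) up to 8192 distinct files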
+    std::vector<int> numFilesVariants = {0, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192};
+
+    runFilepathDiversityComparison(config,
+                                   numFilesVariants,
+                                   numProducers,
+                                   entriesPerProducer,
+                                   producerBatchSize,
+                                   payloadSize,
+                                   "diverse_filepaths_benchmark_results.csv");
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/workloads/large_batches.cpp b/archive/2025/summer/bsc_karidas/benchmarks/workloads/large_batches.cpp
new file mode 100644
index 000000000..8dd16028c
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/workloads/large_batches.cpp
@@ -0,0 +1,102 @@
+#include "BenchmarkUtils.hpp"
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <filesystem>
+#include <numeric>
+
+int main()
+{
+    // system parameters
+    LoggingConfig config;
+    config.basePath = "./logs";
+    config.baseFilename = "default";
+    config.maxSegmentSize = 50 * 1024 * 1024; // 50 MB
+    config.maxAttempts = 5;
+    config.baseRetryDelay = std::chrono::milliseconds(1);
+    config.queueCapacity = 3000000;
+    config.maxExplicitProducers = 4;
+    config.batchSize = 8400;
+    config.numWriterThreads = 32;
+    config.appendTimeout = std::chrono::minutes(2);
+    config.useEncryption = true;
+    config.compressionLevel = 9;
+    // benchmark parameters
+    const int numProducerThreads = 4;
+    const int entriesPerProducer = 30000000;
+    const int numSpecificFiles = 25;
+    const int producerBatchSize = 1000;
+    const int payloadSize = 2048;
+
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "Generating batches with pre-determined destinations for all threads...";
+    std::vector<BatchWithDestination> batches = generateBatches(entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+    std::cout << " Done." << std::endl;
+    size_t totalDataSizeBytes = calculateTotalDataSize(batches, numProducerThreads);
+    double totalDataSizeGiB = static_cast<double>(totalDataSizeBytes) / (1024 * 1024 * 1024);
+    std::cout << "Total data to be written: " << totalDataSizeBytes << " bytes (" << totalDataSizeGiB << " GiB)" << std::endl;
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    // Each future now returns a LatencyCollector with thread-local measurements
+    std::vector<std::future<LatencyCollector>> futures;
+    for (int i = 0; i < numProducerThreads; i++)
+    {
+        futures.push_back(std::async(
+            std::launch::async,
+            appendLogEntries,
+            std::ref(loggingManager),
+            std::ref(batches)));
+    }
+
+    // Collect latency measurements from all threads
+    LatencyCollector masterCollector;
+    for (auto &future : futures)
+    {
+        LatencyCollector threadCollector = future.get();
+        masterCollector.merge(threadCollector);
+    }
+
+    loggingManager.stop();
+    auto endTime = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = endTime - startTime;
+
+    size_t finalStorageSize = calculateDirectorySize(config.basePath);
+    double finalStorageSizeGiB = static_cast<double>(finalStorageSize) / (1024 * 1024 * 1024);
+    double writeAmplification = static_cast<double>(finalStorageSize) / totalDataSizeBytes;
+
+    double elapsedSeconds = elapsed.count();
+    const size_t totalEntries = numProducerThreads * entriesPerProducer;
+    double entriesThroughput = totalEntries / elapsedSeconds;
+    double logicalThroughputGiB = totalDataSizeGiB / elapsedSeconds;
+    double physicalThroughputGiB = finalStorageSizeGiB / elapsedSeconds;
+    double averageEntrySize = static_cast<double>(totalDataSizeBytes) / totalEntries;
+
+    // Calculate latency statistics from merged measurements
+    auto latencyStats = calculateLatencyStats(masterCollector);
+
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "============== Benchmark Results ==============" << std::endl;
+    std::cout << "Execution time: " << elapsedSeconds << " seconds" << std::endl;
+    std::cout << "Total entries appended: " << totalEntries << std::endl;
+    std::cout << "Average entry size: " << averageEntrySize << " bytes" << std::endl;
+    std::cout << "Total data written: " << totalDataSizeGiB << " GiB" << std::endl;
+    std::cout << "Final storage size: " << finalStorageSizeGiB << " GiB" << std::endl;
+    std::cout << "Write amplification: " << writeAmplification << " (ratio)" << std::endl;
+    std::cout << "Throughput (entries): " << entriesThroughput << " entries/second" << std::endl;
+    std::cout << "Throughput (logical): " << logicalThroughputGiB << " GiB/second" << std::endl;
+    std::cout << "Throughput (physical): " << physicalThroughputGiB << " GiB/second" << std::endl;
+    std::cout << "===============================================" << std::endl;
+
+    printLatencyStats(latencyStats);
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/workloads/main.cpp b/archive/2025/summer/bsc_karidas/benchmarks/workloads/main.cpp
new file mode 100644
index 000000000..401db22a0
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/workloads/main.cpp
@@ -0,0 +1,103 @@
+#include "BenchmarkUtils.hpp"
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <filesystem>
+#include <numeric>
+
+int main()
+{
+    // system parameters
+    LoggingConfig config;
+    config.basePath = "./logs";
+    config.baseFilename = "default";
+    config.maxSegmentSize = 50 * 1024 * 1024; // 50 MB
+    config.maxAttempts = 5;
+    config.baseRetryDelay = std::chrono::milliseconds(1);
+    config.queueCapacity = 3000000;
+    config.maxExplicitProducers = 96;
+    config.batchSize = 8192;
+    config.numWriterThreads = 96;
+    config.appendTimeout = std::chrono::minutes(2);
+    config.useEncryption = true;
+    config.compressionLevel = 9;
+    config.maxOpenFiles = 512;
+    // benchmark parameters
+    const int numSpecificFiles = 1024;
+    const int producerBatchSize = 4096;
+    const int numProducers = 96;
+    const int entriesPerProducer = 800000;
+    const int payloadSize = 4096;
+
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "Generating batches with pre-determined destinations...";
+    std::vector<BatchWithDestination> batches = generateBatches(entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+    std::cout << " Done." << std::endl;
+    size_t totalDataSizeBytes = calculateTotalDataSize(batches, numProducers);
+    double totalDataSizeGiB = static_cast<double>(totalDataSizeBytes) / (1024 * 1024 * 1024);
+    std::cout << "Total data to be written: " << totalDataSizeBytes << " bytes (" << totalDataSizeGiB << " GiB)" << std::endl;
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    // Each future now returns a LatencyCollector with thread-local measurements
+    std::vector<std::future<LatencyCollector>> futures;
+    for (int i = 0; i < numProducers; i++)
+    {
+        futures.push_back(std::async(
+            std::launch::async,
+            appendLogEntries,
+            std::ref(loggingManager),
+            std::ref(batches)));
+    }
+
+    // Collect latency measurements from all threads
+    LatencyCollector masterCollector;
+    for (auto &future : futures)
+    {
+        LatencyCollector threadCollector = future.get();
+        masterCollector.merge(threadCollector);
+    }
+
+    loggingManager.stop();
+    auto endTime = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = endTime - startTime;
+
+    size_t finalStorageSize = calculateDirectorySize(config.basePath);
+    double finalStorageSizeGiB = static_cast<double>(finalStorageSize) / (1024 * 1024 * 1024);
+    double writeAmplification = static_cast<double>(finalStorageSize) / totalDataSizeBytes;
+
+    double elapsedSeconds = elapsed.count();
+    const size_t totalEntries = numProducers * entriesPerProducer;
+    double entriesThroughput = totalEntries / elapsedSeconds;
+    double logicalThroughputGiB = totalDataSizeGiB / elapsedSeconds;
+    double physicalThroughputGiB = finalStorageSizeGiB / elapsedSeconds;
+    double averageEntrySize = static_cast<double>(totalDataSizeBytes) / totalEntries;
+
+    // Calculate latency statistics from merged measurements
+    auto latencyStats = calculateLatencyStats(masterCollector);
+
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "============== Benchmark Results ==============" << std::endl;
+    std::cout << "Execution time: " << elapsedSeconds << " seconds" << std::endl;
+    std::cout << "Total entries appended: " << totalEntries << std::endl;
+    std::cout << "Average entry size: " << averageEntrySize << " bytes" << std::endl;
+    std::cout << "Total data written: " << totalDataSizeGiB << " GiB" << std::endl;
+    std::cout << "Final storage size: " << finalStorageSizeGiB << " GiB" << std::endl;
+    std::cout << "Write amplification: " << writeAmplification << " (ratio)" << std::endl;
+    std::cout << "Throughput (entries): " << entriesThroughput << " entries/second" << std::endl;
+    std::cout << "Throughput (logical): " << logicalThroughputGiB << " GiB/second" << std::endl;
+    std::cout << "Throughput (physical): " << physicalThroughputGiB << " GiB/second" << std::endl;
+    std::cout << "===============================================" << std::endl;
+
+    printLatencyStats(latencyStats);
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/workloads/multi_producer_small_batches.cpp b/archive/2025/summer/bsc_karidas/benchmarks/workloads/multi_producer_small_batches.cpp
new file mode 100644
index 000000000..705ef1523
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/workloads/multi_producer_small_batches.cpp
@@ -0,0 +1,102 @@
+#include "BenchmarkUtils.hpp"
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <filesystem>
+#include <numeric>
+
+int main()
+{
+    // system parameters
+    LoggingConfig config;
+    config.basePath = "./logs";
+    config.baseFilename = "default";
+    config.maxSegmentSize = 50 * 1024 * 1024; // 50 MB
+    config.maxAttempts = 5;
+    config.baseRetryDelay = std::chrono::milliseconds(1);
+    config.queueCapacity = 3000000;
+    config.maxExplicitProducers = 64;
+    config.batchSize = 8400;
+    config.numWriterThreads = 32;
+    config.appendTimeout = std::chrono::minutes(2);
+    config.useEncryption = true;
+    config.compressionLevel = 9;
+    // benchmark parameters
+    const int numProducerThreads = 64;
+    const int entriesPerProducer = 100000;
+    const int numSpecificFiles = 25;
+    const int producerBatchSize = 50;
+    const int payloadSize = 2048;
+
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "Generating batches with pre-determined destinations for all threads...";
+    std::vector<BatchWithDestination> batches = generateBatches(entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+    std::cout << " Done." << std::endl;
+    size_t totalDataSizeBytes = calculateTotalDataSize(batches, numProducerThreads);
+    double totalDataSizeGiB = static_cast<double>(totalDataSizeBytes) / (1024 * 1024 * 1024);
+    std::cout << "Total data to be written: " << totalDataSizeBytes << " bytes (" << totalDataSizeGiB << " GiB)" << std::endl;
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    // Each future now returns a LatencyCollector with thread-local measurements
+    std::vector<std::future<LatencyCollector>> futures;
+    for (int i = 0; i < numProducerThreads; i++)
+    {
+        futures.push_back(std::async(
+            std::launch::async,
+            appendLogEntries,
+            std::ref(loggingManager),
+            std::ref(batches)));
+    }
+
+    // Collect latency measurements from all threads
+    LatencyCollector masterCollector;
+    for (auto &future : futures)
+    {
+        LatencyCollector threadCollector = future.get();
+        masterCollector.merge(threadCollector);
+    }
+
+    loggingManager.stop();
+    auto endTime = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = endTime - startTime;
+
+    size_t finalStorageSize = calculateDirectorySize(config.basePath);
+    double finalStorageSizeGiB = static_cast<double>(finalStorageSize) / (1024 * 1024 * 1024);
+    double writeAmplification = static_cast<double>(finalStorageSize) / totalDataSizeBytes;
+
+    double elapsedSeconds = elapsed.count();
+    const size_t totalEntries = numProducerThreads * entriesPerProducer;
+    double entriesThroughput = totalEntries / elapsedSeconds;
+    double logicalThroughputGiB = totalDataSizeGiB / elapsedSeconds;
+    double physicalThroughputGiB = finalStorageSizeGiB / elapsedSeconds;
+    double averageEntrySize = static_cast<double>(totalDataSizeBytes) / totalEntries;
+
+    // Calculate latency statistics from merged measurements
+    auto latencyStats = calculateLatencyStats(masterCollector);
+
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "============== Benchmark Results ==============" << std::endl;
+    std::cout << "Execution time: " << elapsedSeconds << " seconds" << std::endl;
+    std::cout << "Total entries appended: " << totalEntries << std::endl;
+    std::cout << "Average entry size: " << averageEntrySize << " bytes" << std::endl;
+    std::cout << "Total data written: " << totalDataSizeGiB << " GiB" << std::endl;
+    std::cout << "Final storage size: " << finalStorageSizeGiB << " GiB" << std::endl;
+    std::cout << "Write amplification: " << writeAmplification << " (ratio)" << std::endl;
+    std::cout << "Throughput (entries): " << entriesThroughput << " entries/second" << std::endl;
+    std::cout << "Throughput (logical): " << logicalThroughputGiB << " GiB/second" << std::endl;
+    std::cout << "Throughput (physical): " << physicalThroughputGiB << " GiB/second" << std::endl;
+    std::cout << "===============================================" << std::endl;
+
+    printLatencyStats(latencyStats);
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/benchmarks/workloads/single_entry_appends.cpp b/archive/2025/summer/bsc_karidas/benchmarks/workloads/single_entry_appends.cpp
new file mode 100644
index 000000000..0029e1722
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/benchmarks/workloads/single_entry_appends.cpp
@@ -0,0 +1,102 @@
+#include "BenchmarkUtils.hpp"
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <filesystem>
+#include <numeric>
+
+int main()
+{
+    // system parameters
+    LoggingConfig config;
+    config.basePath = "./logs";
+    config.baseFilename = "default";
+    config.maxSegmentSize = 50 * 1024 * 1024; // 50 MB
+    config.maxAttempts = 5;
+    config.baseRetryDelay = std::chrono::milliseconds(1);
+    config.queueCapacity = 3000000;
+    config.maxExplicitProducers = 64;
+    config.batchSize = 8400;
+    config.numWriterThreads = 32;
+    config.appendTimeout = std::chrono::minutes(2);
+    config.useEncryption = true;
+    config.compressionLevel = 9;
+    // benchmark parameters
+    const int numProducerThreads = 64;
+    const int entriesPerProducer = 25000;
+    const int numSpecificFiles = 25;
+    const int producerBatchSize = 1;
+    const int payloadSize = 2048;
+
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "Generating batches with pre-determined destinations for all threads...";
+    std::vector<BatchWithDestination> batches = generateBatches(entriesPerProducer, numSpecificFiles, producerBatchSize, payloadSize);
+    std::cout << " Done." << std::endl;
+    size_t totalDataSizeBytes = calculateTotalDataSize(batches, numProducerThreads);
+    double totalDataSizeGiB = static_cast<double>(totalDataSizeBytes) / (1024 * 1024 * 1024);
+    std::cout << "Total data to be written: " << totalDataSizeBytes << " bytes (" << totalDataSizeGiB << " GiB)" << std::endl;
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    // Each future now returns a LatencyCollector with thread-local measurements
+    std::vector<std::future<LatencyCollector>> futures;
+    for (int i = 0; i < numProducerThreads; i++)
+    {
+        futures.push_back(std::async(
+            std::launch::async,
+            appendLogEntries,
+            std::ref(loggingManager),
+            std::ref(batches)));
+    }
+
+    // Collect latency measurements from all threads
+    LatencyCollector masterCollector;
+    for (auto &future : futures)
+    {
+        LatencyCollector threadCollector = future.get();
+        masterCollector.merge(threadCollector);
+    }
+
+    loggingManager.stop();
+    auto endTime = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = endTime - startTime;
+
+    size_t finalStorageSize = calculateDirectorySize(config.basePath);
+    double finalStorageSizeGiB = static_cast<double>(finalStorageSize) / (1024 * 1024 * 1024);
+    double writeAmplification = static_cast<double>(finalStorageSize) / totalDataSizeBytes;
+
+    double elapsedSeconds = elapsed.count();
+    const size_t totalEntries = numProducerThreads * entriesPerProducer;
+    double entriesThroughput = totalEntries / elapsedSeconds;
+    double logicalThroughputGiB = totalDataSizeGiB / elapsedSeconds;
+    double physicalThroughputGiB = finalStorageSizeGiB / elapsedSeconds;
+    double averageEntrySize = static_cast<double>(totalDataSizeBytes) / totalEntries;
+
+    // Calculate latency statistics from merged measurements
+    auto latencyStats = calculateLatencyStats(masterCollector);
+
+    cleanupLogDirectory(config.basePath);
+
+    std::cout << "============== Benchmark Results ==============" << std::endl;
+    std::cout << "Execution time: " << elapsedSeconds << " seconds" << std::endl;
+    std::cout << "Total entries appended: " << totalEntries << std::endl;
+    std::cout << "Average entry size: " << averageEntrySize << " bytes" << std::endl;
+    std::cout << "Total data written: " << totalDataSizeGiB << " GiB" << std::endl;
+    std::cout << "Final storage size: " << finalStorageSizeGiB << " GiB" << std::endl;
+    std::cout << "Write amplification: " << writeAmplification << " (ratio)" << std::endl;
+    std::cout << "Throughput (entries): " << entriesThroughput << " entries/second" << std::endl;
+    std::cout << "Throughput (logical): " << logicalThroughputGiB << " GiB/second" << std::endl;
+    std::cout << "Throughput (physical): " << physicalThroughputGiB << " GiB/second" << std::endl;
+    std::cout << "===============================================" << std::endl;
+
+    printLatencyStats(latencyStats);
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/cmake/benchmarks.cmake b/archive/2025/summer/bsc_karidas/cmake/benchmarks.cmake
new file mode 100644
index 000000000..7f2bb376e
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/cmake/benchmarks.cmake
@@ -0,0 +1,37 @@
+set(BENCHMARK_LIBS
+    PRIVATE
+    GDPR_Logging_lib
+    OpenSSL::SSL
+    OpenSSL::Crypto
+    ZLIB::ZLIB
+)
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/benchmarks)
+
+set(VALIDATION_BENCHMARKS
+    batch_size
+    concurrency
+    scaling_concurrency
+    encryption_compression_usage
+    file_rotation
+    queue_capacity
+)
+
+set(WORKLOAD_BENCHMARKS
+    compression_ratio
+    diverse_filepaths
+    large_batches
+    main
+    multi_producer_small_batches
+    single_entry_appends
+)
+
+foreach(benchmark ${VALIDATION_BENCHMARKS})
+    add_executable(${benchmark}_benchmark benchmarks/validation/${benchmark}.cpp)
+    target_link_libraries(${benchmark}_benchmark ${BENCHMARK_LIBS})
+endforeach()
+
+foreach(benchmark ${WORKLOAD_BENCHMARKS})
+    add_executable(${benchmark}_benchmark benchmarks/workloads/${benchmark}.cpp)
+    target_link_libraries(${benchmark}_benchmark ${BENCHMARK_LIBS})
+endforeach()
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/cmake/dependencies.cmake b/archive/2025/summer/bsc_karidas/cmake/dependencies.cmake
new file mode 100644
index 000000000..ed35200cd
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/cmake/dependencies.cmake
@@ -0,0 +1,12 @@
+find_package(OpenSSL REQUIRED)
+find_package(GTest REQUIRED)
+find_package(ZLIB REQUIRED)
+
+message(STATUS "Using OpenSSL version: ${OPENSSL_VERSION}")
+message(STATUS "Using GTest version: ${GTEST_VERSION}")
+message(STATUS "Using ZLIB version: ${ZLIB_VERSION_STRING}")
+
+include_directories(include)
+
+add_subdirectory(external/concurrentqueue EXCLUDE_FROM_ALL)
+include_directories(external/concurrentqueue)
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/cmake/examples.cmake b/archive/2025/summer/bsc_karidas/cmake/examples.cmake
new file mode 100644
index 000000000..40aa8823f
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/cmake/examples.cmake
@@ -0,0 +1,16 @@
+set(EXAMPLE_SOURCES
+    examples/main.cpp
+)
+
+add_executable(logging_example ${EXAMPLE_SOURCES})
+
+target_link_libraries(logging_example
+    PRIVATE
+    GDPR_Logging_lib
+)
+
+target_include_directories(logging_example
+    PRIVATE
+    ${CMAKE_CURRENT_SOURCE_DIR}/include
+    ${CMAKE_CURRENT_SOURCE_DIR}/external/concurrentqueue
+)
diff --git a/archive/2025/summer/bsc_karidas/cmake/library.cmake b/archive/2025/summer/bsc_karidas/cmake/library.cmake
new file mode 100644
index 000000000..e7ed7018c
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/cmake/library.cmake
@@ -0,0 +1,23 @@
+set(LIBRARY_SOURCES
+    src/LogEntry.cpp
+    src/Logger.cpp
+    src/BufferQueue.cpp
+    src/Compression.cpp
+    src/Crypto.cpp
+    src/Writer.cpp
+    src/SegmentedStorage.cpp
+    src/LoggingManager.cpp
+    benchmarks/BenchmarkUtils.cpp
+)
+
+add_library(GDPR_Logging_lib ${LIBRARY_SOURCES})
+
+target_link_libraries(GDPR_Logging_lib 
+    PUBLIC 
+    OpenSSL::SSL 
+    OpenSSL::Crypto 
+    ZLIB::ZLIB
+)
+
+target_include_directories(GDPR_Logging_lib PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
+target_include_directories(GDPR_Logging_lib PUBLIC external/concurrentqueue)
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/cmake/project_settings.cmake b/archive/2025/summer/bsc_karidas/cmake/project_settings.cmake
new file mode 100644
index 000000000..6435a5dd0
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/cmake/project_settings.cmake
@@ -0,0 +1,15 @@
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+# Add compile warnings
+if(MSVC) # Visual Studio
+    add_compile_options(/W4)
+else()
+    add_compile_options(-Wall -Wextra -Wpedantic)
+endif()
+
+# Set a default build type if not specified
+if(NOT CMAKE_BUILD_TYPE)
+    set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING "Choose the type of build" FORCE)
+    message(STATUS "No build type selected, defaulting to RelWithDebInfo")
+endif()
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/cmake/testing.cmake b/archive/2025/summer/bsc_karidas/cmake/testing.cmake
new file mode 100644
index 000000000..220e8013c
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/cmake/testing.cmake
@@ -0,0 +1,41 @@
+enable_testing()
+
+set(TEST_SOURCES
+    # unit tests
+    tests/unit/test_LogEntry.cpp
+    tests/unit/test_Logger.cpp
+    tests/unit/test_BufferQueue.cpp
+    tests/unit/test_Compression.cpp
+    tests/unit/test_Crypto.cpp
+    tests/unit/test_Writer.cpp
+    tests/unit/test_SegmentedStorage.cpp
+    # integration tests
+    tests/integration/test_CompressionCrypto.cpp
+    tests/integration/test_WriterQueue.cpp
+)
+
+macro(add_test_suite TEST_NAME TEST_SOURCE)
+    add_executable(${TEST_NAME} ${TEST_SOURCE})
+    target_link_libraries(${TEST_NAME}
+        PRIVATE
+        GDPR_Logging_lib
+        GTest::GTest 
+        GTest::Main
+        OpenSSL::SSL 
+        OpenSSL::Crypto 
+        ZLIB::ZLIB
+    )
+    add_test(NAME ${TEST_NAME}_test COMMAND ${TEST_NAME})
+endmacro()
+
+# unit tests
+add_test_suite(test_log_entry tests/unit/test_LogEntry.cpp)
+add_test_suite(test_logger tests/unit/test_Logger.cpp)
+add_test_suite(test_buffer_queue tests/unit/test_BufferQueue.cpp)
+add_test_suite(test_compression tests/unit/test_Compression.cpp)
+add_test_suite(test_crypto tests/unit/test_Crypto.cpp)
+add_test_suite(test_writer tests/unit/test_Writer.cpp)
+add_test_suite(test_segmented_storage tests/unit/test_SegmentedStorage.cpp)
+# integration tests
+add_test_suite(test_compression_crypto tests/integration/test_CompressionCrypto.cpp)
+add_test_suite(test_writer_queue tests/integration/test_WriterQueue.cpp)
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/examples/main.cpp b/archive/2025/summer/bsc_karidas/examples/main.cpp
new file mode 100644
index 000000000..e589e92e0
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/examples/main.cpp
@@ -0,0 +1,68 @@
+#include "LoggingManager.hpp"
+#include <iostream>
+#include <thread>
+#include <chrono>
+#include <vector>
+#include <future>
+#include <optional>
+#include <filesystem>
+#include <numeric>
+
+int main()
+{
+    // system parameters
+    LoggingConfig config;
+    config.basePath = "./logs";
+    config.baseFilename = "default";
+    config.maxSegmentSize = 1 * 1024 * 1024; // 1 MB
+    config.maxAttempts = 5;
+    config.baseRetryDelay = std::chrono::milliseconds(1);
+    config.queueCapacity = 1000;
+    config.maxExplicitProducers = 1;
+    config.batchSize = 10;
+    config.numWriterThreads = 1;
+    config.appendTimeout = std::chrono::seconds(5);
+    config.useEncryption = true;
+    config.compressionLevel = 4;
+    config.maxOpenFiles = 32;
+
+    if (std::filesystem::exists(config.basePath))
+    {
+        std::filesystem::remove_all(config.basePath);
+    }
+
+    LoggingManager loggingManager(config);
+    loggingManager.start();
+
+    auto producerToken = loggingManager.createProducerToken();
+
+    LogEntry entry1(LogEntry::ActionType::READ,
+                   "users/user01",
+                   "controller1",
+                   "processor1",
+                   "user01");
+
+    loggingManager.append(entry1, producerToken);
+
+    LogEntry entry2(LogEntry::ActionType::UPDATE,
+                    "users/user02",
+                    "controller2",
+                    "processor2",
+                    "user02");
+
+    LogEntry entry3(LogEntry::ActionType::DELETE,
+                    "users/user03",
+                    "controller3",
+                    "processor3",
+                    "user03");
+
+    std::vector<LogEntry> batch{entry2, entry3};
+
+    loggingManager.appendBatch(batch, producerToken);
+
+    loggingManager.stop();
+
+    return 0;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/.gitignore b/archive/2025/summer/bsc_karidas/external/concurrentqueue/.gitignore
new file mode 100644
index 000000000..64f45cdb9
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/.gitignore
@@ -0,0 +1,33 @@
+*.ipch
+*.suo
+*.user
+*.sdf
+*.opensdf
+*.exe
+*.pdb
+*.vs
+*.VC.db
+build/bin/
+build/*.o
+build/*.log
+build/msvc16/*.log
+build/msvc16/obj/
+build/msvc15/*.log
+build/msvc15/obj/
+build/msvc14/*.log
+build/msvc14/obj/
+build/msvc12/*.log
+build/msvc12/obj/
+build/msvc11/*.log
+build/msvc11/obj/
+build/xcode/build/
+.idea/
+cmake-build*/
+tests/fuzztests/fuzztests.log
+benchmarks/benchmarks.log
+tests/CDSChecker/*.o
+tests/CDSChecker/*.log
+tests/CDSChecker/model-checker/
+tests/relacy/freelist.exe
+tests/relacy/spmchash.exe
+tests/relacy/log.txt
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/CMakeLists.txt b/archive/2025/summer/bsc_karidas/external/concurrentqueue/CMakeLists.txt
new file mode 100644
index 000000000..e8a38cfe2
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/CMakeLists.txt
@@ -0,0 +1,75 @@
+cmake_minimum_required(VERSION 3.9)
+project(concurrentqueue VERSION 1.0.0)
+
+include(GNUInstallDirs)
+include(CMakePackageConfigHelpers)
+
+add_library(${PROJECT_NAME} INTERFACE)
+
+target_include_directories(${PROJECT_NAME} 
+        INTERFACE 
+                $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
+                $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/>
+)
+
+install(TARGETS ${PROJECT_NAME} 
+    EXPORT ${PROJECT_NAME}Targets
+)
+
+write_basic_package_version_file(
+        ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
+    VERSION 
+        ${PROJECT_VERSION}
+    COMPATIBILITY AnyNewerVersion
+    ARCH_INDEPENDENT
+)
+
+configure_package_config_file(${PROJECT_NAME}Config.cmake.in
+                ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
+        INSTALL_DESTINATION 
+                ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}/
+)
+
+install(EXPORT 
+                ${PROJECT_NAME}Targets
+        FILE
+                ${PROJECT_NAME}Targets.cmake
+        NAMESPACE 
+                "${PROJECT_NAME}::"
+        DESTINATION
+                ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}
+        COMPONENT
+                Devel
+)
+
+install(
+        FILES
+                ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
+                ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
+        DESTINATION
+                ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}
+        COMPONENT
+                Devel
+)
+
+install(
+        FILES 
+                blockingconcurrentqueue.h 
+                concurrentqueue.h 
+                lightweightsemaphore.h 
+                LICENSE.md
+        DESTINATION 
+                ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/moodycamel
+)
+
+set(CPACK_PACKAGE_NAME ${PROJECT_NAME})
+set(CPACK_PACKAGE_VENDOR "Cameron Desrochers <cameron@moodycamel.com>")
+set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "An industrial-strength lock-free queue for C++.")
+set(CPACK_PACKAGE_VERSION "${PROJECT_VERSION}")
+set(CPACK_PACKAGE_VERSION_MAJOR "${PROJECT_VERSION_MAJOR}")
+set(CPACK_PACKAGE_VERSION_MINOR "${PROJECT_VERSION_MINOR}")
+set(CPACK_PACKAGE_VERSION_PATCH "${PROJECT_VERSION_PATCH}")
+set(CPACK_DEBIAN_PACKAGE_MAINTAINER ${CPACK_PACKAGE_VENDOR})
+set(CPACK_GENERATOR "RPM;DEB")
+
+include(CPack)
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/LICENSE.md b/archive/2025/summer/bsc_karidas/external/concurrentqueue/LICENSE.md
new file mode 100644
index 000000000..519338976
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/LICENSE.md
@@ -0,0 +1,62 @@
+This license file applies to everything in this repository except that which
+is explicitly annotated as being written by other authors, i.e. the Boost
+queue (included in the benchmarks for comparison), Intel's TBB library (ditto),
+dlib::pipe (ditto),
+the CDSChecker tool (used for verification), the Relacy model checker (ditto),
+and Jeff Preshing's semaphore implementation (used in the blocking queue) which
+has a zlib license (embedded in lightweightsemaphore.h).
+
+---
+
+Simplified BSD License:
+
+Copyright (c) 2013-2016, Cameron Desrochers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+- Redistributions of source code must retain the above copyright notice, this list of
+conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice, this list of
+conditions and the following disclaimer in the documentation and/or other materials
+provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+---
+
+I have also chosen to dual-license under the Boost Software License as an alternative to
+the Simplified BSD license above:
+
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/README.md b/archive/2025/summer/bsc_karidas/external/concurrentqueue/README.md
new file mode 100644
index 000000000..d7424cefa
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/README.md
@@ -0,0 +1,533 @@
+# moodycamel::ConcurrentQueue<T>
+
+An industrial-strength lock-free queue for C++.
+
+Note: If all you need is a single-producer, single-consumer queue, I have [one of those too][spsc].
+
+## Features
+
+- Knock-your-socks-off [blazing fast performance][benchmarks].
+- Single-header implementation. Just drop it in your project.
+- Fully thread-safe lock-free queue. Use concurrently from any number of threads.
+- C++11 implementation -- elements are moved (instead of copied) where possible.
+- Templated, obviating the need to deal exclusively with pointers -- memory is managed for you.
+- No artificial limitations on element types or maximum count.
+- Memory can be allocated once up-front, or dynamically as needed.
+- Fully portable (no assembly; all is done through standard C++11 primitives).
+- Supports super-fast bulk operations.
+- Includes a low-overhead blocking version (BlockingConcurrentQueue).
+- Exception safe.
+
+## Reasons to use
+
+There are not that many full-fledged lock-free queues for C++. Boost has one, but it's limited to objects with trivial
+assignment operators and trivial destructors, for example.
+Intel's TBB queue isn't lock-free, and requires trivial constructors too.
+There are many academic papers that describe lock-free queues in C++, but usable source code is
+hard to find, and tests even more so.
+
+This queue not only has fewer limitations than others (for the most part), but [it's also faster][benchmarks].
+It's been fairly well-tested, and offers advanced features like **bulk enqueueing/dequeueing**
+(which, with my new design, is much faster than one element at a time, approaching and even surpassing
+the speed of a non-concurrent queue even under heavy contention).
+
+In short, there was a lock-free queue shaped hole in the C++ open-source universe, and I set out
+to fill it with the fastest, most complete, and well-tested design and implementation I could.
+The result is `moodycamel::ConcurrentQueue` :-)
+
+## Reasons *not* to use
+
+The fastest synchronization of all is the kind that never takes place. Fundamentally,
+concurrent data structures require some synchronization, and that takes time. Every effort
+was made, of course, to minimize the overhead, but if you can avoid sharing data between
+threads, do so!
+
+Why use concurrent data structures at all, then? Because they're gosh darn convenient! (And, indeed,
+sometimes sharing data concurrently is unavoidable.)
+
+My queue is **not linearizable** (see the next section on high-level design). The foundations of
+its design assume that producers are independent; if this is not the case, and your producers
+co-ordinate amongst themselves in some fashion, be aware that the elements won't necessarily
+come out of the queue in the same order they were put in *relative to the ordering formed by that co-ordination*
+(but they will still come out in the order they were put in by any *individual* producer). If this affects
+your use case, you may be better off with another implementation; either way, it's an important limitation
+to be aware of.
+
+My queue is also **not NUMA aware**, and does a lot of memory re-use internally, meaning it probably doesn't
+scale particularly well on NUMA architectures; however, I don't know of any other lock-free queue that *is*
+NUMA aware (except for [SALSA][salsa], which is very cool, but has no publicly available implementation that I know of).
+
+Finally, the queue is **not sequentially consistent**; there *is* a happens-before relationship between when an element is put
+in the queue and when it comes out, but other things (such as pumping the queue until it's empty) require more thought
+to get right in all eventualities, because explicit memory ordering may have to be done to get the desired effect. In other words,
+it can sometimes be difficult to use the queue correctly. This is why it's a good idea to follow the [samples][samples.md] where possible.
+On the other hand, the upside of this lack of sequential consistency is better performance.
+
+## High-level design
+
+Elements are stored internally using contiguous blocks instead of linked lists for better performance.
+The queue is made up of a collection of sub-queues, one for each producer. When a consumer
+wants to dequeue an element, it checks all the sub-queues until it finds one that's not empty.
+All of this is largely transparent to the user of the queue, however -- it mostly just works<sup>TM</sup>.
+
+One particular consequence of this design, however, (which seems to be non-intuitive) is that if two producers
+enqueue at the same time, there is no defined ordering between the elements when they're later dequeued.
+Normally this is fine, because even with a fully linearizable queue there'd be a race between the producer
+threads and so you couldn't rely on the ordering anyway. However, if for some reason you do extra explicit synchronization
+between the two producer threads yourself, thus defining a total order between enqueue operations, you might expect
+that the elements would come out in the same total order, which is a guarantee my queue does not offer. At that
+point, though, there semantically aren't really two separate producers, but rather one that happens to be spread
+across multiple threads. In this case, you can still establish a total ordering with my queue by creating
+a single producer token, and using that from both threads to enqueue (taking care to synchronize access to the token,
+of course, but there was already extra synchronization involved anyway).
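+
+As a rough illustration of that last point (a sketch added here, not from the original documentation; the mutex and helper name are assumptions), a single shared producer token guarded by a lock gives you one logical producer and therefore a total order:
+
+```C++
+#include "concurrentqueue.h"
+#include <mutex>
+
+moodycamel::ConcurrentQueue<int> q;
+moodycamel::ProducerToken sharedTok(q);   // tokens themselves are not thread-safe
+std::mutex tokMutex;
+
+void coordinatedEnqueue(int value)
+{
+    // Serializing access to one token makes the cooperating threads act as a
+    // single logical producer, so elements dequeue in lock-acquisition order.
+    std::lock_guard<std::mutex> lock(tokMutex);
+    q.enqueue(sharedTok, value);
+}
+```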
+
+I've written a more detailed [overview of the internal design][blog], as well as [the full
+nitty-gritty details of the design][design], on my blog. Finally, the
+[source][source] itself is available for perusal for those interested in its implementation.
+
+## Basic use
+
+The entire queue's implementation is contained in **one header**, [`concurrentqueue.h`][concurrentqueue.h].
+Simply download and include that to use the queue. The blocking version is in a separate header,
+[`blockingconcurrentqueue.h`][blockingconcurrentqueue.h], that depends on [`concurrentqueue.h`][concurrentqueue.h] and
+[`lightweightsemaphore.h`][lightweightsemaphore.h]. The implementation makes use of certain key C++11 features,
+so it requires a relatively recent compiler (e.g. VS2012+ or g++ 4.8; note that g++ 4.6 has a known bug with `std::atomic`
+and is thus not supported). The algorithm implementations themselves are platform independent.
+
+Use it like you would any other templated queue, with the exception that you can use
+it from many threads at once :-)
+
+Simple example:
+
+```C++
+#include "concurrentqueue.h"
+
+moodycamel::ConcurrentQueue<int> q;
+q.enqueue(25);
+
+int item;
+bool found = q.try_dequeue(item);
+assert(found && item == 25);
+```
+
+Description of basic methods:
+- `ConcurrentQueue(size_t initialSizeEstimate)`
+      Constructor which optionally accepts an estimate of the number of elements the queue will hold
+- `enqueue(T&& item)`
+      Enqueues one item, allocating extra space if necessary
+- `try_enqueue(T&& item)`
+      Enqueues one item, but only if enough memory is already allocated
+- `try_dequeue(T& item)`
+      Dequeues one item, returning true if an item was found or false if the queue appeared empty
+
+Note that it is up to the user to ensure that the queue object is completely constructed before
+being used by any other threads (this includes making the memory effects of construction
+visible, possibly via a memory barrier). Similarly, it's important that all threads have
+finished using the queue (and the memory effects have fully propagated) before it is
+destructed.
+
+There are usually two versions of each method, one "explicit" version that takes a user-allocated per-producer or
+per-consumer token, and one "implicit" version that works without tokens. Using the explicit methods is almost
+always faster (though not necessarily by a huge factor). Apart from performance, the primary distinction between them
+is their sub-queue allocation behaviour for enqueue operations: the implicit enqueue methods allocate a thread-local
+producer sub-queue automatically, whereas explicit producers are tied directly to their tokens' lifetimes (but are
+recycled internally).
+
+In order to avoid the number of sub-queues growing without bound, implicit producers are marked for reuse once
+their thread exits. However, this is not supported on all platforms. If using the queue from short-lived threads,
+it is recommended to use explicit producer tokens instead.
+
+Full API (pseudocode):
+
+	# Allocates more memory if necessary
+	enqueue(item) : bool
+	enqueue(prod_token, item) : bool
+	enqueue_bulk(item_first, count) : bool
+	enqueue_bulk(prod_token, item_first, count) : bool
+	
+	# Fails if not enough memory to enqueue
+	try_enqueue(item) : bool
+	try_enqueue(prod_token, item) : bool
+	try_enqueue_bulk(item_first, count) : bool
+	try_enqueue_bulk(prod_token, item_first, count) : bool
+	
+	# Attempts to dequeue from the queue (never allocates)
+	try_dequeue(item&) : bool
+	try_dequeue(cons_token, item&) : bool
+	try_dequeue_bulk(item_first, max) : size_t
+	try_dequeue_bulk(cons_token, item_first, max) : size_t
+	
+	# If you happen to know which producer you want to dequeue from
+	try_dequeue_from_producer(prod_token, item&) : bool
+	try_dequeue_bulk_from_producer(prod_token, item_first, max) : size_t
+	
+	# A not-necessarily-accurate count of the total number of elements
+	size_approx() : size_t
+
+## Blocking version
+
+As mentioned above, a full blocking wrapper of the queue is provided that adds
+`wait_dequeue` and `wait_dequeue_bulk` methods in addition to the regular interface.
+This wrapper is extremely low-overhead, but slightly less fast than the non-blocking
+queue (due to the necessary bookkeeping involving a lightweight semaphore).
+
+There are also timed versions that allow a timeout to be specified (either in microseconds
+or with a `std::chrono` object).
+
+The only major caveat with the blocking version is that you must be careful not to
+destroy the queue while somebody is waiting on it. This generally means you need to
+know for certain that another element is going to come along before you call one of
+the blocking methods. (To be fair, the non-blocking version cannot be destroyed while
+in use either, but it can be easier to coordinate the cleanup.)
+
+Blocking example:
+
+```C++
+#include "blockingconcurrentqueue.h"
+
+moodycamel::BlockingConcurrentQueue<int> q;
+std::thread producer([&]() {
+    for (int i = 0; i != 100; ++i) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(i % 10));
+        q.enqueue(i);
+    }
+});
+std::thread consumer([&]() {
+    for (int i = 0; i != 100; ++i) {
+        int item;
+        q.wait_dequeue(item);
+        assert(item == i);
+        
+        if (q.wait_dequeue_timed(item, std::chrono::milliseconds(5))) {
+            ++i;
+            assert(item == i);
+        }
+    }
+});
+producer.join();
+consumer.join();
+
+assert(q.size_approx() == 0);
+```
+
+## Advanced features
+
+#### Tokens
+
+The queue can take advantage of extra per-producer and per-consumer storage if
+it's available to speed up its operations. This takes the form of "tokens":
+You can create a consumer token and/or a producer token for each thread or task
+(tokens themselves are not thread-safe), and use the methods that accept a token
+as their first parameter:
+
+```C++
+moodycamel::ConcurrentQueue<int> q;
+
+moodycamel::ProducerToken ptok(q);
+q.enqueue(ptok, 17);
+
+moodycamel::ConsumerToken ctok(q);
+int item;
+q.try_dequeue(ctok, item);
+assert(item == 17);
+```
+
+If you happen to know which producer you want to consume from (e.g. in
+a single-producer, multi-consumer scenario), you can use the `try_dequeue_from_producer`
+methods, which accept a producer token instead of a consumer token, and cut some overhead.
+
+Note that tokens work with the blocking version of the queue too.
+
+When producing or consuming many elements, the most efficient way is to:
+
+1. Use the bulk methods of the queue with tokens
+2. Failing that, use the bulk methods without tokens
+3. Failing that, use the single-item methods with tokens
+4. Failing that, use the single-item methods without tokens
+
+Having said that, don't create tokens willy-nilly -- ideally there would be
+one token (of each kind) per thread. The queue will work with what it is
+given, but it performs best when used with tokens.
+
+Note that tokens aren't actually tied to any given thread; it's not technically
+required that they be local to the thread, only that they be used by a single
+producer/consumer at a time.
+
+#### Bulk operations
+
+Thanks to the [novel design][blog] of the queue, it's just as easy to enqueue/dequeue multiple
+items as it is to do one at a time. This means that overhead can be cut drastically for
+bulk operations. Example syntax:
+
+```C++
+moodycamel::ConcurrentQueue<int> q;
+
+int items[] = { 1, 2, 3, 4, 5 };
+q.enqueue_bulk(items, 5);
+
+int results[5];     // Could also be any iterator
+size_t count = q.try_dequeue_bulk(results, 5);
+for (size_t i = 0; i != count; ++i) {
+    assert(results[i] == items[i]);
+}
+```
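+
+The bulk calls also accept tokens -- the fastest combination according to the ordering given under "Tokens" above. A minimal sketch combining the two (added here for illustration):
+
+```C++
+moodycamel::ConcurrentQueue<int> q;
+moodycamel::ProducerToken ptok(q);
+moodycamel::ConsumerToken ctok(q);
+
+int items[] = { 1, 2, 3, 4, 5 };
+q.enqueue_bulk(ptok, items, 5);                        // bulk enqueue through a producer token
+
+int results[5];
+size_t count = q.try_dequeue_bulk(ctok, results, 5);   // bulk dequeue through a consumer token
+```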
+
+#### Preallocation (correctly using `try_enqueue`)
+
+`try_enqueue`, unlike just plain `enqueue`, will never allocate memory. If there's not enough room in the
+queue, it simply returns false. The key to using this method properly, then, is to ensure enough space is
+pre-allocated for your desired maximum element count.
+
+The constructor accepts a count of the number of elements that it should reserve space for. Because the
+queue works with blocks of elements, however, and not individual elements themselves, the value to pass
+in order to obtain an effective number of pre-allocated element slots is non-obvious.
+
+First, be aware that the count passed is rounded up to the next multiple of the block size. Note that the
+default block size is 32 (this can be changed via the traits). Second, once a slot in a block has been
+enqueued to, that slot cannot be re-used until the rest of the block has been completely filled
+up and then completely emptied. This affects the number of blocks you need in order to account for the
+overhead of partially-filled blocks. Third, each producer (whether implicit or explicit) claims and recycles
+blocks in a different manner, which again affects the number of blocks you need to account for a desired number of
+usable slots.
+
+Suppose you want the queue to be able to hold at least `N` elements at any given time. Without delving too
+deep into the rather arcane implementation details, here are some simple formulas for the number of elements
+to request for pre-allocation in such a case. Note the division is intended to be arithmetic division and not
+integer division (in order for `ceil()` to work).
+
+For explicit producers (using tokens to enqueue):
+
+```C++
+(ceil(N / BLOCK_SIZE) + 1) * MAX_NUM_PRODUCERS * BLOCK_SIZE
+```
+
+For implicit producers (no tokens):
+
+```C++
+(ceil(N / BLOCK_SIZE) - 1 + 2 * MAX_NUM_PRODUCERS) * BLOCK_SIZE
+```
+
+When using mixed producer types:
+
+```C++
+((ceil(N / BLOCK_SIZE) - 1) * (MAX_EXPLICIT_PRODUCERS + 1) + 2 * (MAX_IMPLICIT_PRODUCERS + MAX_EXPLICIT_PRODUCERS)) * BLOCK_SIZE
+```
+
+If these formulas seem rather inconvenient, you can use the constructor overload that accepts the minimum
+number of elements (`N`) and the maximum number of explicit and implicit producers directly, and let it do the
+computation for you.
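+
+As a worked example (added for illustration, using the default `BLOCK_SIZE` of 32): to hold at least `N = 1000` elements with up to 4 explicit producers, the first formula gives `(ceil(1000 / 32) + 1) * 4 * 32 = (32 + 1) * 4 * 32 = 4224` slots. The sizing constructor performs an equivalent computation; the exact argument order shown below is an assumption worth checking against the header:
+
+```C++
+// (minimum element capacity, max explicit producers, max implicit producers)
+moodycamel::ConcurrentQueue<int> q(1000, 4, 0);
+
+bool ok = q.try_enqueue(42);   // never allocates; returns false if there is no room (see the contention note below)
+```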
+
+In addition to blocks, there are other internal data structures that require allocating memory if they need to resize (grow).
+If using `try_enqueue` exclusively, the initial sizes may be exceeded, causing subsequent `try_enqueue` operations to fail.
+Specifically, the `INITIAL_IMPLICIT_PRODUCER_HASH_SIZE` trait limits the number of implicit producers that can be active at once
+before the internal hash needs resizing. Along the same lines, the `IMPLICIT_INITIAL_INDEX_SIZE` trait limits the number of
+unconsumed elements that an implicit producer can insert before its internal hash needs resizing. Similarly, the
+`EXPLICIT_INITIAL_INDEX_SIZE` trait limits the number of unconsumed elements that an explicit producer can insert before its
+internal hash needs resizing. In order to avoid hitting these limits when using `try_enqueue`, it is crucial to adjust the
+initial sizes in the traits appropriately, in addition to sizing the number of blocks properly as outlined above.
+
+Finally, it's important to note that because the queue is only eventually consistent and takes advantage of
+weak memory ordering for speed, there's always a possibility that under contention `try_enqueue` will fail
+even if the queue is correctly pre-sized for the desired number of elements. (e.g. A given thread may think that
+the queue's full even when that's no longer the case.) So no matter what, you still need to handle the failure
+case (perhaps looping until it succeeds), unless you don't mind dropping elements.
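+
+One simple way to handle that failure case is a bounded retry loop; the following is only an illustrative sketch (the back-off strategy and attempt limit are arbitrary):
+
+```C++
+#include "concurrentqueue.h"
+#include <thread>
+
+bool enqueueWithRetry(moodycamel::ConcurrentQueue<int>& q, int value, int maxAttempts = 100)
+{
+    for (int attempt = 0; attempt < maxAttempts; ++attempt) {
+        if (q.try_enqueue(value))
+            return true;            // enqueued without allocating
+        std::this_thread::yield();  // let consumers drain the queue before retrying
+    }
+    return false;                   // caller decides whether to drop or fall back to enqueue()
+}
+```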
+
+#### Exception safety
+
+The queue is exception safe, and will never become corrupted if used with a type that may throw exceptions.
+The queue itself never throws any exceptions (operations fail gracefully (return false) if memory allocation
+fails instead of throwing `std::bad_alloc`).
+
+It is important to note that the guarantees of exception safety only hold if the element type never throws
+from its destructor, and that any iterators passed into the queue (for bulk operations) never throw either.
+Note that in particular this means `std::back_inserter` iterators must be used with care, since the vector
+being inserted into may need to allocate and throw a `std::bad_alloc` exception from inside the iterator;
+so be sure to reserve enough capacity in the target container first if you do this.
+
+The guarantees are presently as follows:
+- Enqueue operations are rolled back completely if an exception is thrown from an element's constructor.
+  For bulk enqueue operations, this means that elements are copied instead of moved (in order to avoid
+  having only some objects moved in the event of an exception). Non-bulk enqueues always use
+  the move constructor if one is available.
+- If the assignment operator throws during a dequeue operation (both single and bulk), the element(s) are
+  considered dequeued regardless. In such a case, the dequeued elements are all properly destructed before
+  the exception is propagated, but there's no way to get the elements themselves back.
+- Any exception that is thrown is propagated up the call stack, at which point the queue is in a consistent
+  state.
+
+Note: If any of your type's copy constructors/move constructors/assignment operators don't throw, be sure
+to annotate them with `noexcept`; this will avoid the exception-checking overhead in the queue where possible
+(even with zero-cost exceptions, there's still a code size impact that has to be taken into account).
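+
+For example (a generic type added here for illustration, not one used by this repository), marking the move operations `noexcept` lets the queue take the cheaper paths where possible:
+
+```C++
+#include "concurrentqueue.h"
+#include <string>
+#include <utility>
+
+struct Message
+{
+    std::string payload;
+
+    Message() = default;
+    Message(Message&& other) noexcept : payload(std::move(other.payload)) {}
+    Message& operator=(Message&& other) noexcept
+    {
+        payload = std::move(other.payload);
+        return *this;
+    }
+};
+
+moodycamel::ConcurrentQueue<Message> q;   // noexcept moves avoid the queue's exception-checking overhead
+```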
+
+#### Traits
+
+The queue also supports a traits template argument which defines various types, constants,
+and the memory allocation and deallocation functions that are to be used by the queue. The typical pattern
+to providing your own traits is to create a class that inherits from the default traits
+and override only the values you wish to change. Example:
+
+```C++
+struct MyTraits : public moodycamel::ConcurrentQueueDefaultTraits
+{
+	static const size_t BLOCK_SIZE = 256;		// Use bigger blocks
+};
+
+moodycamel::ConcurrentQueue<int, MyTraits> q;
+```
+
+#### How to dequeue types without calling the constructor
+
+The normal way to dequeue an item is to pass in an existing object by reference, which
+is then assigned to internally by the queue (using the move-assignment operator if possible).
+This can pose a problem for types that are
+expensive to construct or don't have a default constructor; fortunately, there is a simple
+workaround: Create a wrapper class that copies the memory contents of the object when it
+is assigned by the queue (a poor man's move, essentially). Note that this only works if
+the object contains no internal pointers. Example:
+
+```C++
+struct MyObjectMover {
+    inline void operator=(MyObject&& obj) {
+        std::memcpy(data, &obj, sizeof(MyObject));
+        
+        // TODO: Cleanup obj so that when it's destructed by the queue
+        // it doesn't corrupt the data of the object we just moved it into
+    }
+
+    inline MyObject& obj() { return *reinterpret_cast<MyObject*>(data); }
+
+private:
+    alignas(alignof(MyObject)) char data[sizeof(MyObject)];
+};
+```
+
+A less dodgy alternative, if moves are cheap but default construction is not, is to use a
+wrapper that defers construction until the object is assigned, enabling use of the move
+constructor:
+
+```C++
+struct MyObjectMover {
+    inline void operator=(MyObject&& x) {
+        new (data) MyObject(std::move(x));
+        created = true;
+    }
+
+    inline MyObject& obj() {
+        assert(created);
+        return *reinterpret_cast<MyObject*>(data);
+    }
+
+    ~MyObjectMover() {
+        if (created)
+            obj().~MyObject();
+    }
+
+private:
+    alignas(alignof(MyObject)) char data[sizeof(MyObject)];
+    bool created = false;
+};
+```
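+
+A usage sketch for the second (deferred-construction) wrapper, added here for illustration -- `try_dequeue` is templated on its output argument, so the wrapper can be passed where a `MyObject` would normally go:
+
+```C++
+moodycamel::ConcurrentQueue<MyObject> q;
+// ... producers enqueue MyObject instances ...
+
+MyObjectMover slot;                // no MyObject constructed yet
+if (q.try_dequeue(slot)) {
+    MyObject& obj = slot.obj();    // constructed in-place by the wrapper's move-assignment
+    // ... use obj; it is destroyed when 'slot' goes out of scope ...
+}
+```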
+
+## Samples
+
+There are some more detailed samples [here][samples.md]. The source of
+the [unit tests][unittest-src] and [benchmarks][benchmark-src] are available for reference as well.
+
+## Benchmarks
+
+See my blog post for some [benchmark results][benchmarks] (including versus `boost::lockfree::queue` and `tbb::concurrent_queue`),
+or run the benchmarks yourself (requires MinGW and certain GnuWin32 utilities to build on Windows, or a recent
+g++ on Linux):
+
+```Shell
+cd build
+make benchmarks
+bin/benchmarks
+```
+
+The short version of the benchmarks is that it's so fast (especially the bulk methods), that if you're actually
+using the queue to *do* anything, the queue won't be your bottleneck.
+
+## Tests (and bugs)
+
+I've written quite a few unit tests as well as a randomized long-running fuzz tester. I also ran the
+core queue algorithm through the [CDSChecker][cdschecker] C++11 memory model model checker. Some of the
+inner algorithms were tested separately using the [Relacy][relacy] model checker, and full integration
+tests were also performed with Relacy.
+I've tested
+on Linux (Fedora 19) and Windows (7), but only on x86 processors so far (Intel and AMD). The code was
+written to be platform-independent, however, and should work across all processors and OSes.
+
+Due to the complexity of the implementation and the difficult-to-test nature of lock-free code in general,
+there may still be bugs. If anyone is seeing buggy behaviour, I'd like to hear about it! (Especially if
+a unit test for it can be cooked up.) Just open an issue on GitHub.
+	
+## Using vcpkg
+You can download and install `moodycamel::ConcurrentQueue` using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
+
+```Shell
+git clone https://github.com/Microsoft/vcpkg.git
+cd vcpkg
+./bootstrap-vcpkg.sh
+./vcpkg integrate install
+vcpkg install concurrentqueue
+```
+	
+The `moodycamel::ConcurrentQueue` port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
+
+## License
+
+I'm releasing the source of this repository (with the exception of third-party code, i.e. the Boost queue
+(used in the benchmarks for comparison), Intel's TBB library (ditto), CDSChecker, Relacy, and Jeff Preshing's
+cross-platform semaphore, which all have their own licenses)
+under a simplified BSD license. I'm also dual-licensing under the Boost Software License.
+See the [LICENSE.md][license] file for more details.
+
+Note that lock-free programming is a patent minefield, and this code may very
+well violate a pending patent (I haven't looked), though it does not to my present knowledge.
+I did design and implement this queue from scratch.
+
+## Diving into the code
+
+If you're interested in the source code itself, it helps to have a rough idea of how it's laid out. This
+section attempts to describe that.
+
+The queue is formed of several basic parts (listed here in roughly the order they appear in the source). There's the
+helper functions (e.g. for rounding to a power of 2). There's the default traits of the queue, which contain the
+constants and malloc/free functions used by the queue. There's the producer and consumer tokens. Then there's the queue's
+public API itself, starting with the constructor, destructor, and swap/assignment methods. There's the public enqueue methods,
+which are all wrappers around a small set of private enqueue methods found later on. There's the dequeue methods, which are
+defined inline and are relatively straightforward.
+
+Then there's all the main internal data structures. First, there's a lock-free free list, used for recycling spent blocks (elements
+are enqueued to blocks internally). Then there's the block structure itself, which has two different ways of tracking whether
+it's fully emptied or not (remember, given two parallel consumers, there's no way to know which one will finish first) depending on where it's used.
+Then there's a small base class for the two types of internal SPMC producer queues (one for explicit producers that holds onto memory
+but attempts to be faster, and one for implicit ones which attempt to recycle more memory back into the parent but is a little slower).
+The explicit producer is defined first, then the implicit one. They both contain the same general four methods: One to enqueue, one to
+dequeue, one to enqueue in bulk, and one to dequeue in bulk. (Obviously they have constructors and destructors too, and helper methods.)
+The main difference between them is how the block handling is done (they both use the same blocks, but in different ways, and map indices
+to them in different ways).
+
+Finally, there are the miscellaneous internal methods: the ones that handle the initial block pool (populated when the queue is constructed)
+and an abstract block pool that comprises the initial pool and any blocks on the free list; the ones that handle the producer list
+(a lock-free add-only linked list of all the producers in the system); and the ones that handle the implicit producer lookup table (which
+is really a sort of specialized TLS lookup). Then there are some helper methods for allocating and freeing objects, and the data members
+of the queue itself, followed lastly by the free-standing swap functions.
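+
+As a small illustration of those last pieces, the free-standing swap simply forwards to the member swap,
+so either spelling can be used (a sketch):
+
+```C++
+moodycamel::ConcurrentQueue<int> a, b;
+swap(a, b);   // free function, found via ADL in namespace moodycamel
+a.swap(b);    // equivalent member form
+```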
+
+
+[blog]: http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++
+[design]: http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue
+[samples.md]: https://github.com/cameron314/concurrentqueue/blob/master/samples.md
+[source]: https://github.com/cameron314/concurrentqueue
+[concurrentqueue.h]: https://github.com/cameron314/concurrentqueue/blob/master/concurrentqueue.h
+[blockingconcurrentqueue.h]: https://github.com/cameron314/concurrentqueue/blob/master/blockingconcurrentqueue.h
+[lightweightsemaphore.h]: https://github.com/cameron314/concurrentqueue/blob/master/lightweightsemaphore.h
+[unittest-src]: https://github.com/cameron314/concurrentqueue/tree/master/tests/unittests
+[benchmarks]: http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++#benchmarks
+[benchmark-src]: https://github.com/cameron314/concurrentqueue/tree/master/benchmarks
+[license]: https://github.com/cameron314/concurrentqueue/blob/master/LICENSE.md
+[cdschecker]: http://demsky.eecs.uci.edu/c11modelchecker.html
+[relacy]: http://www.1024cores.net/home/relacy-race-detector
+[spsc]: https://github.com/cameron314/readerwriterqueue
+[salsa]: http://webee.technion.ac.il/~idish/ftp/spaa049-gidron.pdf
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/blockingconcurrentqueue.h b/archive/2025/summer/bsc_karidas/external/concurrentqueue/blockingconcurrentqueue.h
new file mode 100644
index 000000000..205a4db70
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/blockingconcurrentqueue.h
@@ -0,0 +1,582 @@
+// Provides an efficient blocking version of moodycamel::ConcurrentQueue.
+// ©2015-2020 Cameron Desrochers. Distributed under the terms of the simplified
+// BSD license, available at the top of concurrentqueue.h.
+// Also dual-licensed under the Boost Software License (see LICENSE.md)
+// Uses Jeff Preshing's semaphore implementation (under the terms of its
+// separate zlib license, see lightweightsemaphore.h).
+
+#pragma once
+
+#include "concurrentqueue.h"
+#include "lightweightsemaphore.h"
+
+#include <type_traits>
+#include <cerrno>
+#include <memory>
+#include <chrono>
+#include <ctime>
+
+namespace moodycamel
+{
+// This is a blocking version of the queue. It has an almost identical interface to
+// the normal non-blocking version, with the addition of various wait_dequeue() methods
+// and the removal of producer-specific dequeue methods.
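+//
+// A minimal usage sketch (not part of the original header; a single producer and a
+// single consumer are assumed for brevity):
+//
+//   moodycamel::BlockingConcurrentQueue<int> q;
+//   q.enqueue(42);
+//
+//   int item;
+//   q.wait_dequeue(item);   // blocks until an element is available
+//   // or, with a timeout:
+//   // bool got = q.wait_dequeue_timed(item, std::chrono::milliseconds(5));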
+template<typename T, typename Traits = ConcurrentQueueDefaultTraits>
+class BlockingConcurrentQueue
+{
+private:
+	typedef ::moodycamel::ConcurrentQueue<T, Traits> ConcurrentQueue;
+	typedef ::moodycamel::LightweightSemaphore LightweightSemaphore;
+
+public:
+	typedef typename ConcurrentQueue::producer_token_t producer_token_t;
+	typedef typename ConcurrentQueue::consumer_token_t consumer_token_t;
+	
+	typedef typename ConcurrentQueue::index_t index_t;
+	typedef typename ConcurrentQueue::size_t size_t;
+	typedef typename std::make_signed<size_t>::type ssize_t;
+	
+	static const size_t BLOCK_SIZE = ConcurrentQueue::BLOCK_SIZE;
+	static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = ConcurrentQueue::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD;
+	static const size_t EXPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::EXPLICIT_INITIAL_INDEX_SIZE;
+	static const size_t IMPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::IMPLICIT_INITIAL_INDEX_SIZE;
+	static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = ConcurrentQueue::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE;
+	static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = ConcurrentQueue::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE;
+	static const size_t MAX_SUBQUEUE_SIZE = ConcurrentQueue::MAX_SUBQUEUE_SIZE;
+	
+public:
+	// Creates a queue with at least `capacity` element slots; note that the
+	// actual number of elements that can be inserted without additional memory
+	// allocation depends on the number of producers and the block size (e.g. if
+	// the block size is equal to `capacity`, only a single block will be allocated
+	// up-front, which means only a single producer will be able to enqueue elements
+	// without an extra allocation -- blocks aren't shared between producers).
+	// This method is not thread safe -- it is up to the user to ensure that the
+	// queue is fully constructed before it starts being used by other threads (this
+	// includes making the memory effects of construction visible, possibly with a
+	// memory barrier).
+	explicit BlockingConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE)
+		: inner(capacity), sema(create<LightweightSemaphore, ssize_t, int>(0, (int)Traits::MAX_SEMA_SPINS), &BlockingConcurrentQueue::template destroy<LightweightSemaphore>)
+	{
+		assert(reinterpret_cast<ConcurrentQueue*>((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && "BlockingConcurrentQueue must have ConcurrentQueue as its first member");
+		if (!sema) {
+			MOODYCAMEL_THROW(std::bad_alloc());
+		}
+	}
+	
+	BlockingConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers)
+		: inner(minCapacity, maxExplicitProducers, maxImplicitProducers), sema(create<LightweightSemaphore, ssize_t, int>(0, (int)Traits::MAX_SEMA_SPINS), &BlockingConcurrentQueue::template destroy<LightweightSemaphore>)
+	{
+		assert(reinterpret_cast<ConcurrentQueue*>((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && "BlockingConcurrentQueue must have ConcurrentQueue as its first member");
+		if (!sema) {
+			MOODYCAMEL_THROW(std::bad_alloc());
+		}
+	}
+	
+	// Disable copying and copy assignment
+	BlockingConcurrentQueue(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION;
+	BlockingConcurrentQueue& operator=(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION;
+	
+	// Moving is supported, but note that it is *not* a thread-safe operation.
+	// Nobody can use the queue while it's being moved, and the memory effects
+	// of that move must be propagated to other threads before they can use it.
+	// Note: When a queue is moved, its tokens are still valid but can only be
+	// used with the destination queue (i.e. semantically they are moved along
+	// with the queue itself).
+	BlockingConcurrentQueue(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT
+		: inner(std::move(other.inner)), sema(std::move(other.sema))
+	{ }
+	
+	inline BlockingConcurrentQueue& operator=(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT
+	{
+		return swap_internal(other);
+	}
+	
+	// Swaps this queue's state with the other's. Not thread-safe.
+	// Swapping two queues does not invalidate their tokens, however
+	// the tokens that were created for one queue must be used with
+	// only the swapped queue (i.e. the tokens are tied to the
+	// queue's movable state, not the object itself).
+	inline void swap(BlockingConcurrentQueue& other) MOODYCAMEL_NOEXCEPT
+	{
+		swap_internal(other);
+	}
+	
+private:
+	BlockingConcurrentQueue& swap_internal(BlockingConcurrentQueue& other)
+	{
+		if (this == &other) {
+			return *this;
+		}
+		
+		inner.swap(other.inner);
+		sema.swap(other.sema);
+		return *this;
+	}
+	
+public:
+	// Enqueues a single item (by copying it).
+	// Allocates memory if required. Only fails if memory allocation fails (or implicit
+	// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0,
+	// or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Thread-safe.
+	inline bool enqueue(T const& item)
+	{
+		if ((details::likely)(inner.enqueue(item))) {
+			sema->signal();
+			return true;
+		}
+		return false;
+	}
+	
+	// Enqueues a single item (by moving it, if possible).
+	// Allocates memory if required. Only fails if memory allocation fails (or implicit
+	// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0,
+	// or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Thread-safe.
+	inline bool enqueue(T&& item)
+	{
+		if ((details::likely)(inner.enqueue(std::move(item)))) {
+			sema->signal();
+			return true;
+		}
+		return false;
+	}
+	
+	// Enqueues a single item (by copying it) using an explicit producer token.
+	// Allocates memory if required. Only fails if memory allocation fails (or
+	// Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Thread-safe.
+	inline bool enqueue(producer_token_t const& token, T const& item)
+	{
+		if ((details::likely)(inner.enqueue(token, item))) {
+			sema->signal();
+			return true;
+		}
+		return false;
+	}
+	
+	// Enqueues a single item (by moving it, if possible) using an explicit producer token.
+	// Allocates memory if required. Only fails if memory allocation fails (or
+	// Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Thread-safe.
+	inline bool enqueue(producer_token_t const& token, T&& item)
+	{
+		if ((details::likely)(inner.enqueue(token, std::move(item)))) {
+			sema->signal();
+			return true;
+		}
+		return false;
+	}
+	
+	// Enqueues several items.
+	// Allocates memory if required. Only fails if memory allocation fails (or
+	// implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE
+	// is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Note: Use std::make_move_iterator if the elements should be moved instead of copied.
+	// Thread-safe.
+	template<typename It>
+	inline bool enqueue_bulk(It itemFirst, size_t count)
+	{
+		if ((details::likely)(inner.enqueue_bulk(std::forward<It>(itemFirst), count))) {
+			sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count);
+			return true;
+		}
+		return false;
+	}
+	
+	// Enqueues several items using an explicit producer token.
+	// Allocates memory if required. Only fails if memory allocation fails
+	// (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Note: Use std::make_move_iterator if the elements should be moved
+	// instead of copied.
+	// Thread-safe.
+	template<typename It>
+	inline bool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)
+	{
+		if ((details::likely)(inner.enqueue_bulk(token, std::forward<It>(itemFirst), count))) {
+			sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count);
+			return true;
+		}
+		return false;
+	}
+	
+	// Enqueues a single item (by copying it).
+	// Does not allocate memory. Fails if not enough room to enqueue (or implicit
+	// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE
+	// is 0).
+	// Thread-safe.
+	inline bool try_enqueue(T const& item)
+	{
+		if (inner.try_enqueue(item)) {
+			sema->signal();
+			return true;
+		}
+		return false;
+	}
+	
+	// Enqueues a single item (by moving it, if possible).
+	// Does not allocate memory (except for one-time implicit producer).
+	// Fails if not enough room to enqueue (or implicit production is
+	// disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0).
+	// Thread-safe.
+	inline bool try_enqueue(T&& item)
+	{
+		if (inner.try_enqueue(std::move(item))) {
+			sema->signal();
+			return true;
+		}
+		return false;
+	}
+	
+	// Enqueues a single item (by copying it) using an explicit producer token.
+	// Does not allocate memory. Fails if not enough room to enqueue.
+	// Thread-safe.
+	inline bool try_enqueue(producer_token_t const& token, T const& item)
+	{
+		if (inner.try_enqueue(token, item)) {
+			sema->signal();
+			return true;
+		}
+		return false;
+	}
+	
+	// Enqueues a single item (by moving it, if possible) using an explicit producer token.
+	// Does not allocate memory. Fails if not enough room to enqueue.
+	// Thread-safe.
+	inline bool try_enqueue(producer_token_t const& token, T&& item)
+	{
+		if (inner.try_enqueue(token, std::move(item))) {
+			sema->signal();
+			return true;
+		}
+		return false;
+	}
+	
+	// Enqueues several items.
+	// Does not allocate memory (except for one-time implicit producer).
+	// Fails if not enough room to enqueue (or implicit production is
+	// disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0).
+	// Note: Use std::make_move_iterator if the elements should be moved
+	// instead of copied.
+	// Thread-safe.
+	template<typename It>
+	inline bool try_enqueue_bulk(It itemFirst, size_t count)
+	{
+		if (inner.try_enqueue_bulk(std::forward<It>(itemFirst), count)) {
+			sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count);
+			return true;
+		}
+		return false;
+	}
+	
+	// Enqueues several items using an explicit producer token.
+	// Does not allocate memory. Fails if not enough room to enqueue.
+	// Note: Use std::make_move_iterator if the elements should be moved
+	// instead of copied.
+	// Thread-safe.
+	template<typename It>
+	inline bool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)
+	{
+		if (inner.try_enqueue_bulk(token, std::forward<It>(itemFirst), count)) {
+			sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count);
+			return true;
+		}
+		return false;
+	}
+	
+	
+	// Attempts to dequeue from the queue.
+	// Returns false if all producer streams appeared empty at the time they
+	// were checked (so, the queue is likely but not guaranteed to be empty).
+	// Never allocates. Thread-safe.
+	template<typename U>
+	inline bool try_dequeue(U& item)
+	{
+		if (sema->tryWait()) {
+			while (!inner.try_dequeue(item)) {
+				continue;
+			}
+			return true;
+		}
+		return false;
+	}
+	
+	// Attempts to dequeue from the queue using an explicit consumer token.
+	// Returns false if all producer streams appeared empty at the time they
+	// were checked (so, the queue is likely but not guaranteed to be empty).
+	// Never allocates. Thread-safe.
+	template<typename U>
+	inline bool try_dequeue(consumer_token_t& token, U& item)
+	{
+		if (sema->tryWait()) {
+			while (!inner.try_dequeue(token, item)) {
+				continue;
+			}
+			return true;
+		}
+		return false;
+	}
+	
+	// Attempts to dequeue several elements from the queue.
+	// Returns the number of items actually dequeued.
+	// Returns 0 if all producer streams appeared empty at the time they
+	// were checked (so, the queue is likely but not guaranteed to be empty).
+	// Never allocates. Thread-safe.
+	template<typename It>
+	inline size_t try_dequeue_bulk(It itemFirst, size_t max)
+	{
+		size_t count = 0;
+		max = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max);
+		while (count != max) {
+			count += inner.template try_dequeue_bulk<It&>(itemFirst, max - count);
+		}
+		return count;
+	}
+	
+	// Attempts to dequeue several elements from the queue using an explicit consumer token.
+	// Returns the number of items actually dequeued.
+	// Returns 0 if all producer streams appeared empty at the time they
+	// were checked (so, the queue is likely but not guaranteed to be empty).
+	// Never allocates. Thread-safe.
+	template<typename It>
+	inline size_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max)
+	{
+		size_t count = 0;
+		max = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max);
+		while (count != max) {
+			count += inner.template try_dequeue_bulk<It&>(token, itemFirst, max - count);
+		}
+		return count;
+	}
+	
+	
+	
+	// Blocks the current thread until there's something to dequeue, then
+	// dequeues it.
+	// Never allocates. Thread-safe.
+	template<typename U>
+	inline void wait_dequeue(U& item)
+	{
+		while (!sema->wait()) {
+			continue;
+		}
+		while (!inner.try_dequeue(item)) {
+			continue;
+		}
+	}
+
+	// Blocks the current thread until either there's something to dequeue
+	// or the timeout (specified in microseconds) expires. Returns false
+	// without setting `item` if the timeout expires, otherwise assigns
+	// to `item` and returns true.
+	// Using a negative timeout indicates an indefinite timeout,
+	// and is thus functionally equivalent to calling wait_dequeue.
+	// Never allocates. Thread-safe.
+	template<typename U>
+	inline bool wait_dequeue_timed(U& item, std::int64_t timeout_usecs)
+	{
+		if (!sema->wait(timeout_usecs)) {
+			return false;
+		}
+		while (!inner.try_dequeue(item)) {
+			continue;
+		}
+		return true;
+	}
+	
+	// Blocks the current thread until either there's something to dequeue
+	// or the timeout expires. Returns false without setting `item` if the
+	// timeout expires, otherwise assigns to `item` and returns true.
+	// Never allocates. Thread-safe.
+	template<typename U, typename Rep, typename Period>
+	inline bool wait_dequeue_timed(U& item, std::chrono::duration<Rep, Period> const& timeout)
+	{
+		return wait_dequeue_timed(item, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());
+	}
+	
+	// Blocks the current thread until there's something to dequeue, then
+	// dequeues it using an explicit consumer token.
+	// Never allocates. Thread-safe.
+	template<typename U>
+	inline void wait_dequeue(consumer_token_t& token, U& item)
+	{
+		while (!sema->wait()) {
+			continue;
+		}
+		while (!inner.try_dequeue(token, item)) {
+			continue;
+		}
+	}
+	
+	// Blocks the current thread until either there's something to dequeue
+	// or the timeout (specified in microseconds) expires. Returns false
+	// without setting `item` if the timeout expires, otherwise assigns
+	// to `item` and returns true.
+	// Using a negative timeout indicates an indefinite timeout,
+	// and is thus functionally equivalent to calling wait_dequeue.
+	// Never allocates. Thread-safe.
+	template<typename U>
+	inline bool wait_dequeue_timed(consumer_token_t& token, U& item, std::int64_t timeout_usecs)
+	{
+		if (!sema->wait(timeout_usecs)) {
+			return false;
+		}
+		while (!inner.try_dequeue(token, item)) {
+			continue;
+		}
+		return true;
+	}
+	
+	// Blocks the current thread until either there's something to dequeue
+	// or the timeout expires. Returns false without setting `item` if the
+	// timeout expires, otherwise assigns to `item` and returns true.
+	// Never allocates. Thread-safe.
+	template<typename U, typename Rep, typename Period>
+	inline bool wait_dequeue_timed(consumer_token_t& token, U& item, std::chrono::duration<Rep, Period> const& timeout)
+	{
+		return wait_dequeue_timed(token, item, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());
+	}
+	
+	// Attempts to dequeue several elements from the queue.
+	// Returns the number of items actually dequeued, which will
+	// always be at least one (this method blocks until the queue
+	// is non-empty) and at most max.
+	// Never allocates. Thread-safe.
+	template<typename It>
+	inline size_t wait_dequeue_bulk(It itemFirst, size_t max)
+	{
+		size_t count = 0;
+		max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max);
+		while (count != max) {
+			count += inner.template try_dequeue_bulk<It&>(itemFirst, max - count);
+		}
+		return count;
+	}
+	
+	// Attempts to dequeue several elements from the queue.
+	// Returns the number of items actually dequeued, which can
+	// be 0 if the timeout expires while waiting for elements,
+	// and at most max.
+	// Using a negative timeout indicates an indefinite timeout,
+	// and is thus functionally equivalent to calling wait_dequeue_bulk.
+	// Never allocates. Thread-safe.
+	template<typename It>
+	inline size_t wait_dequeue_bulk_timed(It itemFirst, size_t max, std::int64_t timeout_usecs)
+	{
+		size_t count = 0;
+		max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max, timeout_usecs);
+		while (count != max) {
+			count += inner.template try_dequeue_bulk<It&>(itemFirst, max - count);
+		}
+		return count;
+	}
+	
+	// Attempts to dequeue several elements from the queue.
+	// Returns the number of items actually dequeued, which can
+	// be 0 if the timeout expires while waiting for elements,
+	// and at most max.
+	// Never allocates. Thread-safe.
+	template<typename It, typename Rep, typename Period>
+	inline size_t wait_dequeue_bulk_timed(It itemFirst, size_t max, std::chrono::duration<Rep, Period> const& timeout)
+	{
+		return wait_dequeue_bulk_timed<It&>(itemFirst, max, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());
+	}
+	
+	// Attempts to dequeue several elements from the queue using an explicit consumer token.
+	// Returns the number of items actually dequeued, which will
+	// always be at least one (this method blocks until the queue
+	// is non-empty) and at most max.
+	// Never allocates. Thread-safe.
+	template<typename It>
+	inline size_t wait_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max)
+	{
+		size_t count = 0;
+		max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max);
+		while (count != max) {
+			count += inner.template try_dequeue_bulk<It&>(token, itemFirst, max - count);
+		}
+		return count;
+	}
+	
+	// Attempts to dequeue several elements from the queue using an explicit consumer token.
+	// Returns the number of items actually dequeued, which can
+	// be 0 if the timeout expires while waiting for elements,
+	// and at most max.
+	// Using a negative timeout indicates an indefinite timeout,
+	// and is thus functionally equivalent to calling wait_dequeue_bulk.
+	// Never allocates. Thread-safe.
+	template<typename It>
+	inline size_t wait_dequeue_bulk_timed(consumer_token_t& token, It itemFirst, size_t max, std::int64_t timeout_usecs)
+	{
+		size_t count = 0;
+		max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max, timeout_usecs);
+		while (count != max) {
+			count += inner.template try_dequeue_bulk<It&>(token, itemFirst, max - count);
+		}
+		return count;
+	}
+	
+	// Attempts to dequeue several elements from the queue using an explicit consumer token.
+	// Returns the number of items actually dequeued, which can
+	// be 0 if the timeout expires while waiting for elements,
+	// and at most max.
+	// Never allocates. Thread-safe.
+	template<typename It, typename Rep, typename Period>
+	inline size_t wait_dequeue_bulk_timed(consumer_token_t& token, It itemFirst, size_t max, std::chrono::duration<Rep, Period> const& timeout)
+	{
+		return wait_dequeue_bulk_timed<It&>(token, itemFirst, max, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());
+	}
+	
+	
+	// Returns an estimate of the total number of elements currently in the queue. This
+	// estimate is only accurate if the queue has completely stabilized before it is called
+	// (i.e. all enqueue and dequeue operations have completed and their memory effects are
+	// visible on the calling thread, and no further operations start while this method is
+	// being called).
+	// Thread-safe.
+	inline size_t size_approx() const
+	{
+		return (size_t)sema->availableApprox();
+	}
+	
+	
+	// Returns true if the underlying atomic variables used by
+	// the queue are lock-free (they should be on most platforms).
+	// Thread-safe.
+	static constexpr bool is_lock_free()
+	{
+		return ConcurrentQueue::is_lock_free();
+	}
+	
+
+private:
+	template<typename U, typename A1, typename A2>
+	static inline U* create(A1&& a1, A2&& a2)
+	{
+		void* p = (Traits::malloc)(sizeof(U));
+		return p != nullptr ? new (p) U(std::forward<A1>(a1), std::forward<A2>(a2)) : nullptr;
+	}
+	
+	template<typename U>
+	static inline void destroy(U* p)
+	{
+		if (p != nullptr) {
+			p->~U();
+		}
+		(Traits::free)(p);
+	}
+	
+private:
+	ConcurrentQueue inner;
+	std::unique_ptr<LightweightSemaphore, void (*)(LightweightSemaphore*)> sema;
+};
+
+
+template<typename T, typename Traits>
+inline void swap(BlockingConcurrentQueue<T, Traits>& a, BlockingConcurrentQueue<T, Traits>& b) MOODYCAMEL_NOEXCEPT
+{
+	a.swap(b);
+}
+
+}	// end namespace moodycamel
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/blockingconcurrentqueue.cpp b/archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/blockingconcurrentqueue.cpp
new file mode 100644
index 000000000..a27e60e2c
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/blockingconcurrentqueue.cpp
@@ -0,0 +1,40 @@
+#include "concurrentqueue.h"
+#include "../blockingconcurrentqueue.h"
+
+typedef moodycamel::BlockingConcurrentQueue<void*> MoodycamelBCQType, *MoodycamelBCQPtr;
+
+extern "C" {
+	
+int moodycamel_bcq_create(MoodycamelBCQHandle* handle)
+{
+	MoodycamelBCQPtr retval = new MoodycamelBCQType;
+	if (retval == nullptr) {
+		return 0;
+	}
+	*handle = retval;
+	return 1;
+}
+
+int moodycamel_bcq_destroy(MoodycamelBCQHandle handle)
+{
+	delete reinterpret_cast<MoodycamelBCQPtr>(handle);
+	return 1;
+}
+
+int moodycamel_bcq_enqueue(MoodycamelBCQHandle handle, MoodycamelValue value)
+{
+	return reinterpret_cast<MoodycamelBCQPtr>(handle)->enqueue(value) ? 1 : 0;
+}
+
+int moodycamel_bcq_wait_dequeue(MoodycamelBCQHandle handle, MoodycamelValue* value)
+{
+	reinterpret_cast<MoodycamelBCQPtr>(handle)->wait_dequeue(*value);
+	return 1;
+}
+
+int moodycamel_bcq_try_dequeue(MoodycamelBCQHandle handle, MoodycamelValue* value)
+{
+	return reinterpret_cast<MoodycamelBCQPtr>(handle)->try_dequeue(*value) ? 1 : 0;
+}
+
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/concurrentqueue.cpp b/archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/concurrentqueue.cpp
new file mode 100644
index 000000000..4947344d6
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/concurrentqueue.cpp
@@ -0,0 +1,39 @@
+#include "concurrentqueue.h"
+#include "../concurrentqueue.h"
+
+typedef moodycamel::ConcurrentQueue<void*> MoodycamelCQType, *MoodycamelCQPtr;
+
+extern "C" {
+
+int moodycamel_cq_create(MoodycamelCQHandle* handle)
+{
+	MoodycamelCQPtr retval = new MoodycamelCQType;
+	if (retval == nullptr) {
+		return 0;
+	}
+	*handle = retval;
+	return 1;
+}
+
+int moodycamel_cq_destroy(MoodycamelCQHandle handle)
+{
+	delete reinterpret_cast<MoodycamelCQPtr>(handle);
+	return 1;
+}
+
+int moodycamel_cq_enqueue(MoodycamelCQHandle handle, MoodycamelValue value)
+{
+	return reinterpret_cast<MoodycamelCQPtr>(handle)->enqueue(value) ? 1 : 0;
+}
+
+int moodycamel_cq_try_dequeue(MoodycamelCQHandle handle, MoodycamelValue* value)
+{
+	return reinterpret_cast<MoodycamelCQPtr>(handle)->try_dequeue(*value) ? 1 : 0;
+}
+
+size_t moodycamel_cq_size_approx(MoodycamelCQHandle handle)
+{
+	return reinterpret_cast<MoodycamelCQPtr>(handle)->size_approx();
+}
+
+}
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/concurrentqueue.h b/archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/concurrentqueue.h
new file mode 100644
index 000000000..27822d00e
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/c_api/concurrentqueue.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef MOODYCAMEL_EXPORT
+#ifdef _WIN32
+#if defined(MOODYCAMEL_STATIC) //preferred way
+#define MOODYCAMEL_EXPORT
+#elif defined(DLL_EXPORT)
+#define MOODYCAMEL_EXPORT __declspec(dllexport)
+#else
+#define MOODYCAMEL_EXPORT __declspec(dllimport)
+#endif
+#else
+#define MOODYCAMEL_EXPORT
+#endif
+#endif
+
+typedef void* MoodycamelCQHandle;
+typedef void* MoodycamelBCQHandle;
+typedef void* MoodycamelValue;
+
+MOODYCAMEL_EXPORT int moodycamel_cq_create(MoodycamelCQHandle* handle);
+MOODYCAMEL_EXPORT int moodycamel_cq_destroy(MoodycamelCQHandle handle);
+MOODYCAMEL_EXPORT int moodycamel_cq_enqueue(MoodycamelCQHandle handle, MoodycamelValue value);
+MOODYCAMEL_EXPORT int moodycamel_cq_try_dequeue(MoodycamelCQHandle handle, MoodycamelValue* value);
+MOODYCAMEL_EXPORT size_t moodycamel_cq_size_approx(MoodycamelCQHandle handle);
+
+MOODYCAMEL_EXPORT int moodycamel_bcq_create(MoodycamelBCQHandle* handle);
+MOODYCAMEL_EXPORT int moodycamel_bcq_destroy(MoodycamelBCQHandle handle);
+MOODYCAMEL_EXPORT int moodycamel_bcq_enqueue(MoodycamelBCQHandle handle, MoodycamelValue value);
+MOODYCAMEL_EXPORT int moodycamel_bcq_wait_dequeue(MoodycamelBCQHandle handle, MoodycamelValue* value);
+MOODYCAMEL_EXPORT int moodycamel_bcq_try_dequeue(MoodycamelBCQHandle handle, MoodycamelValue* value);
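+
+/* Minimal usage sketch (not part of the original header). The queue stores opaque
+ * MoodycamelValue (void*) values, so callers pass pointers whose lifetime they manage:
+ *
+ *   MoodycamelCQHandle h;
+ *   if (moodycamel_cq_create(&h)) {
+ *       int x = 42;
+ *       moodycamel_cq_enqueue(h, &x);
+ *       MoodycamelValue v;
+ *       if (moodycamel_cq_try_dequeue(h, &v)) {
+ *           int* p = (int*)v;
+ *           (void)p;
+ *       }
+ *       moodycamel_cq_destroy(h);
+ *   }
+ */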
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/concurrentqueue.h b/archive/2025/summer/bsc_karidas/external/concurrentqueue/concurrentqueue.h
new file mode 100644
index 000000000..2fc775400
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/concurrentqueue.h
@@ -0,0 +1,3747 @@
+// Provides a C++11 implementation of a multi-producer, multi-consumer lock-free queue.
+// An overview, including benchmark results, is provided here:
+//     http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++
+// The full design is also described in excruciating detail at:
+//    http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue
+
+// Simplified BSD license:
+// Copyright (c) 2013-2020, Cameron Desrochers.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// - Redistributions of source code must retain the above copyright notice, this list of
+// conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice, this list of
+// conditions and the following disclaimer in the documentation and/or other materials
+// provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+// OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+// TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Also dual-licensed under the Boost Software License (see LICENSE.md)
+
+#pragma once
+
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+// Disable -Wconversion warnings (spuriously triggered when Traits::size_t and
+// Traits::index_t are set to < 32 bits, causing integer promotion, causing warnings
+// upon assigning any computed values)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion"
+
+#ifdef MCDBGQ_USE_RELACY
+#pragma GCC diagnostic ignored "-Wint-to-pointer-cast"
+#endif
+#endif
+
+#if defined(_MSC_VER) && (!defined(_HAS_CXX17) || !_HAS_CXX17)
+// VS2019 with /W4 warns about constant conditional expressions, but does not support
+// `if constexpr` unless /std:c++17 or higher is used, so we have no choice but to simply disable the warning
+#pragma warning(push)
+#pragma warning(disable: 4127)  // conditional expression is constant
+#endif
+
+#if defined(__APPLE__)
+#include "TargetConditionals.h"
+#endif
+
+#ifdef MCDBGQ_USE_RELACY
+#include "relacy/relacy_std.hpp"
+#include "relacy_shims.h"
+// We only use malloc/free anyway, and the delete macro messes up `= delete` method declarations.
+// We'll override the default trait malloc ourselves without a macro.
+#undef new
+#undef delete
+#undef malloc
+#undef free
+#else
+#include <atomic>		// Requires C++11. Sorry VS2010.
+#include <cassert>
+#endif
+#include <cstddef>              // for max_align_t
+#include <cstdint>
+#include <cstdlib>
+#include <type_traits>
+#include <algorithm>
+#include <utility>
+#include <limits>
+#include <climits>		// for CHAR_BIT
+#include <array>
+#include <thread>		// partly for __WINPTHREADS_VERSION if on MinGW-w64 w/ POSIX threading
+#include <mutex>        // used for thread exit synchronization
+
+// Platform-specific definitions of a numeric thread ID type and an invalid value
+namespace moodycamel { namespace details {
+	template<typename thread_id_t> struct thread_id_converter {
+		typedef thread_id_t thread_id_numeric_size_t;
+		typedef thread_id_t thread_id_hash_t;
+		static thread_id_hash_t prehash(thread_id_t const& x) { return x; }
+	};
+} }
+#if defined(MCDBGQ_USE_RELACY)
+namespace moodycamel { namespace details {
+	typedef std::uint32_t thread_id_t;
+	static const thread_id_t invalid_thread_id  = 0xFFFFFFFFU;
+	static const thread_id_t invalid_thread_id2 = 0xFFFFFFFEU;
+	static inline thread_id_t thread_id() { return rl::thread_index(); }
+} }
+#elif defined(_WIN32) || defined(__WINDOWS__) || defined(__WIN32__)
+// No sense pulling in windows.h in a header, we'll manually declare the function
+// we use and rely on backwards-compatibility for this not to break
+extern "C" __declspec(dllimport) unsigned long __stdcall GetCurrentThreadId(void);
+namespace moodycamel { namespace details {
+	static_assert(sizeof(unsigned long) == sizeof(std::uint32_t), "Expected size of unsigned long to be 32 bits on Windows");
+	typedef std::uint32_t thread_id_t;
+	static const thread_id_t invalid_thread_id  = 0;			// See http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx
+	static const thread_id_t invalid_thread_id2 = 0xFFFFFFFFU;	// Not technically guaranteed to be invalid, but is never used in practice. Note that all Win32 thread IDs are presently multiples of 4.
+	static inline thread_id_t thread_id() { return static_cast<thread_id_t>(::GetCurrentThreadId()); }
+} }
+#elif defined(__arm__) || defined(_M_ARM) || defined(__aarch64__) || (defined(__APPLE__) && TARGET_OS_IPHONE) || defined(__MVS__) || defined(MOODYCAMEL_NO_THREAD_LOCAL)
+namespace moodycamel { namespace details {
+	static_assert(sizeof(std::thread::id) == 4 || sizeof(std::thread::id) == 8, "std::thread::id is expected to be either 4 or 8 bytes");
+	
+	typedef std::thread::id thread_id_t;
+	static const thread_id_t invalid_thread_id;         // Default ctor creates invalid ID
+
+	// Note we don't define an invalid_thread_id2 since std::thread::id doesn't have one; it's
+	// only used if MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is defined anyway, which it won't
+	// be.
+	static inline thread_id_t thread_id() { return std::this_thread::get_id(); }
+
+	template<std::size_t> struct thread_id_size { };
+	template<> struct thread_id_size<4> { typedef std::uint32_t numeric_t; };
+	template<> struct thread_id_size<8> { typedef std::uint64_t numeric_t; };
+
+	template<> struct thread_id_converter<thread_id_t> {
+		typedef thread_id_size<sizeof(thread_id_t)>::numeric_t thread_id_numeric_size_t;
+#ifndef __APPLE__
+		typedef std::size_t thread_id_hash_t;
+#else
+		typedef thread_id_numeric_size_t thread_id_hash_t;
+#endif
+
+		static thread_id_hash_t prehash(thread_id_t const& x)
+		{
+#ifndef __APPLE__
+			return std::hash<std::thread::id>()(x);
+#else
+			return *reinterpret_cast<thread_id_hash_t const*>(&x);
+#endif
+		}
+	};
+} }
+#else
+// Use a nice trick from this answer: http://stackoverflow.com/a/8438730/21475
+// In order to get a numeric thread ID in a platform-independent way, we use a thread-local
+// static variable's address as a thread identifier :-)
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#define MOODYCAMEL_THREADLOCAL __thread
+#elif defined(_MSC_VER)
+#define MOODYCAMEL_THREADLOCAL __declspec(thread)
+#else
+// Assume C++11 compliant compiler
+#define MOODYCAMEL_THREADLOCAL thread_local
+#endif
+namespace moodycamel { namespace details {
+	typedef std::uintptr_t thread_id_t;
+	static const thread_id_t invalid_thread_id  = 0;		// Address can't be nullptr
+	static const thread_id_t invalid_thread_id2 = 1;		// Member accesses off a null pointer are also generally invalid. Plus it's not aligned.
+	inline thread_id_t thread_id() { static MOODYCAMEL_THREADLOCAL int x; return reinterpret_cast<thread_id_t>(&x); }
+} }
+#endif
+
+// Constexpr if
+#ifndef MOODYCAMEL_CONSTEXPR_IF
+#if (defined(_MSC_VER) && defined(_HAS_CXX17) && _HAS_CXX17) || __cplusplus > 201402L
+#define MOODYCAMEL_CONSTEXPR_IF if constexpr
+#define MOODYCAMEL_MAYBE_UNUSED [[maybe_unused]]
+#else
+#define MOODYCAMEL_CONSTEXPR_IF if
+#define MOODYCAMEL_MAYBE_UNUSED
+#endif
+#endif
+
+// Exceptions
+#ifndef MOODYCAMEL_EXCEPTIONS_ENABLED
+#if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || (!defined(_MSC_VER) && !defined(__GNUC__))
+#define MOODYCAMEL_EXCEPTIONS_ENABLED
+#endif
+#endif
+#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
+#define MOODYCAMEL_TRY try
+#define MOODYCAMEL_CATCH(...) catch(__VA_ARGS__)
+#define MOODYCAMEL_RETHROW throw
+#define MOODYCAMEL_THROW(expr) throw (expr)
+#else
+#define MOODYCAMEL_TRY MOODYCAMEL_CONSTEXPR_IF (true)
+#define MOODYCAMEL_CATCH(...) else MOODYCAMEL_CONSTEXPR_IF (false)
+#define MOODYCAMEL_RETHROW
+#define MOODYCAMEL_THROW(expr)
+#endif
+
+#ifndef MOODYCAMEL_NOEXCEPT
+#if !defined(MOODYCAMEL_EXCEPTIONS_ENABLED)
+#define MOODYCAMEL_NOEXCEPT
+#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) true
+#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) true
+#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1800
+// VS2012's std::is_nothrow_[move_]constructible is broken and returns true when it shouldn't :-(
+// We have to assume *all* non-trivial constructors may throw on VS2012!
+#define MOODYCAMEL_NOEXCEPT _NOEXCEPT
+#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference<valueType>::value && std::is_move_constructible<type>::value ? std::is_trivially_move_constructible<type>::value : std::is_trivially_copy_constructible<type>::value)
+#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference<valueType>::value && std::is_move_assignable<type>::value ? std::is_trivially_move_assignable<type>::value || std::is_nothrow_move_assignable<type>::value : std::is_trivially_copy_assignable<type>::value || std::is_nothrow_copy_assignable<type>::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr))
+#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1900
+#define MOODYCAMEL_NOEXCEPT _NOEXCEPT
+#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference<valueType>::value && std::is_move_constructible<type>::value ? std::is_trivially_move_constructible<type>::value || std::is_nothrow_move_constructible<type>::value : std::is_trivially_copy_constructible<type>::value || std::is_nothrow_copy_constructible<type>::value)
+#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference<valueType>::value && std::is_move_assignable<type>::value ? std::is_trivially_move_assignable<type>::value || std::is_nothrow_move_assignable<type>::value : std::is_trivially_copy_assignable<type>::value || std::is_nothrow_copy_assignable<type>::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr))
+#else
+#define MOODYCAMEL_NOEXCEPT noexcept
+#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) noexcept(expr)
+#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) noexcept(expr)
+#endif
+#endif
+
+#ifndef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED
+#ifdef MCDBGQ_USE_RELACY
+#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED
+#else
+// VS2013 doesn't support `thread_local`, and MinGW-w64 w/ POSIX threading has a crippling bug: http://sourceforge.net/p/mingw-w64/bugs/445
+// g++ <=4.7 doesn't support thread_local either.
+// Finally, iOS/ARM doesn't have support for it either, and g++/ARM allows it to compile but it's unconfirmed to actually work
+#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && (!defined(__MINGW32__) && !defined(__MINGW64__) || !defined(__WINPTHREADS_VERSION)) && (!defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) && (!defined(__APPLE__) || !TARGET_OS_IPHONE) && !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) && !defined(__MVS__)
+// Assume `thread_local` is fully supported in all other C++11 compilers/platforms
+#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED    // tentatively enabled for now; years ago several users reported having problems with it on
+#endif
+#endif
+#endif
+
+// VS2012 doesn't support deleted functions. 
+// In this case, we declare the function normally but don't define it. A link error will be generated if the function is called.
+#ifndef MOODYCAMEL_DELETE_FUNCTION
+#if defined(_MSC_VER) && _MSC_VER < 1800
+#define MOODYCAMEL_DELETE_FUNCTION
+#else
+#define MOODYCAMEL_DELETE_FUNCTION = delete
+#endif
+#endif
+
+namespace moodycamel { namespace details {
+#ifndef MOODYCAMEL_ALIGNAS
+// VS2013 doesn't support alignas or alignof, and align() requires a constant literal
+#if defined(_MSC_VER) && _MSC_VER <= 1800
+#define MOODYCAMEL_ALIGNAS(alignment) __declspec(align(alignment))
+#define MOODYCAMEL_ALIGNOF(obj) __alignof(obj)
+#define MOODYCAMEL_ALIGNED_TYPE_LIKE(T, obj) typename details::Vs2013Aligned<std::alignment_of<obj>::value, T>::type
+	template<int Align, typename T> struct Vs2013Aligned { };  // default, unsupported alignment
+	template<typename T> struct Vs2013Aligned<1, T> { typedef __declspec(align(1)) T type; };
+	template<typename T> struct Vs2013Aligned<2, T> { typedef __declspec(align(2)) T type; };
+	template<typename T> struct Vs2013Aligned<4, T> { typedef __declspec(align(4)) T type; };
+	template<typename T> struct Vs2013Aligned<8, T> { typedef __declspec(align(8)) T type; };
+	template<typename T> struct Vs2013Aligned<16, T> { typedef __declspec(align(16)) T type; };
+	template<typename T> struct Vs2013Aligned<32, T> { typedef __declspec(align(32)) T type; };
+	template<typename T> struct Vs2013Aligned<64, T> { typedef __declspec(align(64)) T type; };
+	template<typename T> struct Vs2013Aligned<128, T> { typedef __declspec(align(128)) T type; };
+	template<typename T> struct Vs2013Aligned<256, T> { typedef __declspec(align(256)) T type; };
+#else
+	template<typename T> struct identity { typedef T type; };
+#define MOODYCAMEL_ALIGNAS(alignment) alignas(alignment)
+#define MOODYCAMEL_ALIGNOF(obj) alignof(obj)
+#define MOODYCAMEL_ALIGNED_TYPE_LIKE(T, obj) alignas(alignof(obj)) typename details::identity<T>::type
+#endif
+#endif
+} }
+
+
+// TSAN can falsely report races in lock-free code.  To enable TSAN to be used from projects that use this one,
+// we can apply per-function compile-time suppression.
+// See https://clang.llvm.org/docs/ThreadSanitizer.html#has-feature-thread-sanitizer
+#define MOODYCAMEL_NO_TSAN
+#if defined(__has_feature)
+ #if __has_feature(thread_sanitizer)
+  #undef MOODYCAMEL_NO_TSAN
+  #define MOODYCAMEL_NO_TSAN __attribute__((no_sanitize("thread")))
+ #endif // TSAN
+#endif // TSAN
+
+// Compiler-specific likely/unlikely hints
+namespace moodycamel { namespace details {
+#if defined(__GNUC__)
+	static inline bool (likely)(bool x) { return __builtin_expect((x), true); }
+	static inline bool (unlikely)(bool x) { return __builtin_expect((x), false); }
+#else
+	static inline bool (likely)(bool x) { return x; }
+	static inline bool (unlikely)(bool x) { return x; }
+#endif
+} }
+
+#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG
+#include "internal/concurrentqueue_internal_debug.h"
+#endif
+
+namespace moodycamel {
+namespace details {
+	template<typename T>
+	struct const_numeric_max {
+		static_assert(std::is_integral<T>::value, "const_numeric_max can only be used with integers");
+		static const T value = std::numeric_limits<T>::is_signed
+			? (static_cast<T>(1) << (sizeof(T) * CHAR_BIT - 1)) - static_cast<T>(1)
+			: static_cast<T>(-1);
+	};
+
+#if defined(__GLIBCXX__)
+	typedef ::max_align_t std_max_align_t;      // libstdc++ forgot to add it to std:: for a while
+#else
+	typedef std::max_align_t std_max_align_t;   // Others (e.g. MSVC) insist it can *only* be accessed via std::
+#endif
+
+	// Some platforms have incorrectly set max_align_t to a type with <8 bytes alignment even while supporting
+	// 8-byte aligned scalar values (*cough* 32-bit iOS). Work around this with our own union. See issue #64.
+	typedef union {
+		std_max_align_t x;
+		long long y;
+		void* z;
+	} max_align_t;
+}
+
+// Default traits for the ConcurrentQueue. To change some of the
+// traits without re-implementing all of them, inherit from this
+// struct and shadow the declarations you wish to be different;
+// since the traits are used as a template type parameter, the
+// shadowed declarations will be used where defined, and the defaults
+// otherwise.
+struct ConcurrentQueueDefaultTraits
+{
+	// General-purpose size type. std::size_t is strongly recommended.
+	typedef std::size_t size_t;
+	
+	// The type used for the enqueue and dequeue indices. Must be at least as
+	// large as size_t. Should be significantly larger than the number of elements
+	// you expect to hold at once, especially if you have a high turnover rate;
+	// for example, on 32-bit x86, if you expect to have over a hundred million
+	// elements or pump several million elements through your queue in a very
+	// short space of time, using a 32-bit type *may* trigger a race condition.
+	// A 64-bit int type is recommended in that case, and in practice will
+	// prevent a race condition no matter the usage of the queue. Note that
+	// whether the queue is lock-free with a 64-bit int type depends on whether
+	// std::atomic<std::uint64_t> is lock-free, which is platform-specific.
+	typedef std::size_t index_t;
+	
+	// Internally, all elements are enqueued and dequeued from multi-element
+	// blocks; this is the smallest controllable unit. If you expect few elements
+	// but many producers, a smaller block size should be favoured. For few producers
+	// and/or many elements, a larger block size is preferred. A sane default
+	// is provided. Must be a power of 2.
+	static const size_t BLOCK_SIZE = 32;
+	
+	// For explicit producers (i.e. when using a producer token), the block is
+	// checked for being empty by iterating through a list of flags, one per element.
+	// For large block sizes, this is too inefficient, and switching to an atomic
+	// counter-based approach is faster. The switch is made for block sizes strictly
+	// larger than this threshold.
+	static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = 32;
+	
+	// How many full blocks can be expected for a single explicit producer? This should
+	// reflect that number's maximum for optimal performance. Must be a power of 2.
+	static const size_t EXPLICIT_INITIAL_INDEX_SIZE = 32;
+	
+	// How many full blocks can be expected for a single implicit producer? This should
+	// reflect that number's maximum for optimal performance. Must be a power of 2.
+	static const size_t IMPLICIT_INITIAL_INDEX_SIZE = 32;
+	
+	// The initial size of the hash table mapping thread IDs to implicit producers.
+	// Note that the hash is resized every time it becomes half full.
+	// Must be a power of two, and either 0 or at least 1. If 0, implicit production
+	// (using the enqueue methods without an explicit producer token) is disabled.
+	static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = 32;
+	
+	// Controls the number of items that an explicit consumer (i.e. one with a token)
+	// must consume before it causes all consumers to rotate and move on to the next
+	// internal queue.
+	static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = 256;
+	
+	// The maximum number of elements (inclusive) that can be enqueued to a sub-queue.
+	// Enqueue operations that would cause this limit to be surpassed will fail. Note
+	// that this limit is enforced at the block level (for performance reasons), i.e.
+	// it's rounded up to the nearest block size.
+	static const size_t MAX_SUBQUEUE_SIZE = details::const_numeric_max<size_t>::value;
+
+	// The number of times to spin before sleeping when waiting on a semaphore.
+	// Recommended values are on the order of 1000-10000 unless the number of
+	// consumer threads exceeds the number of idle cores (in which case try 0-100).
+	// Only affects instances of the BlockingConcurrentQueue.
+	static const int MAX_SEMA_SPINS = 10000;
+
+	// Whether to recycle dynamically-allocated blocks into an internal free list or
+	// not. If false, only pre-allocated blocks (controlled by the constructor
+	// arguments) will be recycled, and all others will be `free`d back to the heap.
+	// Note that blocks consumed by explicit producers are only freed on destruction
+	// of the queue (not following destruction of the token) regardless of this trait.
+	static const bool RECYCLE_ALLOCATED_BLOCKS = false;
+
+	
+#ifndef MCDBGQ_USE_RELACY
+	// Memory allocation can be customized if needed.
+	// malloc should return nullptr on failure, and handle alignment like std::malloc.
+#if defined(malloc) || defined(free)
+	// Gah, this is 2015, stop defining macros that break standard code already!
+	// Work around malloc/free being special macros:
+	static inline void* WORKAROUND_malloc(size_t size) { return malloc(size); }
+	static inline void WORKAROUND_free(void* ptr) { return free(ptr); }
+	static inline void* (malloc)(size_t size) { return WORKAROUND_malloc(size); }
+	static inline void (free)(void* ptr) { return WORKAROUND_free(ptr); }
+#else
+	static inline void* malloc(size_t size) { return std::malloc(size); }
+	static inline void free(void* ptr) { return std::free(ptr); }
+#endif
+#else
+	// Debug versions when running under the Relacy race detector (ignore
+	// these in user code)
+	static inline void* malloc(size_t size) { return rl::rl_malloc(size, $); }
+	static inline void free(void* ptr) { return rl::rl_free(ptr, $); }
+#endif
+};
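+
+// Example of customizing a trait (a sketch, not part of the original header): inherit from
+// the defaults, shadow only the declarations you want to change, and pass the new type as
+// the queue's second template parameter.
+//
+//   struct BigBlockTraits : public moodycamel::ConcurrentQueueDefaultTraits
+//   {
+//       static const size_t BLOCK_SIZE = 256;   // must be a power of 2
+//   };
+//   moodycamel::ConcurrentQueue<int, BigBlockTraits> q;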
+
+
+// When producing or consuming many elements, the most efficient way is to:
+//    1) Use one of the bulk-operation methods of the queue with a token
+//    2) Failing that, use the bulk-operation methods without a token
+//    3) Failing that, create a token and use that with the single-item methods
+//    4) Failing that, use the single-parameter methods of the queue
+// Having said that, don't create tokens willy-nilly -- ideally there should be
+// a maximum of one token per thread (of each kind).
+struct ProducerToken;
+struct ConsumerToken;
+
+template<typename T, typename Traits> class ConcurrentQueue;
+template<typename T, typename Traits> class BlockingConcurrentQueue;
+class ConcurrentQueueTests;
+
+
+namespace details
+{
+	struct ConcurrentQueueProducerTypelessBase
+	{
+		ConcurrentQueueProducerTypelessBase* next;
+		std::atomic<bool> inactive;
+		ProducerToken* token;
+		
+		ConcurrentQueueProducerTypelessBase()
+			: next(nullptr), inactive(false), token(nullptr)
+		{
+		}
+	};
+	
+	template<bool use32> struct _hash_32_or_64 {
+		static inline std::uint32_t hash(std::uint32_t h)
+		{
+			// MurmurHash3 finalizer -- see https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
+			// Since the thread ID is already unique, all we really want to do is propagate that
+			// uniqueness evenly across all the bits, so that we can use a subset of the bits while
+			// reducing collisions significantly
+			h ^= h >> 16;
+			h *= 0x85ebca6b;
+			h ^= h >> 13;
+			h *= 0xc2b2ae35;
+			return h ^ (h >> 16);
+		}
+	};
+	template<> struct _hash_32_or_64<1> {
+		static inline std::uint64_t hash(std::uint64_t h)
+		{
+			h ^= h >> 33;
+			h *= 0xff51afd7ed558ccd;
+			h ^= h >> 33;
+			h *= 0xc4ceb9fe1a85ec53;
+			return h ^ (h >> 33);
+		}
+	};
+	template<std::size_t size> struct hash_32_or_64 : public _hash_32_or_64<(size > 4)> {  };
+	
+	static inline size_t hash_thread_id(thread_id_t id)
+	{
+		static_assert(sizeof(thread_id_t) <= 8, "Expected a platform where thread IDs are at most 64-bit values");
+		return static_cast<size_t>(hash_32_or_64<sizeof(thread_id_converter<thread_id_t>::thread_id_hash_t)>::hash(
+			thread_id_converter<thread_id_t>::prehash(id)));
+	}
+	
+	template<typename T>
+	static inline bool circular_less_than(T a, T b)
+	{
+		static_assert(std::is_integral<T>::value && !std::numeric_limits<T>::is_signed, "circular_less_than is intended to be used only with unsigned integer types");
+		return static_cast<T>(a - b) > static_cast<T>(static_cast<T>(1) << (static_cast<T>(sizeof(T) * CHAR_BIT - 1)));
+		// Note: extra parens around rhs of operator<< is MSVC bug: https://developercommunity2.visualstudio.com/t/C4554-triggers-when-both-lhs-and-rhs-is/10034931
+		//       silencing the bug requires #pragma warning(disable: 4554) around the calling code and has no effect when done here.
+	}
+	
+	template<typename U>
+	static inline char* align_for(char* ptr)
+	{
+		const std::size_t alignment = std::alignment_of<U>::value;
+		return ptr + (alignment - (reinterpret_cast<std::uintptr_t>(ptr) % alignment)) % alignment;
+	}
+
+	template<typename T>
+	static inline T ceil_to_pow_2(T x)
+	{
+		static_assert(std::is_integral<T>::value && !std::numeric_limits<T>::is_signed, "ceil_to_pow_2 is intended to be used only with unsigned integer types");
+
+		// Adapted from http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+		--x;
+		x |= x >> 1;
+		x |= x >> 2;
+		x |= x >> 4;
+		for (std::size_t i = 1; i < sizeof(T); i <<= 1) {
+			x |= x >> (i << 3);
+		}
+		++x;
+		return x;
+	}
+	
+	template<typename T>
+	static inline void swap_relaxed(std::atomic<T>& left, std::atomic<T>& right)
+	{
+		T temp = left.load(std::memory_order_relaxed);
+		left.store(right.load(std::memory_order_relaxed), std::memory_order_relaxed);
+		right.store(temp, std::memory_order_relaxed);
+	}
+	
+	template<typename T>
+	static inline T const& nomove(T const& x)
+	{
+		return x;
+	}
+	
+	template<bool Enable>
+	struct nomove_if
+	{
+		template<typename T>
+		static inline T const& eval(T const& x)
+		{
+			return x;
+		}
+	};
+	
+	template<>
+	struct nomove_if<false>
+	{
+		template<typename U>
+		static inline auto eval(U&& x)
+			-> decltype(std::forward<U>(x))
+		{
+			return std::forward<U>(x);
+		}
+	};
+	
+	template<typename It>
+	static inline auto deref_noexcept(It& it) MOODYCAMEL_NOEXCEPT -> decltype(*it)
+	{
+		return *it;
+	}
+	
+#if defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+	template<typename T> struct is_trivially_destructible : std::is_trivially_destructible<T> { };
+#else
+	template<typename T> struct is_trivially_destructible : std::has_trivial_destructor<T> { };
+#endif
+	
+#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED
+#ifdef MCDBGQ_USE_RELACY
+	typedef RelacyThreadExitListener ThreadExitListener;
+	typedef RelacyThreadExitNotifier ThreadExitNotifier;
+#else
+	class ThreadExitNotifier;
+
+	struct ThreadExitListener
+	{
+		typedef void (*callback_t)(void*);
+		callback_t callback;
+		void* userData;
+		
+		ThreadExitListener* next;		// reserved for use by the ThreadExitNotifier
+		ThreadExitNotifier* chain;		// reserved for use by the ThreadExitNotifier
+	};
+
+	class ThreadExitNotifier
+	{
+	public:
+		static void subscribe(ThreadExitListener* listener)
+		{
+			auto& tlsInst = instance();
+			std::lock_guard<std::mutex> guard(mutex());
+			listener->next = tlsInst.tail;
+			listener->chain = &tlsInst;
+			tlsInst.tail = listener;
+		}
+		
+		static void unsubscribe(ThreadExitListener* listener)
+		{
+			std::lock_guard<std::mutex> guard(mutex());
+			if (!listener->chain) {
+				return;  // race with ~ThreadExitNotifier
+			}
+			auto& tlsInst = *listener->chain;
+			listener->chain = nullptr;
+			ThreadExitListener** prev = &tlsInst.tail;
+			for (auto ptr = tlsInst.tail; ptr != nullptr; ptr = ptr->next) {
+				if (ptr == listener) {
+					*prev = ptr->next;
+					break;
+				}
+				prev = &ptr->next;
+			}
+		}
+		
+	private:
+		ThreadExitNotifier() : tail(nullptr) { }
+		ThreadExitNotifier(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION;
+		ThreadExitNotifier& operator=(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION;
+		
+		~ThreadExitNotifier()
+		{
+			// This thread is about to exit, let everyone know!
+			assert(this == &instance() && "If this assert fails, you likely have a buggy compiler! Change the preprocessor conditions such that MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is no longer defined.");
+			std::lock_guard<std::mutex> guard(mutex());
+			for (auto ptr = tail; ptr != nullptr; ptr = ptr->next) {
+				ptr->chain = nullptr;
+				ptr->callback(ptr->userData);
+			}
+		}
+		
+		// Thread-local
+		static inline ThreadExitNotifier& instance()
+		{
+			static thread_local ThreadExitNotifier notifier;
+			return notifier;
+		}
+
+		static inline std::mutex& mutex()
+		{
+			// Must be static because the ThreadExitNotifier could be destroyed while unsubscribe is called
+			static std::mutex mutex;
+			return mutex;
+		}
+		
+	private:
+		ThreadExitListener* tail;
+	};
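+	// Usage sketch (illustrative): a component that needs per-thread cleanup fills in a
+	// ThreadExitListener with a callback and userData, calls
+	// ThreadExitNotifier::subscribe(&listener) on the thread in question, and calls
+	// unsubscribe() if it is torn down early; any listeners still subscribed when the
+	// thread exits have their callbacks invoked by the thread-local notifier's
+	// destructor above.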
+#endif
+#endif
+	
+	template<typename T> struct static_is_lock_free_num { enum { value = 0 }; };
+	template<> struct static_is_lock_free_num<signed char> { enum { value = ATOMIC_CHAR_LOCK_FREE }; };
+	template<> struct static_is_lock_free_num<short> { enum { value = ATOMIC_SHORT_LOCK_FREE }; };
+	template<> struct static_is_lock_free_num<int> { enum { value = ATOMIC_INT_LOCK_FREE }; };
+	template<> struct static_is_lock_free_num<long> { enum { value = ATOMIC_LONG_LOCK_FREE }; };
+	template<> struct static_is_lock_free_num<long long> { enum { value = ATOMIC_LLONG_LOCK_FREE }; };
+	template<typename T> struct static_is_lock_free : static_is_lock_free_num<typename std::make_signed<T>::type> {  };
+	template<> struct static_is_lock_free<bool> { enum { value = ATOMIC_BOOL_LOCK_FREE }; };
+	template<typename U> struct static_is_lock_free<U*> { enum { value = ATOMIC_POINTER_LOCK_FREE }; };
+}
+
+
+struct ProducerToken
+{
+	template<typename T, typename Traits>
+	explicit ProducerToken(ConcurrentQueue<T, Traits>& queue);
+	
+	template<typename T, typename Traits>
+	explicit ProducerToken(BlockingConcurrentQueue<T, Traits>& queue);
+	
+	ProducerToken(ProducerToken&& other) MOODYCAMEL_NOEXCEPT
+		: producer(other.producer)
+	{
+		other.producer = nullptr;
+		if (producer != nullptr) {
+			producer->token = this;
+		}
+	}
+	
+	inline ProducerToken& operator=(ProducerToken&& other) MOODYCAMEL_NOEXCEPT
+	{
+		swap(other);
+		return *this;
+	}
+	
+	void swap(ProducerToken& other) MOODYCAMEL_NOEXCEPT
+	{
+		std::swap(producer, other.producer);
+		if (producer != nullptr) {
+			producer->token = this;
+		}
+		if (other.producer != nullptr) {
+			other.producer->token = &other;
+		}
+	}
+	
+	// A token is always valid unless:
+	//     1) Memory allocation failed during construction
+	//     2) It was moved via the move constructor
+	//        (Note: assignment does a swap, leaving both potentially valid)
+	//     3) The associated queue was destroyed
+	// Note that if valid() returns true, that only indicates
+	// that the token is valid for use with a specific queue,
+	// but not which one; that's up to the user to track.
+	inline bool valid() const { return producer != nullptr; }
+	
+	~ProducerToken()
+	{
+		if (producer != nullptr) {
+			producer->token = nullptr;
+			producer->inactive.store(true, std::memory_order_release);
+		}
+	}
+	
+	// Disable copying and assignment
+	ProducerToken(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION;
+	ProducerToken& operator=(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION;
+	
+private:
+	template<typename T, typename Traits> friend class ConcurrentQueue;
+	friend class ConcurrentQueueTests;
+	
+protected:
+	details::ConcurrentQueueProducerTypelessBase* producer;
+};
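+// Usage sketch (illustrative; a typical pattern, not the only one): a producer thread
+// constructs one token per queue and passes it to the token-taking enqueue overloads:
+//     moodycamel::ConcurrentQueue<int> q;
+//     moodycamel::ProducerToken ptok(q);
+//     q.enqueue(ptok, 42);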
+
+
+struct ConsumerToken
+{
+	template<typename T, typename Traits>
+	explicit ConsumerToken(ConcurrentQueue<T, Traits>& q);
+	
+	template<typename T, typename Traits>
+	explicit ConsumerToken(BlockingConcurrentQueue<T, Traits>& q);
+	
+	ConsumerToken(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT
+		: initialOffset(other.initialOffset), lastKnownGlobalOffset(other.lastKnownGlobalOffset), itemsConsumedFromCurrent(other.itemsConsumedFromCurrent), currentProducer(other.currentProducer), desiredProducer(other.desiredProducer)
+	{
+	}
+	
+	inline ConsumerToken& operator=(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT
+	{
+		swap(other);
+		return *this;
+	}
+	
+	void swap(ConsumerToken& other) MOODYCAMEL_NOEXCEPT
+	{
+		std::swap(initialOffset, other.initialOffset);
+		std::swap(lastKnownGlobalOffset, other.lastKnownGlobalOffset);
+		std::swap(itemsConsumedFromCurrent, other.itemsConsumedFromCurrent);
+		std::swap(currentProducer, other.currentProducer);
+		std::swap(desiredProducer, other.desiredProducer);
+	}
+	
+	// Disable copying and assignment
+	ConsumerToken(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION;
+	ConsumerToken& operator=(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION;
+
+private:
+	template<typename T, typename Traits> friend class ConcurrentQueue;
+	friend class ConcurrentQueueTests;
+	
+private: // but shared with ConcurrentQueue
+	std::uint32_t initialOffset;
+	std::uint32_t lastKnownGlobalOffset;
+	std::uint32_t itemsConsumedFromCurrent;
+	details::ConcurrentQueueProducerTypelessBase* currentProducer;
+	details::ConcurrentQueueProducerTypelessBase* desiredProducer;
+};
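+// Usage sketch (illustrative): a consumer thread can likewise create a token and use
+// the token-taking dequeue overloads:
+//     moodycamel::ConsumerToken ctok(q);
+//     int item;
+//     if (q.try_dequeue(ctok, item)) { /* got one */ }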
+
+// Need to forward-declare this swap because it's in a namespace.
+// See http://stackoverflow.com/questions/4492062/why-does-a-c-friend-class-need-a-forward-declaration-only-in-other-namespaces
+template<typename T, typename Traits>
+inline void swap(typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& a, typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT;
+
+
+template<typename T, typename Traits = ConcurrentQueueDefaultTraits>
+class ConcurrentQueue
+{
+public:
+	typedef ::moodycamel::ProducerToken producer_token_t;
+	typedef ::moodycamel::ConsumerToken consumer_token_t;
+	
+	typedef typename Traits::index_t index_t;
+	typedef typename Traits::size_t size_t;
+	
+	static const size_t BLOCK_SIZE = static_cast<size_t>(Traits::BLOCK_SIZE);
+	static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = static_cast<size_t>(Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD);
+	static const size_t EXPLICIT_INITIAL_INDEX_SIZE = static_cast<size_t>(Traits::EXPLICIT_INITIAL_INDEX_SIZE);
+	static const size_t IMPLICIT_INITIAL_INDEX_SIZE = static_cast<size_t>(Traits::IMPLICIT_INITIAL_INDEX_SIZE);
+	static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = static_cast<size_t>(Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE);
+	static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = static_cast<std::uint32_t>(Traits::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE);
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4307)		// + integral constant overflow (that's what the ternary expression is for!)
+#pragma warning(disable: 4309)		// static_cast: Truncation of constant value
+#endif
+	static const size_t MAX_SUBQUEUE_SIZE = (details::const_numeric_max<size_t>::value - static_cast<size_t>(Traits::MAX_SUBQUEUE_SIZE) < BLOCK_SIZE) ? details::const_numeric_max<size_t>::value : ((static_cast<size_t>(Traits::MAX_SUBQUEUE_SIZE) + (BLOCK_SIZE - 1)) / BLOCK_SIZE * BLOCK_SIZE);
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+	static_assert(!std::numeric_limits<size_t>::is_signed && std::is_integral<size_t>::value, "Traits::size_t must be an unsigned integral type");
+	static_assert(!std::numeric_limits<index_t>::is_signed && std::is_integral<index_t>::value, "Traits::index_t must be an unsigned integral type");
+	static_assert(sizeof(index_t) >= sizeof(size_t), "Traits::index_t must be at least as wide as Traits::size_t");
+	static_assert((BLOCK_SIZE > 1) && !(BLOCK_SIZE & (BLOCK_SIZE - 1)), "Traits::BLOCK_SIZE must be a power of 2 (and at least 2)");
+	static_assert((EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD > 1) && !(EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD & (EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD - 1)), "Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD must be a power of 2 (and greater than 1)");
+	static_assert((EXPLICIT_INITIAL_INDEX_SIZE > 1) && !(EXPLICIT_INITIAL_INDEX_SIZE & (EXPLICIT_INITIAL_INDEX_SIZE - 1)), "Traits::EXPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)");
+	static_assert((IMPLICIT_INITIAL_INDEX_SIZE > 1) && !(IMPLICIT_INITIAL_INDEX_SIZE & (IMPLICIT_INITIAL_INDEX_SIZE - 1)), "Traits::IMPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)");
+	static_assert((INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) || !(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE & (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - 1)), "Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be a power of 2");
+	static_assert(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0 || INITIAL_IMPLICIT_PRODUCER_HASH_SIZE >= 1, "Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be at least 1 (or 0 to disable implicit enqueueing)");
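+	// Illustrative sketch (one way to customize the constants checked above; it derives
+	// from ConcurrentQueueDefaultTraits, the class template's default Traits argument,
+	// and MyTraits is just an example name):
+	//     struct MyTraits : public moodycamel::ConcurrentQueueDefaultTraits {
+	//         static const size_t BLOCK_SIZE = 64;    // must stay a power of 2 (and at least 2)
+	//     };
+	//     moodycamel::ConcurrentQueue<int, MyTraits> q;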
+
+public:
+	// Creates a queue with at least `capacity` element slots; note that the
+	// actual number of elements that can be inserted without additional memory
+	// allocation depends on the number of producers and the block size (e.g. if
+	// the block size is equal to `capacity`, only a single block will be allocated
+	// up-front, which means only a single producer will be able to enqueue elements
+	// without an extra allocation -- blocks aren't shared between producers).
+	// This method is not thread safe -- it is up to the user to ensure that the
+	// queue is fully constructed before it starts being used by other threads (this
+	// includes making the memory effects of construction visible, possibly with a
+	// memory barrier).
+	explicit ConcurrentQueue(size_t capacity = 32 * BLOCK_SIZE)
+		: producerListTail(nullptr),
+		producerCount(0),
+		initialBlockPoolIndex(0),
+		nextExplicitConsumerId(0),
+		globalExplicitConsumerOffset(0)
+	{
+		implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);
+		populate_initial_implicit_producer_hash();
+		populate_initial_block_list(capacity / BLOCK_SIZE + ((capacity & (BLOCK_SIZE - 1)) == 0 ? 0 : 1));
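+		// Worked example (illustrative, taking BLOCK_SIZE == 32): capacity == 100 gives
+		// 100 / 32 + 1 == 4 pre-allocated blocks, i.e. 128 raw element slots -- though,
+		// as the comment above notes, how many elements can actually be enqueued without
+		// further allocation depends on how the blocks end up distributed among producers.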
+		
+#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG
+		// Track all the producers using a fully-resolved typed list for
+		// each kind; this makes it possible to debug them starting from
+		// the root queue object (otherwise wacky casts are needed that
+		// don't compile in the debugger's expression evaluator).
+		explicitProducers.store(nullptr, std::memory_order_relaxed);
+		implicitProducers.store(nullptr, std::memory_order_relaxed);
+#endif
+	}
+	
+	// Computes the correct number of pre-allocated blocks for you based
+	// on the minimum number of elements you want available at any given
+	// time and the maximum number of each type of producer that may be
+	// active concurrently.
+	ConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers)
+		: producerListTail(nullptr),
+		producerCount(0),
+		initialBlockPoolIndex(0),
+		nextExplicitConsumerId(0),
+		globalExplicitConsumerOffset(0)
+	{
+		implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);
+		populate_initial_implicit_producer_hash();
+		size_t blocks = (((minCapacity + BLOCK_SIZE - 1) / BLOCK_SIZE) - 1) * (maxExplicitProducers + 1) + 2 * (maxExplicitProducers + maxImplicitProducers);
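+		// Worked example (illustrative, taking BLOCK_SIZE == 32): minCapacity == 256,
+		// maxExplicitProducers == 2 and maxImplicitProducers == 4 gives
+		// ((256 + 31) / 32 - 1) * (2 + 1) + 2 * (2 + 4) == 7 * 3 + 12 == 33 blocks.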
+		populate_initial_block_list(blocks);
+		
+#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG
+		explicitProducers.store(nullptr, std::memory_order_relaxed);
+		implicitProducers.store(nullptr, std::memory_order_relaxed);
+#endif
+	}
+	
+	// Note: The queue should not be accessed concurrently while it's
+	// being deleted. It's up to the user to synchronize this.
+	// This method is not thread safe.
+	~ConcurrentQueue()
+	{
+		// Destroy producers
+		auto ptr = producerListTail.load(std::memory_order_relaxed);
+		while (ptr != nullptr) {
+			auto next = ptr->next_prod();
+			if (ptr->token != nullptr) {
+				ptr->token->producer = nullptr;
+			}
+			destroy(ptr);
+			ptr = next;
+		}
+		
+		// Destroy implicit producer hash tables
+		MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE != 0) {
+			auto hash = implicitProducerHash.load(std::memory_order_relaxed);
+			while (hash != nullptr) {
+				auto prev = hash->prev;
+				if (prev != nullptr) {		// The last hash is part of this object and was not allocated dynamically
+					for (size_t i = 0; i != hash->capacity; ++i) {
+						hash->entries[i].~ImplicitProducerKVP();
+					}
+					hash->~ImplicitProducerHash();
+					(Traits::free)(hash);
+				}
+				hash = prev;
+			}
+		}
+		
+		// Destroy global free list
+		auto block = freeList.head_unsafe();
+		while (block != nullptr) {
+			auto next = block->freeListNext.load(std::memory_order_relaxed);
+			if (block->dynamicallyAllocated) {
+				destroy(block);
+			}
+			block = next;
+		}
+		
+		// Destroy initial free list
+		destroy_array(initialBlockPool, initialBlockPoolSize);
+	}
+
+	// Disable copying and copy assignment
+	ConcurrentQueue(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION;
+	ConcurrentQueue& operator=(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION;
+	
+	// Moving is supported, but note that it is *not* a thread-safe operation.
+	// Nobody can use the queue while it's being moved, and the memory effects
+	// of that move must be propagated to other threads before they can use it.
+	// Note: When a queue is moved, its tokens are still valid but can only be
+	// used with the destination queue (i.e. semantically they are moved along
+	// with the queue itself).
+	ConcurrentQueue(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT
+		: producerListTail(other.producerListTail.load(std::memory_order_relaxed)),
+		producerCount(other.producerCount.load(std::memory_order_relaxed)),
+		initialBlockPoolIndex(other.initialBlockPoolIndex.load(std::memory_order_relaxed)),
+		initialBlockPool(other.initialBlockPool),
+		initialBlockPoolSize(other.initialBlockPoolSize),
+		freeList(std::move(other.freeList)),
+		nextExplicitConsumerId(other.nextExplicitConsumerId.load(std::memory_order_relaxed)),
+		globalExplicitConsumerOffset(other.globalExplicitConsumerOffset.load(std::memory_order_relaxed))
+	{
+		// Move the other queue's state into this one, leaving the other as an empty queue
+		implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);
+		populate_initial_implicit_producer_hash();
+		swap_implicit_producer_hashes(other);
+		
+		other.producerListTail.store(nullptr, std::memory_order_relaxed);
+		other.producerCount.store(0, std::memory_order_relaxed);
+		other.nextExplicitConsumerId.store(0, std::memory_order_relaxed);
+		other.globalExplicitConsumerOffset.store(0, std::memory_order_relaxed);
+		
+#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG
+		explicitProducers.store(other.explicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed);
+		other.explicitProducers.store(nullptr, std::memory_order_relaxed);
+		implicitProducers.store(other.implicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed);
+		other.implicitProducers.store(nullptr, std::memory_order_relaxed);
+#endif
+		
+		other.initialBlockPoolIndex.store(0, std::memory_order_relaxed);
+		other.initialBlockPoolSize = 0;
+		other.initialBlockPool = nullptr;
+		
+		reown_producers();
+	}
+	
+	inline ConcurrentQueue& operator=(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT
+	{
+		return swap_internal(other);
+	}
+	
+	// Swaps this queue's state with the other's. Not thread-safe.
+	// Swapping two queues does not invalidate their tokens, however
+	// the tokens that were created for one queue must be used with
+	// only the swapped queue (i.e. the tokens are tied to the
+	// queue's movable state, not the object itself).
+	inline void swap(ConcurrentQueue& other) MOODYCAMEL_NOEXCEPT
+	{
+		swap_internal(other);
+	}
+	
+private:
+	ConcurrentQueue& swap_internal(ConcurrentQueue& other)
+	{
+		if (this == &other) {
+			return *this;
+		}
+		
+		details::swap_relaxed(producerListTail, other.producerListTail);
+		details::swap_relaxed(producerCount, other.producerCount);
+		details::swap_relaxed(initialBlockPoolIndex, other.initialBlockPoolIndex);
+		std::swap(initialBlockPool, other.initialBlockPool);
+		std::swap(initialBlockPoolSize, other.initialBlockPoolSize);
+		freeList.swap(other.freeList);
+		details::swap_relaxed(nextExplicitConsumerId, other.nextExplicitConsumerId);
+		details::swap_relaxed(globalExplicitConsumerOffset, other.globalExplicitConsumerOffset);
+		
+		swap_implicit_producer_hashes(other);
+		
+		reown_producers();
+		other.reown_producers();
+		
+#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG
+		details::swap_relaxed(explicitProducers, other.explicitProducers);
+		details::swap_relaxed(implicitProducers, other.implicitProducers);
+#endif
+		
+		return *this;
+	}
+	
+public:
+	// Enqueues a single item (by copying it).
+	// Allocates memory if required. Only fails if memory allocation fails (or implicit
+	// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0,
+	// or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Thread-safe.
+	inline bool enqueue(T const& item)
+	{
+		MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;
+		else return inner_enqueue<CanAlloc>(item);
+	}
+	
+	// Enqueues a single item (by moving it, if possible).
+	// Allocates memory if required. Only fails if memory allocation fails (or implicit
+	// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0,
+	// or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Thread-safe.
+	inline bool enqueue(T&& item)
+	{
+		MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;
+		else return inner_enqueue<CanAlloc>(std::move(item));
+	}
+	
+	// Enqueues a single item (by copying it) using an explicit producer token.
+	// Allocates memory if required. Only fails if memory allocation fails (or
+	// Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Thread-safe.
+	inline bool enqueue(producer_token_t const& token, T const& item)
+	{
+		return inner_enqueue<CanAlloc>(token, item);
+	}
+	
+	// Enqueues a single item (by moving it, if possible) using an explicit producer token.
+	// Allocates memory if required. Only fails if memory allocation fails (or
+	// Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Thread-safe.
+	inline bool enqueue(producer_token_t const& token, T&& item)
+	{
+		return inner_enqueue<CanAlloc>(token, std::move(item));
+	}
+	
+	// Enqueues several items.
+	// Allocates memory if required. Only fails if memory allocation fails (or
+	// implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE
+	// is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Note: Use std::make_move_iterator if the elements should be moved instead of copied.
+	// Thread-safe.
+	template<typename It>
+	bool enqueue_bulk(It itemFirst, size_t count)
+	{
+		MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;
+		else return inner_enqueue_bulk<CanAlloc>(itemFirst, count);
+	}
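+	// Usage sketch (illustrative; batch stands in for any container of elements):
+	//     std::vector<std::string> batch = /* ... */;
+	//     q.enqueue_bulk(std::make_move_iterator(batch.begin()), batch.size());
+	// wrapping the source iterator moves the strings into the queue rather than copying them.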
+	
+	// Enqueues several items using an explicit producer token.
+	// Allocates memory if required. Only fails if memory allocation fails
+	// (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).
+	// Note: Use std::make_move_iterator if the elements should be moved
+	// instead of copied.
+	// Thread-safe.
+	template<typename It>
+	bool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)
+	{
+		return inner_enqueue_bulk<CanAlloc>(token, itemFirst, count);
+	}
+	
+	// Enqueues a single item (by copying it).
+	// Does not allocate memory. Fails if not enough room to enqueue (or implicit
+	// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE
+	// is 0).
+	// Thread-safe.
+	inline bool try_enqueue(T const& item)
+	{
+		MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;
+		else return inner_enqueue<CannotAlloc>(item);
+	}
+	
+	// Enqueues a single item (by moving it, if possible).
+	// Does not allocate memory (except for one-time implicit producer).
+	// Fails if not enough room to enqueue (or implicit production is
+	// disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0).
+	// Thread-safe.
+	inline bool try_enqueue(T&& item)
+	{
+		MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;
+		else return inner_enqueue<CannotAlloc>(std::move(item));
+	}
+	
+	// Enqueues a single item (by copying it) using an explicit producer token.
+	// Does not allocate memory. Fails if not enough room to enqueue.
+	// Thread-safe.
+	inline bool try_enqueue(producer_token_t const& token, T const& item)
+	{
+		return inner_enqueue<CannotAlloc>(token, item);
+	}
+	
+	// Enqueues a single item (by moving it, if possible) using an explicit producer token.
+	// Does not allocate memory. Fails if not enough room to enqueue.
+	// Thread-safe.
+	inline bool try_enqueue(producer_token_t const& token, T&& item)
+	{
+		return inner_enqueue<CannotAlloc>(token, std::move(item));
+	}
+	
+	// Enqueues several items.
+	// Does not allocate memory (except for one-time implicit producer).
+	// Fails if not enough room to enqueue (or implicit production is
+	// disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0).
+	// Note: Use std::make_move_iterator if the elements should be moved
+	// instead of copied.
+	// Thread-safe.
+	template<typename It>
+	bool try_enqueue_bulk(It itemFirst, size_t count)
+	{
+		MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;
+		else return inner_enqueue_bulk<CannotAlloc>(itemFirst, count);
+	}
+	
+	// Enqueues several items using an explicit producer token.
+	// Does not allocate memory. Fails if not enough room to enqueue.
+	// Note: Use std::make_move_iterator if the elements should be moved
+	// instead of copied.
+	// Thread-safe.
+	template<typename It>
+	bool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)
+	{
+		return inner_enqueue_bulk<CannotAlloc>(token, itemFirst, count);
+	}
+	
+	
+	
+	// Attempts to dequeue from the queue.
+	// Returns false if all producer streams appeared empty at the time they
+	// were checked (so, the queue is likely but not guaranteed to be empty).
+	// Never allocates. Thread-safe.
+	template<typename U>
+	bool try_dequeue(U& item)
+	{
+		// Instead of simply trying each producer in turn (which could cause needless contention on the first
+		// producer), we score them heuristically.
+		size_t nonEmptyCount = 0;
+		ProducerBase* best = nullptr;
+		size_t bestSize = 0;
+		for (auto ptr = producerListTail.load(std::memory_order_acquire); nonEmptyCount < 3 && ptr != nullptr; ptr = ptr->next_prod()) {
+			auto size = ptr->size_approx();
+			if (size > 0) {
+				if (size > bestSize) {
+					bestSize = size;
+					best = ptr;
+				}
+				++nonEmptyCount;
+			}
+		}
+		
+		// If there was at least one non-empty queue but it appears empty at the time
+		// we try to dequeue from it, we need to make sure every queue's been tried
+		if (nonEmptyCount > 0) {
+			if ((details::likely)(best->dequeue(item))) {
+				return true;
+			}
+			for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {
+				if (ptr != best && ptr->dequeue(item)) {
+					return true;
+				}
+			}
+		}
+		return false;
+	}
+	
+	// Attempts to dequeue from the queue.
+	// Returns false if all producer streams appeared empty at the time they
+	// were checked (so, the queue is likely but not guaranteed to be empty).
+	// This differs from the try_dequeue(item) method in that this one does
+	// not attempt to reduce contention by interleaving the order that producer
+	// streams are dequeued from. So, using this method can reduce overall throughput
+	// under contention, but will give more predictable results in single-threaded
+	// consumer scenarios. This is mostly only useful for internal unit tests.
+	// Never allocates. Thread-safe.
+	template<typename U>
+	bool try_dequeue_non_interleaved(U& item)
+	{
+		for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {
+			if (ptr->dequeue(item)) {
+				return true;
+			}
+		}
+		return false;
+	}
+	
+	// Attempts to dequeue from the queue using an explicit consumer token.
+	// Returns false if all producer streams appeared empty at the time they
+	// were checked (so, the queue is likely but not guaranteed to be empty).
+	// Never allocates. Thread-safe.
+	template<typename U>
+	bool try_dequeue(consumer_token_t& token, U& item)
+	{
+		// The idea is roughly as follows:
+		// Every 256 items from one producer, make everyone rotate (increase the global offset) -> this means the highest efficiency consumer dictates the rotation speed of everyone else, more or less
+		// If you see that the global offset has changed, you must reset your consumption counter and move to your designated place
+		// If there's no items where you're supposed to be, keep moving until you find a producer with some items
+		// If the global offset has not changed but you've run out of items to consume, move over from your current position until you find a producer with something in it
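+		// Illustrative example (a sketch of the rotation protocol above, using the
+		// default quota of 256 suggested by the comment): with three producers A, B and
+		// C, a token that starts on A keeps dequeueing from A until it has consumed 256
+		// items, then bumps the global offset; any token that next observes the changed
+		// offset shifts one producer over (A -> B, B -> C, C -> A) and resets its
+		// per-producer consumption counter before continuing.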
+		
+		if (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) {
+			if (!update_current_producer_after_rotation(token)) {
+				return false;
+			}
+		}
+		
+		// If there was at least one non-empty queue but it appears empty at the time
+		// we try to dequeue from it, we need to make sure every queue's been tried
+		if (static_cast<ProducerBase*>(token.currentProducer)->dequeue(item)) {
+			if (++token.itemsConsumedFromCurrent == EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) {
+				globalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed);
+			}
+			return true;
+		}
+		
+		auto tail = producerListTail.load(std::memory_order_acquire);
+		auto ptr = static_cast<ProducerBase*>(token.currentProducer)->next_prod();
+		if (ptr == nullptr) {
+			ptr = tail;
+		}
+		while (ptr != static_cast<ProducerBase*>(token.currentProducer)) {
+			if (ptr->dequeue(item)) {
+				token.currentProducer = ptr;
+				token.itemsConsumedFromCurrent = 1;
+				return true;
+			}
+			ptr = ptr->next_prod();
+			if (ptr == nullptr) {
+				ptr = tail;
+			}
+		}
+		return false;
+	}
+	
+	// Attempts to dequeue several elements from the queue.
+	// Returns the number of items actually dequeued.
+	// Returns 0 if all producer streams appeared empty at the time they
+	// were checked (so, the queue is likely but not guaranteed to be empty).
+	// Never allocates. Thread-safe.
+	template<typename It>
+	size_t try_dequeue_bulk(It itemFirst, size_t max)
+	{
+		size_t count = 0;
+		for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {
+			count += ptr->dequeue_bulk(itemFirst, max - count);
+			if (count == max) {
+				break;
+			}
+		}
+		return count;
+	}
+	
+	// Attempts to dequeue several elements from the queue using an explicit consumer token.
+	// Returns the number of items actually dequeued.
+	// Returns 0 if all producer streams appeared empty at the time they
+	// were checked (so, the queue is likely but not guaranteed to be empty).
+	// Never allocates. Thread-safe.
+	template<typename It>
+	size_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max)
+	{
+		if (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) {
+			if (!update_current_producer_after_rotation(token)) {
+				return 0;
+			}
+		}
+		
+		size_t count = static_cast<ProducerBase*>(token.currentProducer)->dequeue_bulk(itemFirst, max);
+		if (count == max) {
+			if ((token.itemsConsumedFromCurrent += static_cast<std::uint32_t>(max)) >= EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) {
+				globalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed);
+			}
+			return max;
+		}
+		token.itemsConsumedFromCurrent += static_cast<std::uint32_t>(count);
+		max -= count;
+		
+		auto tail = producerListTail.load(std::memory_order_acquire);
+		auto ptr = static_cast<ProducerBase*>(token.currentProducer)->next_prod();
+		if (ptr == nullptr) {
+			ptr = tail;
+		}
+		while (ptr != static_cast<ProducerBase*>(token.currentProducer)) {
+			auto dequeued = ptr->dequeue_bulk(itemFirst, max);
+			count += dequeued;
+			if (dequeued != 0) {
+				token.currentProducer = ptr;
+				token.itemsConsumedFromCurrent = static_cast<std::uint32_t>(dequeued);
+			}
+			if (dequeued == max) {
+				break;
+			}
+			max -= dequeued;
+			ptr = ptr->next_prod();
+			if (ptr == nullptr) {
+				ptr = tail;
+			}
+		}
+		return count;
+	}
+	
+	
+	
+	// Attempts to dequeue from a specific producer's inner queue.
+	// If you happen to know which producer you want to dequeue from, this
+	// is significantly faster than using the general-case try_dequeue methods.
+	// Returns false if the producer's queue appeared empty at the time it
+	// was checked (so, the queue is likely but not guaranteed to be empty).
+	// Never allocates. Thread-safe.
+	template<typename U>
+	inline bool try_dequeue_from_producer(producer_token_t const& producer, U& item)
+	{
+		return static_cast<ExplicitProducer*>(producer.producer)->dequeue(item);
+	}
+	
+	// Attempts to dequeue several elements from a specific producer's inner queue.
+	// Returns the number of items actually dequeued.
+	// If you happen to know which producer you want to dequeue from, this
+	// is significantly faster than using the general-case try_dequeue methods.
+	// Returns 0 if the producer's queue appeared empty at the time it
+	// was checked (so, the queue is likely but not guaranteed to be empty).
+	// Never allocates. Thread-safe.
+	template<typename It>
+	inline size_t try_dequeue_bulk_from_producer(producer_token_t const& producer, It itemFirst, size_t max)
+	{
+		return static_cast<ExplicitProducer*>(producer.producer)->dequeue_bulk(itemFirst, max);
+	}
+	
+	
+	// Returns an estimate of the total number of elements currently in the queue. This
+	// estimate is only accurate if the queue has completely stabilized before it is called
+	// (i.e. all enqueue and dequeue operations have completed and their memory effects are
+	// visible on the calling thread, and no further operations start while this method is
+	// being called).
+	// Thread-safe.
+	size_t size_approx() const
+	{
+		size_t size = 0;
+		for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {
+			size += ptr->size_approx();
+		}
+		return size;
+	}
+	
+	
+	// Returns true if the underlying atomic variables used by
+	// the queue are lock-free (they should be on most platforms).
+	// Thread-safe.
+	static constexpr bool is_lock_free()
+	{
+		return
+			details::static_is_lock_free<bool>::value == 2 &&
+			details::static_is_lock_free<size_t>::value == 2 &&
+			details::static_is_lock_free<std::uint32_t>::value == 2 &&
+			details::static_is_lock_free<index_t>::value == 2 &&
+			details::static_is_lock_free<void*>::value == 2 &&
+			details::static_is_lock_free<typename details::thread_id_converter<details::thread_id_t>::thread_id_numeric_size_t>::value == 2;
+	}
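+	// Usage sketch (illustrative): since this is constexpr, callers can check the
+	// property at compile time on platforms where it is expected to hold, e.g.
+	//     static_assert(moodycamel::ConcurrentQueue<int>::is_lock_free(), "atomics not lock-free");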
+
+
+private:
+	friend struct ProducerToken;
+	friend struct ConsumerToken;
+	struct ExplicitProducer;
+	friend struct ExplicitProducer;
+	struct ImplicitProducer;
+	friend struct ImplicitProducer;
+	friend class ConcurrentQueueTests;
+		
+	enum AllocationMode { CanAlloc, CannotAlloc };
+	
+	
+	///////////////////////////////
+	// Queue methods
+	///////////////////////////////
+	
+	template<AllocationMode canAlloc, typename U>
+	inline bool inner_enqueue(producer_token_t const& token, U&& element)
+	{
+		return static_cast<ExplicitProducer*>(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue<canAlloc>(std::forward<U>(element));
+	}
+	
+	template<AllocationMode canAlloc, typename U>
+	inline bool inner_enqueue(U&& element)
+	{
+		auto producer = get_or_add_implicit_producer();
+		return producer == nullptr ? false : producer->ConcurrentQueue::ImplicitProducer::template enqueue<canAlloc>(std::forward<U>(element));
+	}
+	
+	template<AllocationMode canAlloc, typename It>
+	inline bool inner_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)
+	{
+		return static_cast<ExplicitProducer*>(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue_bulk<canAlloc>(itemFirst, count);
+	}
+	
+	template<AllocationMode canAlloc, typename It>
+	inline bool inner_enqueue_bulk(It itemFirst, size_t count)
+	{
+		auto producer = get_or_add_implicit_producer();
+		return producer == nullptr ? false : producer->ConcurrentQueue::ImplicitProducer::template enqueue_bulk<canAlloc>(itemFirst, count);
+	}
+	
+	inline bool update_current_producer_after_rotation(consumer_token_t& token)
+	{
+		// Ah, there's been a rotation, figure out where we should be!
+		auto tail = producerListTail.load(std::memory_order_acquire);
+		if (token.desiredProducer == nullptr && tail == nullptr) {
+			return false;
+		}
+		auto prodCount = producerCount.load(std::memory_order_relaxed);
+		auto globalOffset = globalExplicitConsumerOffset.load(std::memory_order_relaxed);
+		if ((details::unlikely)(token.desiredProducer == nullptr)) {
+			// Aha, first time we're dequeueing anything.
+			// Figure out our local position
+			// Note: offset is from start, not end, but we're traversing from end -- subtract from count first
+			std::uint32_t offset = prodCount - 1 - (token.initialOffset % prodCount);
+			token.desiredProducer = tail;
+			for (std::uint32_t i = 0; i != offset; ++i) {
+				token.desiredProducer = static_cast<ProducerBase*>(token.desiredProducer)->next_prod();
+				if (token.desiredProducer == nullptr) {
+					token.desiredProducer = tail;
+				}
+			}
+		}
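+		// Worked example (illustrative): with prodCount == 4 and token.initialOffset % 4 == 1,
+		// offset == 4 - 1 - 1 == 2, so the loop above advances desiredProducer two links
+		// from the list tail (wrapping back to the tail if it runs off the end).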
+		
+		std::uint32_t delta = globalOffset - token.lastKnownGlobalOffset;
+		if (delta >= prodCount) {
+			delta = delta % prodCount;
+		}
+		for (std::uint32_t i = 0; i != delta; ++i) {
+			token.desiredProducer = static_cast<ProducerBase*>(token.desiredProducer)->next_prod();
+			if (token.desiredProducer == nullptr) {
+				token.desiredProducer = tail;
+			}
+		}
+		
+		token.lastKnownGlobalOffset = globalOffset;
+		token.currentProducer = token.desiredProducer;
+		token.itemsConsumedFromCurrent = 0;
+		return true;
+	}
+	
+	
+	///////////////////////////
+	// Free list
+	///////////////////////////
+	
+	template <typename N>
+	struct FreeListNode
+	{
+		FreeListNode() : freeListRefs(0), freeListNext(nullptr) { }
+		
+		std::atomic<std::uint32_t> freeListRefs;
+		std::atomic<N*> freeListNext;
+	};
+	
+	// A simple CAS-based lock-free free list. Not the fastest thing in the world under heavy contention, but
+	// simple and correct (assuming nodes are never freed until after the free list is destroyed), and fairly
+	// speedy under low contention.
+	template<typename N>		// N must inherit FreeListNode or have the same fields (and initialization of them)
+	struct FreeList
+	{
+		FreeList() : freeListHead(nullptr) { }
+		FreeList(FreeList&& other) : freeListHead(other.freeListHead.load(std::memory_order_relaxed)) { other.freeListHead.store(nullptr, std::memory_order_relaxed); }
+		void swap(FreeList& other) { details::swap_relaxed(freeListHead, other.freeListHead); }
+		
+		FreeList(FreeList const&) MOODYCAMEL_DELETE_FUNCTION;
+		FreeList& operator=(FreeList const&) MOODYCAMEL_DELETE_FUNCTION;
+		
+		inline void add(N* node)
+		{
+#ifdef MCDBGQ_NOLOCKFREE_FREELIST
+			debug::DebugLock lock(mutex);
+#endif		
+			// We know that the should-be-on-freelist bit is 0 at this point, so it's safe to
+			// set it using a fetch_add
+			if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST, std::memory_order_acq_rel) == 0) {
+				// Oh look! We were the last ones referencing this node, and we know
+				// we want to add it to the free list, so let's do it!
+				add_knowing_refcount_is_zero(node);
+			}
+		}
+		
+		inline N* try_get()
+		{
+#ifdef MCDBGQ_NOLOCKFREE_FREELIST
+			debug::DebugLock lock(mutex);
+#endif		
+			auto head = freeListHead.load(std::memory_order_acquire);
+			while (head != nullptr) {
+				auto prevHead = head;
+				auto refs = head->freeListRefs.load(std::memory_order_relaxed);
+				if ((refs & REFS_MASK) == 0 || !head->freeListRefs.compare_exchange_strong(refs, refs + 1, std::memory_order_acquire)) {
+					head = freeListHead.load(std::memory_order_acquire);
+					continue;
+				}
+				
+				// Good, reference count has been incremented (it wasn't at zero), which means we can read the
+				// next and not worry about it changing between now and the time we do the CAS
+				auto next = head->freeListNext.load(std::memory_order_relaxed);
+				if (freeListHead.compare_exchange_strong(head, next, std::memory_order_acquire, std::memory_order_relaxed)) {
+					// Yay, got the node. This means it was on the list, which means shouldBeOnFreeList must be false no
+					// matter the refcount (because nobody else knows it's been taken off yet, it can't have been put back on).
+					assert((head->freeListRefs.load(std::memory_order_relaxed) & SHOULD_BE_ON_FREELIST) == 0);
+					
+					// Decrease refcount twice, once for our ref, and once for the list's ref
+					head->freeListRefs.fetch_sub(2, std::memory_order_release);
+					return head;
+				}
+				
+				// OK, the head must have changed on us, but we still need to decrease the refcount we increased.
+				// Note that we don't need to release any memory effects, but we do need to ensure that the reference
+				// count decrement happens-after the CAS on the head.
+				refs = prevHead->freeListRefs.fetch_sub(1, std::memory_order_acq_rel);
+				if (refs == SHOULD_BE_ON_FREELIST + 1) {
+					add_knowing_refcount_is_zero(prevHead);
+				}
+			}
+			
+			return nullptr;
+		}
+		
+		// Useful for traversing the list when there's no contention (e.g. to destroy remaining nodes)
+		N* head_unsafe() const { return freeListHead.load(std::memory_order_relaxed); }
+		
+	private:
+		inline void add_knowing_refcount_is_zero(N* node)
+		{
+			// Since the refcount is zero, and nobody can increase it once it's zero (except us, and we run
+			// only one copy of this method per node at a time, i.e. the single thread case), then we know
+			// we can safely change the next pointer of the node; however, once the refcount is back above
+			// zero, then other threads could increase it (happens under heavy contention, when the refcount
+			// goes to zero in between a load and a refcount increment of a node in try_get, then back up to
+			// something non-zero, then the refcount increment is done by the other thread) -- so, if the CAS
+			// to add the node to the actual list fails, decrease the refcount and leave the add operation to
+			// the next thread that puts the refcount back at zero (which could be us, hence the loop).
+			auto head = freeListHead.load(std::memory_order_relaxed);
+			while (true) {
+				node->freeListNext.store(head, std::memory_order_relaxed);
+				node->freeListRefs.store(1, std::memory_order_release);
+				if (!freeListHead.compare_exchange_strong(head, node, std::memory_order_release, std::memory_order_relaxed)) {
+					// Hmm, the add failed, but we can only try again when the refcount goes back to zero
+					if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST - 1, std::memory_order_acq_rel) == 1) {
+						continue;
+					}
+				}
+				return;
+			}
+		}
+		
+	private:
+		// Implemented like a stack, but where node order doesn't matter (nodes are inserted out of order under contention)
+		std::atomic<N*> freeListHead;
+
+		static const std::uint32_t REFS_MASK = 0x7FFFFFFF;
+		static const std::uint32_t SHOULD_BE_ON_FREELIST = 0x80000000;
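+		// Illustrative note: freeListRefs packs two things into one 32-bit atomic -- the
+		// low 31 bits (REFS_MASK) hold the reference count, while the top bit
+		// (SHOULD_BE_ON_FREELIST) marks a node that should be re-added to the list as
+		// soon as its reference count drops back to zero.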
+		
+#ifdef MCDBGQ_NOLOCKFREE_FREELIST
+		debug::DebugMutex mutex;
+#endif
+	};
+	
+	
+	///////////////////////////
+	// Block
+	///////////////////////////
+	
+	enum InnerQueueContext { implicit_context = 0, explicit_context = 1 };
+	
+	struct Block
+	{
+		Block()
+			: next(nullptr), elementsCompletelyDequeued(0), freeListRefs(0), freeListNext(nullptr), dynamicallyAllocated(true)
+		{
+#ifdef MCDBGQ_TRACKMEM
+			owner = nullptr;
+#endif
+		}
+		
+		template<InnerQueueContext context>
+		inline bool is_empty() const
+		{
+			MOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {
+				// Check flags
+				for (size_t i = 0; i < BLOCK_SIZE; ++i) {
+					if (!emptyFlags[i].load(std::memory_order_relaxed)) {
+						return false;
+					}
+				}
+				
+				// Aha, empty; make sure we have all other memory effects that happened before the empty flags were set
+				std::atomic_thread_fence(std::memory_order_acquire);
+				return true;
+			}
+			else {
+				// Check counter
+				if (elementsCompletelyDequeued.load(std::memory_order_relaxed) == BLOCK_SIZE) {
+					std::atomic_thread_fence(std::memory_order_acquire);
+					return true;
+				}
+				assert(elementsCompletelyDequeued.load(std::memory_order_relaxed) <= BLOCK_SIZE);
+				return false;
+			}
+		}
+		
+		// Returns true if the block is now empty (does not apply in explicit context)
+		template<InnerQueueContext context>
+		inline bool set_empty(MOODYCAMEL_MAYBE_UNUSED index_t i)
+		{
+			MOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {
+				// Set flag
+				assert(!emptyFlags[BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1))].load(std::memory_order_relaxed));
+				emptyFlags[BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1))].store(true, std::memory_order_release);
+				return false;
+			}
+			else {
+				// Increment counter
+				auto prevVal = elementsCompletelyDequeued.fetch_add(1, std::memory_order_acq_rel);
+				assert(prevVal < BLOCK_SIZE);
+				return prevVal == BLOCK_SIZE - 1;
+			}
+		}
+		
+		// Sets multiple contiguous item statuses to 'empty' (assumes no wrapping and count > 0).
+		// Returns true if the block is now empty (does not apply in explicit context).
+		template<InnerQueueContext context>
+		inline bool set_many_empty(MOODYCAMEL_MAYBE_UNUSED index_t i, size_t count)
+		{
+			MOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {
+				// Set flags
+				std::atomic_thread_fence(std::memory_order_release);
+				i = BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1)) - count + 1;
+				for (size_t j = 0; j != count; ++j) {
+					assert(!emptyFlags[i + j].load(std::memory_order_relaxed));
+					emptyFlags[i + j].store(true, std::memory_order_relaxed);
+				}
+				return false;
+			}
+			else {
+				// Increment counter
+				auto prevVal = elementsCompletelyDequeued.fetch_add(count, std::memory_order_acq_rel);
+				assert(prevVal + count <= BLOCK_SIZE);
+				return prevVal + count == BLOCK_SIZE;
+			}
+		}
+		
+		template<InnerQueueContext context>
+		inline void set_all_empty()
+		{
+			MOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {
+				// Set all flags
+				for (size_t i = 0; i != BLOCK_SIZE; ++i) {
+					emptyFlags[i].store(true, std::memory_order_relaxed);
+				}
+			}
+			else {
+				// Reset counter
+				elementsCompletelyDequeued.store(BLOCK_SIZE, std::memory_order_relaxed);
+			}
+		}
+		
+		template<InnerQueueContext context>
+		inline void reset_empty()
+		{
+			MOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {
+				// Reset flags
+				for (size_t i = 0; i != BLOCK_SIZE; ++i) {
+					emptyFlags[i].store(false, std::memory_order_relaxed);
+				}
+			}
+			else {
+				// Reset counter
+				elementsCompletelyDequeued.store(0, std::memory_order_relaxed);
+			}
+		}
+		
+		inline T* operator[](index_t idx) MOODYCAMEL_NOEXCEPT { return static_cast<T*>(static_cast<void*>(elements)) + static_cast<size_t>(idx & static_cast<index_t>(BLOCK_SIZE - 1)); }
+		inline T const* operator[](index_t idx) const MOODYCAMEL_NOEXCEPT { return static_cast<T const*>(static_cast<void const*>(elements)) + static_cast<size_t>(idx & static_cast<index_t>(BLOCK_SIZE - 1)); }
+		
+	private:
+		static_assert(std::alignment_of<T>::value <= sizeof(T), "The queue does not support types with an alignment greater than their size at this time");
+		MOODYCAMEL_ALIGNED_TYPE_LIKE(char[sizeof(T) * BLOCK_SIZE], T) elements;
+	public:
+		Block* next;
+		std::atomic<size_t> elementsCompletelyDequeued;
+		std::atomic<bool> emptyFlags[BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD ? BLOCK_SIZE : 1];
+	public:
+		std::atomic<std::uint32_t> freeListRefs;
+		std::atomic<Block*> freeListNext;
+		bool dynamicallyAllocated;		// Perhaps a better name for this would be 'isNotPartOfInitialBlockPool'
+		
+#ifdef MCDBGQ_TRACKMEM
+		void* owner;
+#endif
+	};
+	static_assert(std::alignment_of<Block>::value >= std::alignment_of<T>::value, "Internal error: Blocks must be at least as aligned as the type they are wrapping");
+
+
+#ifdef MCDBGQ_TRACKMEM
+public:
+	struct MemStats;
+private:
+#endif
+	
+	///////////////////////////
+	// Producer base
+	///////////////////////////
+	
+	struct ProducerBase : public details::ConcurrentQueueProducerTypelessBase
+	{
+		ProducerBase(ConcurrentQueue* parent_, bool isExplicit_) :
+			tailIndex(0),
+			headIndex(0),
+			dequeueOptimisticCount(0),
+			dequeueOvercommit(0),
+			tailBlock(nullptr),
+			isExplicit(isExplicit_),
+			parent(parent_)
+		{
+		}
+		
+		virtual ~ProducerBase() { }
+		
+		template<typename U>
+		inline bool dequeue(U& element)
+		{
+			if (isExplicit) {
+				return static_cast<ExplicitProducer*>(this)->dequeue(element);
+			}
+			else {
+				return static_cast<ImplicitProducer*>(this)->dequeue(element);
+			}
+		}
+		
+		template<typename It>
+		inline size_t dequeue_bulk(It& itemFirst, size_t max)
+		{
+			if (isExplicit) {
+				return static_cast<ExplicitProducer*>(this)->dequeue_bulk(itemFirst, max);
+			}
+			else {
+				return static_cast<ImplicitProducer*>(this)->dequeue_bulk(itemFirst, max);
+			}
+		}
+		
+		inline ProducerBase* next_prod() const { return static_cast<ProducerBase*>(next); }
+		
+		inline size_t size_approx() const
+		{
+			auto tail = tailIndex.load(std::memory_order_relaxed);
+			auto head = headIndex.load(std::memory_order_relaxed);
+			return details::circular_less_than(head, tail) ? static_cast<size_t>(tail - head) : 0;
+		}
+		
+		inline index_t getTail() const { return tailIndex.load(std::memory_order_relaxed); }
+	protected:
+		std::atomic<index_t> tailIndex;		// Where to enqueue to next
+		std::atomic<index_t> headIndex;		// Where to dequeue from next
+		
+		std::atomic<index_t> dequeueOptimisticCount;
+		std::atomic<index_t> dequeueOvercommit;
+		
+		Block* tailBlock;
+		
+	public:
+		bool isExplicit;
+		ConcurrentQueue* parent;
+		
+	protected:
+#ifdef MCDBGQ_TRACKMEM
+		friend struct MemStats;
+#endif
+	};
+	
+	
+	///////////////////////////
+	// Explicit queue
+	///////////////////////////
+		
+	struct ExplicitProducer : public ProducerBase
+	{
+		explicit ExplicitProducer(ConcurrentQueue* parent_) :
+			ProducerBase(parent_, true),
+			blockIndex(nullptr),
+			pr_blockIndexSlotsUsed(0),
+			pr_blockIndexSize(EXPLICIT_INITIAL_INDEX_SIZE >> 1),
+			pr_blockIndexFront(0),
+			pr_blockIndexEntries(nullptr),
+			pr_blockIndexRaw(nullptr)
+		{
+			size_t poolBasedIndexSize = details::ceil_to_pow_2(parent_->initialBlockPoolSize) >> 1;
+			if (poolBasedIndexSize > pr_blockIndexSize) {
+				pr_blockIndexSize = poolBasedIndexSize;
+			}
+			
+			new_block_index(0);		// This creates an index with double the number of current entries, i.e. EXPLICIT_INITIAL_INDEX_SIZE
+		}
+		
+		~ExplicitProducer()
+		{
+			// Destruct any elements not yet dequeued.
+			// Since we're in the destructor, we can assume all elements
+			// are either completely dequeued or completely not (no halfways).
+			if (this->tailBlock != nullptr) {		// Note this means there must be a block index too
+				// First find the block that's partially dequeued, if any
+				Block* halfDequeuedBlock = nullptr;
+				if ((this->headIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1)) != 0) {
+					// The head's not on a block boundary, meaning a block somewhere is partially dequeued
+					// (or the head block is the tail block and was fully dequeued, but the head/tail are still not on a boundary)
+					size_t i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & (pr_blockIndexSize - 1);
+					while (details::circular_less_than<index_t>(pr_blockIndexEntries[i].base + BLOCK_SIZE, this->headIndex.load(std::memory_order_relaxed))) {
+						i = (i + 1) & (pr_blockIndexSize - 1);
+					}
+					assert(details::circular_less_than<index_t>(pr_blockIndexEntries[i].base, this->headIndex.load(std::memory_order_relaxed)));
+					halfDequeuedBlock = pr_blockIndexEntries[i].block;
+				}
+				
+				// Start at the head block (note the first line in the loop gives us the head from the tail on the first iteration)
+				auto block = this->tailBlock;
+				do {
+					block = block->next;
+					if (block->ConcurrentQueue::Block::template is_empty<explicit_context>()) {
+						continue;
+					}
+					
+					size_t i = 0;	// Offset into block
+					if (block == halfDequeuedBlock) {
+						i = static_cast<size_t>(this->headIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1));
+					}
+					
+					// Walk through all the items in the block; if this is the tail block, we need to stop when we reach the tail index
+					auto lastValidIndex = (this->tailIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 ? BLOCK_SIZE : static_cast<size_t>(this->tailIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1));
+					while (i != BLOCK_SIZE && (block != this->tailBlock || i != lastValidIndex)) {
+						(*block)[i++]->~T();
+					}
+				} while (block != this->tailBlock);
+			}
+			
+			// Destroy all blocks that we own
+			if (this->tailBlock != nullptr) {
+				auto block = this->tailBlock;
+				do {
+					auto nextBlock = block->next;
+					this->parent->add_block_to_free_list(block);
+					block = nextBlock;
+				} while (block != this->tailBlock);
+			}
+			
+			// Destroy the block indices
+			auto header = static_cast<BlockIndexHeader*>(pr_blockIndexRaw);
+			while (header != nullptr) {
+				auto prev = static_cast<BlockIndexHeader*>(header->prev);
+				header->~BlockIndexHeader();
+				(Traits::free)(header);
+				header = prev;
+			}
+		}
+		
+		template<AllocationMode allocMode, typename U>
+		inline bool enqueue(U&& element)
+		{
+			index_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed);
+			index_t newTailIndex = 1 + currentTailIndex;
+			if ((currentTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {
+				// We reached the end of a block, start a new one
+				auto startBlock = this->tailBlock;
+				auto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed;
+				if (this->tailBlock != nullptr && this->tailBlock->next->ConcurrentQueue::Block::template is_empty<explicit_context>()) {
+					// We can re-use the block ahead of us; it's empty!
+					this->tailBlock = this->tailBlock->next;
+					this->tailBlock->ConcurrentQueue::Block::template reset_empty<explicit_context>();
+					
+					// We'll put the block on the block index (guaranteed to be room since we're conceptually removing the
+					// last block from it first -- except instead of removing then adding, we can just overwrite).
+					// Note that there must be a valid block index here, since even if allocation failed in the ctor,
+					// it would have been re-attempted when adding the first block to the queue; since there is such
+					// a block, a block index must have been successfully allocated.
+				}
+				else {
+					// Whatever head value we see here is >= the last value we saw here (relatively),
+					// and <= its current value. Since we have the most recent tail, the head must be
+					// <= to it.
+					auto head = this->headIndex.load(std::memory_order_relaxed);
+					assert(!details::circular_less_than<index_t>(currentTailIndex, head));
+					if (!details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE)
+						|| (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) {
+						// We can't enqueue in another block because there's not enough leeway -- the
+						// tail could surpass the head by the time the block fills up! (Or we'll exceed
+						// the size limit, if the second part of the condition was true.)
+						return false;
+					}
+					// We're going to need a new block; check that the block index has room
+					if (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize) {
+						// Hmm, the circular block index is already full -- we'll need
+						// to allocate a new index. Note pr_blockIndexRaw can only be nullptr if
+						// the initial allocation failed in the constructor.
+						
+						MOODYCAMEL_CONSTEXPR_IF (allocMode == CannotAlloc) {
+							return false;
+						}
+						else if (!new_block_index(pr_blockIndexSlotsUsed)) {
+							return false;
+						}
+					}
+					
+					// Insert a new block in the circular linked list
+					auto newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>();
+					if (newBlock == nullptr) {
+						return false;
+					}
+#ifdef MCDBGQ_TRACKMEM
+					newBlock->owner = this;
+#endif
+					newBlock->ConcurrentQueue::Block::template reset_empty<explicit_context>();
+					if (this->tailBlock == nullptr) {
+						newBlock->next = newBlock;
+					}
+					else {
+						newBlock->next = this->tailBlock->next;
+						this->tailBlock->next = newBlock;
+					}
+					this->tailBlock = newBlock;
+					++pr_blockIndexSlotsUsed;
+				}
+
+				MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast<T*>(nullptr)) T(std::forward<U>(element)))) {
+					// The constructor may throw. We want the element not to appear in the queue in
+					// that case (without corrupting the queue):
+					MOODYCAMEL_TRY {
+						new ((*this->tailBlock)[currentTailIndex]) T(std::forward<U>(element));
+					}
+					MOODYCAMEL_CATCH (...) {
+						// Revert change to the current block, but leave the new block available
+						// for next time
+						pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;
+						this->tailBlock = startBlock == nullptr ? this->tailBlock : startBlock;
+						MOODYCAMEL_RETHROW;
+					}
+				}
+				else {
+					(void)startBlock;
+					(void)originalBlockIndexSlotsUsed;
+				}
+				
+				// Add block to block index
+				auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront];
+				entry.base = currentTailIndex;
+				entry.block = this->tailBlock;
+				blockIndex.load(std::memory_order_relaxed)->front.store(pr_blockIndexFront, std::memory_order_release);
+				pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1);
+				
+				MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast<T*>(nullptr)) T(std::forward<U>(element)))) {
+					this->tailIndex.store(newTailIndex, std::memory_order_release);
+					return true;
+				}
+			}
+			
+			// Enqueue
+			new ((*this->tailBlock)[currentTailIndex]) T(std::forward<U>(element));
+			
+			this->tailIndex.store(newTailIndex, std::memory_order_release);
+			return true;
+		}
+		
+		template<typename U>
+		bool dequeue(U& element)
+		{
+			auto tail = this->tailIndex.load(std::memory_order_relaxed);
+			auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);
+			if (details::circular_less_than<index_t>(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) {
+				// Might be something to dequeue, let's give it a try
+				
+				// Note that this if is purely for performance purposes in the common case when the queue is
+				// empty and the values are eventually consistent -- we may enter here spuriously.
+				
+				// Note that whatever the values of overcommit and tail are, they are not going to change (unless we
+				// change them) and must be the same value at this point (inside the if) as when the if condition was
+				// evaluated.
+
+				// We insert an acquire fence here to synchronize-with the release upon incrementing dequeueOvercommit below.
+				// This ensures that whatever value we loaded into overcommit, the load of dequeueOptimisticCount in
+				// the fetch_add below will result in a value at least as recent as that (and therefore at least as large).
+				// Note that I believe a compiler (signal) fence here would be sufficient due to the nature of fetch_add (all
+				// read-modify-write operations are guaranteed to work on the latest value in the modification order), but
+				// unfortunately that can't be shown to be correct using only the C++11 standard.
+				// See http://stackoverflow.com/questions/18223161/what-are-the-c11-memory-ordering-guarantees-in-this-corner-case
+				std::atomic_thread_fence(std::memory_order_acquire);
+				
+				// Increment optimistic counter, then check if it went over the boundary
+				auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed);
+				
+				// Note that since dequeueOvercommit must be <= dequeueOptimisticCount (because dequeueOvercommit is only ever
+				// incremented after dequeueOptimisticCount -- this is enforced in the `else` block below), and since we now
+				// have a version of dequeueOptimisticCount that is at least as recent as overcommit (due to the release upon
+				// incrementing dequeueOvercommit and the acquire above that synchronizes with it), overcommit <= myDequeueCount.
+				// However, we can't assert this since both dequeueOptimisticCount and dequeueOvercommit may (independently)
+				// overflow; in such a case, though, the logic still holds since the difference between the two is maintained.
+				
+				// Note that we reload tail here in case it changed; it will be the same value as before or greater, since
+				// this load is sequenced after (happens after) the earlier load above. This is supported by read-read
+				// coherency (as defined in the standard), explained here: http://en.cppreference.com/w/cpp/atomic/memory_order
+				tail = this->tailIndex.load(std::memory_order_acquire);
+				if ((details::likely)(details::circular_less_than<index_t>(myDequeueCount - overcommit, tail))) {
+					// Guaranteed to be at least one element to dequeue!
+					
+					// Get the index. Note that since there's guaranteed to be at least one element, this
+					// will never exceed tail. We need to do an acquire-release fence here since it's possible
+					// that whatever condition got us to this point was for an earlier enqueued element (that
+					// we already see the memory effects for), but that by the time we increment somebody else
+					// has incremented it, and we need to see the memory effects for *that* element, which is
+					// has incremented it, and we need to see the memory effects for *that* element, which in
+					// such a case is necessarily visible on the thread that incremented it in the first
+					// as recent).
+					auto index = this->headIndex.fetch_add(1, std::memory_order_acq_rel);
+					
+					
+					// Determine which block the element is in
+					
+					auto localBlockIndex = blockIndex.load(std::memory_order_acquire);
+					auto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire);
+					
+					// We need to be careful here about subtracting and dividing because of index wrap-around.
+					// When an index wraps, we need to preserve the sign of the offset when dividing it by the
+					// block size (in order to get a correct signed block count offset in all cases):
+					auto headBase = localBlockIndex->entries[localBlockIndexHead].base;
+					auto blockBaseIndex = index & ~static_cast<index_t>(BLOCK_SIZE - 1);
+					auto offset = static_cast<size_t>(static_cast<typename std::make_signed<index_t>::type>(blockBaseIndex - headBase) / static_cast<typename std::make_signed<index_t>::type>(BLOCK_SIZE));
+					auto block = localBlockIndex->entries[(localBlockIndexHead + offset) & (localBlockIndex->size - 1)].block;
+					
+					// Dequeue
+					auto& el = *((*block)[index]);
+					if (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) {
+						// Make sure the element is still fully dequeued and destroyed even if the assignment
+						// throws
+						struct Guard {
+							Block* block;
+							index_t index;
+							
+							~Guard()
+							{
+								(*block)[index]->~T();
+								block->ConcurrentQueue::Block::template set_empty<explicit_context>(index);
+							}
+						} guard = { block, index };
+
+						element = std::move(el); // NOLINT
+					}
+					else {
+						element = std::move(el); // NOLINT
+						el.~T(); // NOLINT
+						block->ConcurrentQueue::Block::template set_empty<explicit_context>(index);
+					}
+					
+					return true;
+				}
+				else {
+					// Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent
+					this->dequeueOvercommit.fetch_add(1, std::memory_order_release);		// Release so that the fetch_add on dequeueOptimisticCount is guaranteed to happen before this write
+				}
+			}
+		
+			return false;
+		}
+		
+		template<AllocationMode allocMode, typename It>
+		bool MOODYCAMEL_NO_TSAN enqueue_bulk(It itemFirst, size_t count)
+		{
+			// First, we need to make sure we have enough room to enqueue all of the elements;
+			// this means pre-allocating blocks and putting them in the block index (but only if
+			// all the allocations succeeded).
+			index_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed);
+			auto startBlock = this->tailBlock;
+			auto originalBlockIndexFront = pr_blockIndexFront;
+			auto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed;
+			
+			Block* firstAllocatedBlock = nullptr;
+			
+			// Figure out how many blocks we'll need to allocate, and do so
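+			// blockBaseDiff is the distance between the block base of the last element to be enqueued
+			// and the block base just before startTailIndex; every BLOCK_SIZE of difference is one more
+			// block that must be made available (reused from ahead of the tail or newly requisitioned).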
+			size_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1));
+			index_t currentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);
+			if (blockBaseDiff > 0) {
+				// Allocate as many blocks as possible from ahead
+				while (blockBaseDiff > 0 && this->tailBlock != nullptr && this->tailBlock->next != firstAllocatedBlock && this->tailBlock->next->ConcurrentQueue::Block::template is_empty<explicit_context>()) {
+					blockBaseDiff -= static_cast<index_t>(BLOCK_SIZE);
+					currentTailIndex += static_cast<index_t>(BLOCK_SIZE);
+					
+					this->tailBlock = this->tailBlock->next;
+					firstAllocatedBlock = firstAllocatedBlock == nullptr ? this->tailBlock : firstAllocatedBlock;
+					
+					auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront];
+					entry.base = currentTailIndex;
+					entry.block = this->tailBlock;
+					pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1);
+				}
+				
+				// Now allocate as many blocks as necessary from the block pool
+				while (blockBaseDiff > 0) {
+					blockBaseDiff -= static_cast<index_t>(BLOCK_SIZE);
+					currentTailIndex += static_cast<index_t>(BLOCK_SIZE);
+					
+					auto head = this->headIndex.load(std::memory_order_relaxed);
+					assert(!details::circular_less_than<index_t>(currentTailIndex, head));
+					bool full = !details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head));
+					if (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize || full) {
+						MOODYCAMEL_CONSTEXPR_IF (allocMode == CannotAlloc) {
+							// Failed to allocate, undo changes (but keep injected blocks)
+							pr_blockIndexFront = originalBlockIndexFront;
+							pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;
+							this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock;
+							return false;
+						}
+						else if (full || !new_block_index(originalBlockIndexSlotsUsed)) {
+							// Failed to allocate, undo changes (but keep injected blocks)
+							pr_blockIndexFront = originalBlockIndexFront;
+							pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;
+							this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock;
+							return false;
+						}
+						
+						// pr_blockIndexFront is updated inside new_block_index, so we need to
+						// update our fallback value too (since we keep the new index even if we
+						// later fail)
+						originalBlockIndexFront = originalBlockIndexSlotsUsed;
+					}
+					
+					// Insert a new block in the circular linked list
+					auto newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>();
+					if (newBlock == nullptr) {
+						pr_blockIndexFront = originalBlockIndexFront;
+						pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;
+						this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock;
+						return false;
+					}
+					
+#ifdef MCDBGQ_TRACKMEM
+					newBlock->owner = this;
+#endif
+					newBlock->ConcurrentQueue::Block::template set_all_empty<explicit_context>();
+					if (this->tailBlock == nullptr) {
+						newBlock->next = newBlock;
+					}
+					else {
+						newBlock->next = this->tailBlock->next;
+						this->tailBlock->next = newBlock;
+					}
+					this->tailBlock = newBlock;
+					firstAllocatedBlock = firstAllocatedBlock == nullptr ? this->tailBlock : firstAllocatedBlock;
+					
+					++pr_blockIndexSlotsUsed;
+					
+					auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront];
+					entry.base = currentTailIndex;
+					entry.block = this->tailBlock;
+					pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1);
+				}
+				
+				// Excellent, all allocations succeeded. Reset each block's emptiness before we fill them up, and
+				// publish the new block index front
+				auto block = firstAllocatedBlock;
+				while (true) {
+					block->ConcurrentQueue::Block::template reset_empty<explicit_context>();
+					if (block == this->tailBlock) {
+						break;
+					}
+					block = block->next;
+				}
+				
+				MOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) T(details::deref_noexcept(itemFirst)))) {
+					blockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release);
+				}
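+				// (If the element constructor can throw, publishing the new block index front is instead
+				// deferred until all elements have been constructed, near the end of this function.)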
+			}
+			
+			// Enqueue, one block at a time
+			index_t newTailIndex = startTailIndex + static_cast<index_t>(count);
+			currentTailIndex = startTailIndex;
+			auto endBlock = this->tailBlock;
+			this->tailBlock = startBlock;
+			assert((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0);
+			if ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) {
+				this->tailBlock = firstAllocatedBlock;
+			}
+			while (true) {
+				index_t stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+				if (details::circular_less_than<index_t>(newTailIndex, stopIndex)) {
+					stopIndex = newTailIndex;
+				}
+				MOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) T(details::deref_noexcept(itemFirst)))) {
+					while (currentTailIndex != stopIndex) {
+						new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++);
+					}
+				}
+				else {
+					MOODYCAMEL_TRY {
+						while (currentTailIndex != stopIndex) {
+							// Must use copy constructor even if move constructor is available
+							// because we may have to revert if there's an exception.
+							// Sorry about the horrible templated next line, but it was the only way
+							// to disable moving *at compile time*, which is important because a type
+							// may only define a (noexcept) move constructor, and so calls to the
+							// cctor will not compile, even if they are in an if branch that will never
+							// copy constructor will not compile, even if they are in an if branch that will never
+							new ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if<!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst));
+							++currentTailIndex;
+							++itemFirst;
+						}
+					}
+					MOODYCAMEL_CATCH (...) {
+						// Oh dear, an exception's been thrown -- destroy the elements that
+						// were enqueued so far and revert the entire bulk operation (we'll keep
+						// any allocated blocks in our linked list for later, though).
+						auto constructedStopIndex = currentTailIndex;
+						auto lastBlockEnqueued = this->tailBlock;
+						
+						pr_blockIndexFront = originalBlockIndexFront;
+						pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;
+						this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock;
+						
+						if (!details::is_trivially_destructible<T>::value) {
+							auto block = startBlock;
+							if ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {
+								block = firstAllocatedBlock;
+							}
+							currentTailIndex = startTailIndex;
+							while (true) {
+								stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+								if (details::circular_less_than<index_t>(constructedStopIndex, stopIndex)) {
+									stopIndex = constructedStopIndex;
+								}
+								while (currentTailIndex != stopIndex) {
+									(*block)[currentTailIndex++]->~T();
+								}
+								if (block == lastBlockEnqueued) {
+									break;
+								}
+								block = block->next;
+							}
+						}
+						MOODYCAMEL_RETHROW;
+					}
+				}
+				
+				if (this->tailBlock == endBlock) {
+					assert(currentTailIndex == newTailIndex);
+					break;
+				}
+				this->tailBlock = this->tailBlock->next;
+			}
+			
+			MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) T(details::deref_noexcept(itemFirst)))) {
+				if (firstAllocatedBlock != nullptr)
+					blockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release);
+			}
+			
+			this->tailIndex.store(newTailIndex, std::memory_order_release);
+			return true;
+		}
+		
+		template<typename It>
+		size_t dequeue_bulk(It& itemFirst, size_t max)
+		{
+			auto tail = this->tailIndex.load(std::memory_order_relaxed);
+			auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);
+			auto desiredCount = static_cast<size_t>(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit));
+			if (details::circular_less_than<size_t>(0, desiredCount)) {
+				desiredCount = desiredCount < max ? desiredCount : max;
+				std::atomic_thread_fence(std::memory_order_acquire);
+				
+				auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed);
+				
+				tail = this->tailIndex.load(std::memory_order_acquire);
+				auto actualCount = static_cast<size_t>(tail - (myDequeueCount - overcommit));
+				if (details::circular_less_than<size_t>(0, actualCount)) {
+					actualCount = desiredCount < actualCount ? desiredCount : actualCount;
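+					// We optimistically claimed desiredCount elements but only actualCount are available;
+					// credit the difference back through dequeueOvercommit so the counters stay consistent.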
+					if (actualCount < desiredCount) {
+						this->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release);
+					}
+					
+					// Get the first index. Note that since there's guaranteed to be at least actualCount elements, this
+					// will never exceed tail.
+					auto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel);
+					
+					// Determine which block the first element is in
+					auto localBlockIndex = blockIndex.load(std::memory_order_acquire);
+					auto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire);
+					
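+					// As in dequeue(), use a signed division so that a wrapped-around index still yields
+					// the correct (possibly negative) block-count offset from the index head.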
+					auto headBase = localBlockIndex->entries[localBlockIndexHead].base;
+					auto firstBlockBaseIndex = firstIndex & ~static_cast<index_t>(BLOCK_SIZE - 1);
+					auto offset = static_cast<size_t>(static_cast<typename std::make_signed<index_t>::type>(firstBlockBaseIndex - headBase) / static_cast<typename std::make_signed<index_t>::type>(BLOCK_SIZE));
+					auto indexIndex = (localBlockIndexHead + offset) & (localBlockIndex->size - 1);
+					
+					// Iterate the blocks and dequeue
+					auto index = firstIndex;
+					do {
+						auto firstIndexInBlock = index;
+						index_t endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+						endIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;
+						auto block = localBlockIndex->entries[indexIndex].block;
+						if (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) {
+							while (index != endIndex) {
+								auto& el = *((*block)[index]);
+								*itemFirst++ = std::move(el);
+								el.~T();
+								++index;
+							}
+						}
+						else {
+							MOODYCAMEL_TRY {
+								while (index != endIndex) {
+									auto& el = *((*block)[index]);
+									*itemFirst = std::move(el);
+									++itemFirst;
+									el.~T();
+									++index;
+								}
+							}
+							MOODYCAMEL_CATCH (...) {
+								// It's too late to revert the dequeue, but we can make sure that all
+								// the dequeued objects are properly destroyed and the block index
+								// (and empty count) are properly updated before we propagate the exception
+								do {
+									block = localBlockIndex->entries[indexIndex].block;
+									while (index != endIndex) {
+										(*block)[index++]->~T();
+									}
+									block->ConcurrentQueue::Block::template set_many_empty<explicit_context>(firstIndexInBlock, static_cast<size_t>(endIndex - firstIndexInBlock));
+									indexIndex = (indexIndex + 1) & (localBlockIndex->size - 1);
+									
+									firstIndexInBlock = index;
+									endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+									endIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;
+								} while (index != firstIndex + actualCount);
+								
+								MOODYCAMEL_RETHROW;
+							}
+						}
+						block->ConcurrentQueue::Block::template set_many_empty<explicit_context>(firstIndexInBlock, static_cast<size_t>(endIndex - firstIndexInBlock));
+						indexIndex = (indexIndex + 1) & (localBlockIndex->size - 1);
+					} while (index != firstIndex + actualCount);
+					
+					return actualCount;
+				}
+				else {
+					// Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent
+					this->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release);
+				}
+			}
+			
+			return 0;
+		}
+		
+	private:
+		struct BlockIndexEntry
+		{
+			index_t base;
+			Block* block;
+		};
+		
+		struct BlockIndexHeader
+		{
+			size_t size;
+			std::atomic<size_t> front;		// Current slot (not next, like pr_blockIndexFront)
+			BlockIndexEntry* entries;
+			void* prev;
+		};
+		
+		
+		bool new_block_index(size_t numberOfFilledSlotsToExpose)
+		{
+			auto prevBlockSizeMask = pr_blockIndexSize - 1;
+			
+			// Create the new block index (double the size of the previous one)
+			pr_blockIndexSize <<= 1;
+			auto newRawPtr = static_cast<char*>((Traits::malloc)(sizeof(BlockIndexHeader) + std::alignment_of<BlockIndexEntry>::value - 1 + sizeof(BlockIndexEntry) * pr_blockIndexSize));
+			if (newRawPtr == nullptr) {
+				pr_blockIndexSize >>= 1;		// Reset to allow graceful retry
+				return false;
+			}
+			
+			auto newBlockIndexEntries = reinterpret_cast<BlockIndexEntry*>(details::align_for<BlockIndexEntry>(newRawPtr + sizeof(BlockIndexHeader)));
+			
+			// Copy in all the old indices, if any
+			size_t j = 0;
+			if (pr_blockIndexSlotsUsed != 0) {
+				auto i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & prevBlockSizeMask;
+				do {
+					newBlockIndexEntries[j++] = pr_blockIndexEntries[i];
+					i = (i + 1) & prevBlockSizeMask;
+				} while (i != pr_blockIndexFront);
+			}
+			
+			// Update everything
+			auto header = new (newRawPtr) BlockIndexHeader;
+			header->size = pr_blockIndexSize;
+			header->front.store(numberOfFilledSlotsToExpose - 1, std::memory_order_relaxed);
+			header->entries = newBlockIndexEntries;
+			header->prev = pr_blockIndexRaw;		// we link the new index to the old one so we can free it later
+			
+			pr_blockIndexFront = j;
+			pr_blockIndexEntries = newBlockIndexEntries;
+			pr_blockIndexRaw = newRawPtr;
+			blockIndex.store(header, std::memory_order_release);
+			
+			return true;
+		}
+		
+	private:
+		std::atomic<BlockIndexHeader*> blockIndex;
+		
+		// To be used by producer only -- consumer must use the ones referenced by blockIndex
+		size_t pr_blockIndexSlotsUsed;
+		size_t pr_blockIndexSize;
+		size_t pr_blockIndexFront;		// Next slot (not current)
+		BlockIndexEntry* pr_blockIndexEntries;
+		void* pr_blockIndexRaw;
+		
+#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG
+	public:
+		ExplicitProducer* nextExplicitProducer;
+	private:
+#endif
+		
+#ifdef MCDBGQ_TRACKMEM
+		friend struct MemStats;
+#endif
+	};
+	
+	
+	//////////////////////////////////
+	// Implicit queue
+	//////////////////////////////////
+	
+	struct ImplicitProducer : public ProducerBase
+	{			
+		ImplicitProducer(ConcurrentQueue* parent_) :
+			ProducerBase(parent_, false),
+			nextBlockIndexCapacity(IMPLICIT_INITIAL_INDEX_SIZE),
+			blockIndex(nullptr)
+		{
+			new_block_index();
+		}
+		
+		~ImplicitProducer()
+		{
+			// Note that since we're in the destructor we can assume that all enqueue/dequeue operations
+			// completed already; this means that all undequeued elements are placed contiguously across
+			// contiguous blocks, and that only the first and last remaining blocks can be only partially
+			// empty (all other remaining blocks must be completely full).
+			
+#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED
+			// Unregister ourselves for thread termination notification
+			if (!this->inactive.load(std::memory_order_relaxed)) {
+				details::ThreadExitNotifier::unsubscribe(&threadExitListener);
+			}
+#endif
+			
+			// Destroy all remaining elements!
+			auto tail = this->tailIndex.load(std::memory_order_relaxed);
+			auto index = this->headIndex.load(std::memory_order_relaxed);
+			Block* block = nullptr;
+			assert(index == tail || details::circular_less_than(index, tail));
+			bool forceFreeLastBlock = index != tail;		// If we enter the loop, then the last (tail) block will not be freed
+			while (index != tail) {
+				if ((index & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 || block == nullptr) {
+					if (block != nullptr) {
+						// Free the old block
+						this->parent->add_block_to_free_list(block);
+					}
+					
+					block = get_block_index_entry_for_index(index)->value.load(std::memory_order_relaxed);
+				}
+				
+				((*block)[index])->~T();
+				++index;
+			}
+			// Even if the queue is empty, there's still one block that's not on the free list
+			// (unless the head index reached the end of it, in which case the tail will be poised
+			// to create a new block).
+			if (this->tailBlock != nullptr && (forceFreeLastBlock || (tail & static_cast<index_t>(BLOCK_SIZE - 1)) != 0)) {
+				this->parent->add_block_to_free_list(this->tailBlock);
+			}
+			
+			// Destroy block index
+			auto localBlockIndex = blockIndex.load(std::memory_order_relaxed);
+			if (localBlockIndex != nullptr) {
+				for (size_t i = 0; i != localBlockIndex->capacity; ++i) {
+					localBlockIndex->index[i]->~BlockIndexEntry();
+				}
+				do {
+					auto prev = localBlockIndex->prev;
+					localBlockIndex->~BlockIndexHeader();
+					(Traits::free)(localBlockIndex);
+					localBlockIndex = prev;
+				} while (localBlockIndex != nullptr);
+			}
+		}
+		
+		template<AllocationMode allocMode, typename U>
+		inline bool enqueue(U&& element)
+		{
+			index_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed);
+			index_t newTailIndex = 1 + currentTailIndex;
+			if ((currentTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {
+				// We reached the end of a block, start a new one
+				auto head = this->headIndex.load(std::memory_order_relaxed);
+				assert(!details::circular_less_than<index_t>(currentTailIndex, head));
+				if (!details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) {
+					return false;
+				}
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
+				debug::DebugLock lock(mutex);
+#endif
+				// Find out where we'll be inserting this block in the block index
+				BlockIndexEntry* idxEntry;
+				if (!insert_block_index_entry<allocMode>(idxEntry, currentTailIndex)) {
+					return false;
+				}
+				
+				// Get ahold of a new block
+				auto newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>();
+				if (newBlock == nullptr) {
+					rewind_block_index_tail();
+					idxEntry->value.store(nullptr, std::memory_order_relaxed);
+					return false;
+				}
+#ifdef MCDBGQ_TRACKMEM
+				newBlock->owner = this;
+#endif
+				newBlock->ConcurrentQueue::Block::template reset_empty<implicit_context>();
+
+				MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast<T*>(nullptr)) T(std::forward<U>(element)))) {
+					// May throw, try to insert now before we publish the fact that we have this new block
+					MOODYCAMEL_TRY {
+						new ((*newBlock)[currentTailIndex]) T(std::forward<U>(element));
+					}
+					MOODYCAMEL_CATCH (...) {
+						rewind_block_index_tail();
+						idxEntry->value.store(nullptr, std::memory_order_relaxed);
+						this->parent->add_block_to_free_list(newBlock);
+						MOODYCAMEL_RETHROW;
+					}
+				}
+				
+				// Insert the new block into the index
+				idxEntry->value.store(newBlock, std::memory_order_relaxed);
+				
+				this->tailBlock = newBlock;
+				
+				MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast<T*>(nullptr)) T(std::forward<U>(element)))) {
+					this->tailIndex.store(newTailIndex, std::memory_order_release);
+					return true;
+				}
+			}
+			
+			// Enqueue
+			new ((*this->tailBlock)[currentTailIndex]) T(std::forward<U>(element));
+			
+			this->tailIndex.store(newTailIndex, std::memory_order_release);
+			return true;
+		}
+		
+		template<typename U>
+		bool dequeue(U& element)
+		{
+			// See ExplicitProducer::dequeue for rationale and explanation
+			index_t tail = this->tailIndex.load(std::memory_order_relaxed);
+			index_t overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);
+			if (details::circular_less_than<index_t>(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) {
+				std::atomic_thread_fence(std::memory_order_acquire);
+				
+				index_t myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed);
+				tail = this->tailIndex.load(std::memory_order_acquire);
+				if ((details::likely)(details::circular_less_than<index_t>(myDequeueCount - overcommit, tail))) {
+					index_t index = this->headIndex.fetch_add(1, std::memory_order_acq_rel);
+					
+					// Determine which block the element is in
+					auto entry = get_block_index_entry_for_index(index);
+					
+					// Dequeue
+					auto block = entry->value.load(std::memory_order_relaxed);
+					auto& el = *((*block)[index]);
+					
+					if (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) {
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
+						// Note: Acquiring the mutex with every dequeue instead of only when a block
+						// is released is very sub-optimal, but it is, after all, purely debug code.
+						debug::DebugLock lock(producer->mutex);
+#endif
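+						// As in ExplicitProducer::dequeue, this guard ensures the element is destroyed and its
+						// slot marked empty (returning the block to the free list if it becomes completely
+						// empty) even if the move assignment below throws.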
+						struct Guard {
+							Block* block;
+							index_t index;
+							BlockIndexEntry* entry;
+							ConcurrentQueue* parent;
+							
+							~Guard()
+							{
+								(*block)[index]->~T();
+								if (block->ConcurrentQueue::Block::template set_empty<implicit_context>(index)) {
+									entry->value.store(nullptr, std::memory_order_relaxed);
+									parent->add_block_to_free_list(block);
+								}
+							}
+						} guard = { block, index, entry, this->parent };
+
+						element = std::move(el); // NOLINT
+					}
+					else {
+						element = std::move(el); // NOLINT
+						el.~T(); // NOLINT
+
+						if (block->ConcurrentQueue::Block::template set_empty<implicit_context>(index)) {
+							{
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
+								debug::DebugLock lock(mutex);
+#endif
+								// Add the block back into the global free pool (and remove from block index)
+								entry->value.store(nullptr, std::memory_order_relaxed);
+							}
+							this->parent->add_block_to_free_list(block);		// releases the above store
+						}
+					}
+					
+					return true;
+				}
+				else {
+					this->dequeueOvercommit.fetch_add(1, std::memory_order_release);
+				}
+			}
+		
+			return false;
+		}
+		
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4706)  // assignment within conditional expression
+#endif
+		template<AllocationMode allocMode, typename It>
+		bool enqueue_bulk(It itemFirst, size_t count)
+		{
+			// First, we need to make sure we have enough room to enqueue all of the elements;
+			// this means pre-allocating blocks and putting them in the block index (but only if
+			// all the allocations succeeded).
+			
+			// Note that the tailBlock we start off with may not be owned by us any more;
+			// this happens if it was filled up exactly to the top (setting tailIndex to
+			// the first index of the next block which is not yet allocated), then dequeued
+			// completely (putting it on the free list) before we enqueue again.
+			
+			index_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed);
+			auto startBlock = this->tailBlock;
+			Block* firstAllocatedBlock = nullptr;
+			auto endBlock = this->tailBlock;
+			
+			// Figure out how many blocks we'll need to allocate, and do so
+			size_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1));
+			index_t currentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);
+			if (blockBaseDiff > 0) {
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
+				debug::DebugLock lock(mutex);
+#endif
+				do {
+					blockBaseDiff -= static_cast<index_t>(BLOCK_SIZE);
+					currentTailIndex += static_cast<index_t>(BLOCK_SIZE);
+					
+					// Find out where we'll be inserting this block in the block index
+					BlockIndexEntry* idxEntry = nullptr;  // initialization here unnecessary but compiler can't always tell
+					Block* newBlock;
+					bool indexInserted = false;
+					auto head = this->headIndex.load(std::memory_order_relaxed);
+					assert(!details::circular_less_than<index_t>(currentTailIndex, head));
+					bool full = !details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head));
+
+					if (full || !(indexInserted = insert_block_index_entry<allocMode>(idxEntry, currentTailIndex)) || (newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>()) == nullptr) {
+						// Index allocation or block allocation failed; revert any other allocations
+						// and index insertions done so far for this operation
+						if (indexInserted) {
+							rewind_block_index_tail();
+							idxEntry->value.store(nullptr, std::memory_order_relaxed);
+						}
+						currentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);
+						for (auto block = firstAllocatedBlock; block != nullptr; block = block->next) {
+							currentTailIndex += static_cast<index_t>(BLOCK_SIZE);
+							idxEntry = get_block_index_entry_for_index(currentTailIndex);
+							idxEntry->value.store(nullptr, std::memory_order_relaxed);
+							rewind_block_index_tail();
+						}
+						this->parent->add_blocks_to_free_list(firstAllocatedBlock);
+						this->tailBlock = startBlock;
+						
+						return false;
+					}
+					
+#ifdef MCDBGQ_TRACKMEM
+					newBlock->owner = this;
+#endif
+					newBlock->ConcurrentQueue::Block::template reset_empty<implicit_context>();
+					newBlock->next = nullptr;
+					
+					// Insert the new block into the index
+					idxEntry->value.store(newBlock, std::memory_order_relaxed);
+					
+					// Store the chain of blocks so that we can undo if later allocations fail,
+					// and so that we can find the blocks when we do the actual enqueueing
+					if ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr) {
+						assert(this->tailBlock != nullptr);
+						this->tailBlock->next = newBlock;
+					}
+					this->tailBlock = newBlock;
+					endBlock = newBlock;
+					firstAllocatedBlock = firstAllocatedBlock == nullptr ? newBlock : firstAllocatedBlock;
+				} while (blockBaseDiff > 0);
+			}
+			
+			// Enqueue, one block at a time
+			index_t newTailIndex = startTailIndex + static_cast<index_t>(count);
+			currentTailIndex = startTailIndex;
+			this->tailBlock = startBlock;
+			assert((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0);
+			if ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) {
+				this->tailBlock = firstAllocatedBlock;
+			}
+			while (true) {
+				index_t stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+				if (details::circular_less_than<index_t>(newTailIndex, stopIndex)) {
+					stopIndex = newTailIndex;
+				}
+				MOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) T(details::deref_noexcept(itemFirst)))) {
+					while (currentTailIndex != stopIndex) {
+						new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++);
+					}
+				}
+				else {
+					MOODYCAMEL_TRY {
+						while (currentTailIndex != stopIndex) {
+							new ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if<!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst));
+							++currentTailIndex;
+							++itemFirst;
+						}
+					}
+					MOODYCAMEL_CATCH (...) {
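+						// An element constructor threw: destroy whatever was constructed so far, clear the
+						// block index entries added for this operation, return the allocated blocks to the
+						// free list, and restore the tail before rethrowing.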
+						auto constructedStopIndex = currentTailIndex;
+						auto lastBlockEnqueued = this->tailBlock;
+						
+						if (!details::is_trivially_destructible<T>::value) {
+							auto block = startBlock;
+							if ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {
+								block = firstAllocatedBlock;
+							}
+							currentTailIndex = startTailIndex;
+							while (true) {
+								stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+								if (details::circular_less_than<index_t>(constructedStopIndex, stopIndex)) {
+									stopIndex = constructedStopIndex;
+								}
+								while (currentTailIndex != stopIndex) {
+									(*block)[currentTailIndex++]->~T();
+								}
+								if (block == lastBlockEnqueued) {
+									break;
+								}
+								block = block->next;
+							}
+						}
+						
+						currentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);
+						for (auto block = firstAllocatedBlock; block != nullptr; block = block->next) {
+							currentTailIndex += static_cast<index_t>(BLOCK_SIZE);
+							auto idxEntry = get_block_index_entry_for_index(currentTailIndex);
+							idxEntry->value.store(nullptr, std::memory_order_relaxed);
+							rewind_block_index_tail();
+						}
+						this->parent->add_blocks_to_free_list(firstAllocatedBlock);
+						this->tailBlock = startBlock;
+						MOODYCAMEL_RETHROW;
+					}
+				}
+				
+				if (this->tailBlock == endBlock) {
+					assert(currentTailIndex == newTailIndex);
+					break;
+				}
+				this->tailBlock = this->tailBlock->next;
+			}
+			this->tailIndex.store(newTailIndex, std::memory_order_release);
+			return true;
+		}
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+		
+		template<typename It>
+		size_t dequeue_bulk(It& itemFirst, size_t max)
+		{
+			auto tail = this->tailIndex.load(std::memory_order_relaxed);
+			auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);
+			auto desiredCount = static_cast<size_t>(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit));
+			if (details::circular_less_than<size_t>(0, desiredCount)) {
+				desiredCount = desiredCount < max ? desiredCount : max;
+				std::atomic_thread_fence(std::memory_order_acquire);
+				
+				auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed);
+				
+				tail = this->tailIndex.load(std::memory_order_acquire);
+				auto actualCount = static_cast<size_t>(tail - (myDequeueCount - overcommit));
+				if (details::circular_less_than<size_t>(0, actualCount)) {
+					actualCount = desiredCount < actualCount ? desiredCount : actualCount;
+					if (actualCount < desiredCount) {
+						this->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release);
+					}
+					
+					// Get the first index. Note that since there's guaranteed to be at least actualCount elements, this
+					// will never exceed tail.
+					auto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel);
+					
+					// Iterate the blocks and dequeue
+					auto index = firstIndex;
+					BlockIndexHeader* localBlockIndex;
+					auto indexIndex = get_block_index_index_for_index(index, localBlockIndex);
+					do {
+						auto blockStartIndex = index;
+						index_t endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+						endIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;
+						
+						auto entry = localBlockIndex->index[indexIndex];
+						auto block = entry->value.load(std::memory_order_relaxed);
+						if (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) {
+							while (index != endIndex) {
+								auto& el = *((*block)[index]);
+								*itemFirst++ = std::move(el);
+								el.~T();
+								++index;
+							}
+						}
+						else {
+							MOODYCAMEL_TRY {
+								while (index != endIndex) {
+									auto& el = *((*block)[index]);
+									*itemFirst = std::move(el);
+									++itemFirst;
+									el.~T();
+									++index;
+								}
+							}
+							MOODYCAMEL_CATCH (...) {
+								do {
+									entry = localBlockIndex->index[indexIndex];
+									block = entry->value.load(std::memory_order_relaxed);
+									while (index != endIndex) {
+										(*block)[index++]->~T();
+									}
+									
+									if (block->ConcurrentQueue::Block::template set_many_empty<implicit_context>(blockStartIndex, static_cast<size_t>(endIndex - blockStartIndex))) {
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
+										debug::DebugLock lock(mutex);
+#endif
+										entry->value.store(nullptr, std::memory_order_relaxed);
+										this->parent->add_block_to_free_list(block);
+									}
+									indexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1);
+									
+									blockStartIndex = index;
+									endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+									endIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;
+								} while (index != firstIndex + actualCount);
+								
+								MOODYCAMEL_RETHROW;
+							}
+						}
+						if (block->ConcurrentQueue::Block::template set_many_empty<implicit_context>(blockStartIndex, static_cast<size_t>(endIndex - blockStartIndex))) {
+							{
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
+								debug::DebugLock lock(mutex);
+#endif
+								// Note that the set_many_empty above did a release, meaning that anybody who acquires the block
+								// we're about to free can use it safely since our writes (and reads!) will have happened-before then.
+								entry->value.store(nullptr, std::memory_order_relaxed);
+							}
+							this->parent->add_block_to_free_list(block);		// releases the above store
+						}
+						indexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1);
+					} while (index != firstIndex + actualCount);
+					
+					return actualCount;
+				}
+				else {
+					this->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release);
+				}
+			}
+			
+			return 0;
+		}
+		
+	private:
+		// The block size must be > 1, so any number with the low bit set is an invalid block base index
+		static const index_t INVALID_BLOCK_BASE = 1;
+		
+		struct BlockIndexEntry
+		{
+			std::atomic<index_t> key;
+			std::atomic<Block*> value;
+		};
+		
+		struct BlockIndexHeader
+		{
+			size_t capacity;
+			std::atomic<size_t> tail;
+			BlockIndexEntry* entries;
+			BlockIndexEntry** index;
+			BlockIndexHeader* prev;
+		};
+		
+		template<AllocationMode allocMode>
+		inline bool insert_block_index_entry(BlockIndexEntry*& idxEntry, index_t blockStartIndex)
+		{
+			auto localBlockIndex = blockIndex.load(std::memory_order_relaxed);		// We're the only writer thread, relaxed is OK
+			if (localBlockIndex == nullptr) {
+				return false;  // this can happen if new_block_index failed in the constructor
+			}
+			size_t newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1);
+			idxEntry = localBlockIndex->index[newTail];
+			if (idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE ||
+				idxEntry->value.load(std::memory_order_relaxed) == nullptr) {
+				
+				idxEntry->key.store(blockStartIndex, std::memory_order_relaxed);
+				localBlockIndex->tail.store(newTail, std::memory_order_release);
+				return true;
+			}
+			
+			// No room in the old block index, try to allocate another one!
+			MOODYCAMEL_CONSTEXPR_IF (allocMode == CannotAlloc) {
+				return false;
+			}
+			else if (!new_block_index()) {
+				return false;
+			}
+			else {
+				localBlockIndex = blockIndex.load(std::memory_order_relaxed);
+				newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1);
+				idxEntry = localBlockIndex->index[newTail];
+				assert(idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE);
+				idxEntry->key.store(blockStartIndex, std::memory_order_relaxed);
+				localBlockIndex->tail.store(newTail, std::memory_order_release);
+				return true;
+			}
+		}
+		
+		inline void rewind_block_index_tail()
+		{
+			auto localBlockIndex = blockIndex.load(std::memory_order_relaxed);
+			localBlockIndex->tail.store((localBlockIndex->tail.load(std::memory_order_relaxed) - 1) & (localBlockIndex->capacity - 1), std::memory_order_relaxed);
+		}
+		
+		inline BlockIndexEntry* get_block_index_entry_for_index(index_t index) const
+		{
+			BlockIndexHeader* localBlockIndex;
+			auto idx = get_block_index_index_for_index(index, localBlockIndex);
+			return localBlockIndex->index[idx];
+		}
+		
+		inline size_t get_block_index_index_for_index(index_t index, BlockIndexHeader*& localBlockIndex) const
+		{
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
+			debug::DebugLock lock(mutex);
+#endif
+			index &= ~static_cast<index_t>(BLOCK_SIZE - 1);
+			localBlockIndex = blockIndex.load(std::memory_order_acquire);
+			auto tail = localBlockIndex->tail.load(std::memory_order_acquire);
+			auto tailBase = localBlockIndex->index[tail]->key.load(std::memory_order_relaxed);
+			assert(tailBase != INVALID_BLOCK_BASE);
+			// Note: Must use division instead of shift because the index may wrap around, causing a negative
+			// offset, whose negativity we want to preserve
+			auto offset = static_cast<size_t>(static_cast<typename std::make_signed<index_t>::type>(index - tailBase) / static_cast<typename std::make_signed<index_t>::type>(BLOCK_SIZE));
+			size_t idx = (tail + offset) & (localBlockIndex->capacity - 1);
+			assert(localBlockIndex->index[idx]->key.load(std::memory_order_relaxed) == index && localBlockIndex->index[idx]->value.load(std::memory_order_relaxed) != nullptr);
+			return idx;
+		}
+		
+		bool new_block_index()
+		{
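+			// Each resize doubles the total capacity: only as many new entries as the previous index
+			// held are allocated here; the old entries stay alive and are re-linked through the new
+			// pointer array, while old headers remain reachable via prev so they can be freed later.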
+			auto prev = blockIndex.load(std::memory_order_relaxed);
+			size_t prevCapacity = prev == nullptr ? 0 : prev->capacity;
+			auto entryCount = prev == nullptr ? nextBlockIndexCapacity : prevCapacity;
+			auto raw = static_cast<char*>((Traits::malloc)(
+				sizeof(BlockIndexHeader) +
+				std::alignment_of<BlockIndexEntry>::value - 1 + sizeof(BlockIndexEntry) * entryCount +
+				std::alignment_of<BlockIndexEntry*>::value - 1 + sizeof(BlockIndexEntry*) * nextBlockIndexCapacity));
+			if (raw == nullptr) {
+				return false;
+			}
+			
+			auto header = new (raw) BlockIndexHeader;
+			auto entries = reinterpret_cast<BlockIndexEntry*>(details::align_for<BlockIndexEntry>(raw + sizeof(BlockIndexHeader)));
+			auto index = reinterpret_cast<BlockIndexEntry**>(details::align_for<BlockIndexEntry*>(reinterpret_cast<char*>(entries) + sizeof(BlockIndexEntry) * entryCount));
+			if (prev != nullptr) {
+				auto prevTail = prev->tail.load(std::memory_order_relaxed);
+				auto prevPos = prevTail;
+				size_t i = 0;
+				do {
+					prevPos = (prevPos + 1) & (prev->capacity - 1);
+					index[i++] = prev->index[prevPos];
+				} while (prevPos != prevTail);
+				assert(i == prevCapacity);
+			}
+			for (size_t i = 0; i != entryCount; ++i) {
+				new (entries + i) BlockIndexEntry;
+				entries[i].key.store(INVALID_BLOCK_BASE, std::memory_order_relaxed);
+				index[prevCapacity + i] = entries + i;
+			}
+			header->prev = prev;
+			header->entries = entries;
+			header->index = index;
+			header->capacity = nextBlockIndexCapacity;
+			header->tail.store((prevCapacity - 1) & (nextBlockIndexCapacity - 1), std::memory_order_relaxed);
+			
+			blockIndex.store(header, std::memory_order_release);
+			
+			nextBlockIndexCapacity <<= 1;
+			
+			return true;
+		}
+		
+	private:
+		size_t nextBlockIndexCapacity;
+		std::atomic<BlockIndexHeader*> blockIndex;
+
+#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED
+	public:
+		details::ThreadExitListener threadExitListener;
+	private:
+#endif
+		
+#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG
+	public:
+		ImplicitProducer* nextImplicitProducer;
+	private:
+#endif
+
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
+		mutable debug::DebugMutex mutex;
+#endif
+#ifdef MCDBGQ_TRACKMEM
+		friend struct MemStats;
+#endif
+	};
+	
+	
+	//////////////////////////////////
+	// Block pool manipulation
+	//////////////////////////////////
+	
+	void populate_initial_block_list(size_t blockCount)
+	{
+		initialBlockPoolSize = blockCount;
+		if (initialBlockPoolSize == 0) {
+			initialBlockPool = nullptr;
+			return;
+		}
+		
+		initialBlockPool = create_array<Block>(blockCount);
+		if (initialBlockPool == nullptr) {
+			initialBlockPoolSize = 0;
+		}
+		for (size_t i = 0; i < initialBlockPoolSize; ++i) {
+			initialBlockPool[i].dynamicallyAllocated = false;
+		}
+	}
+	
+	inline Block* try_get_block_from_initial_pool()
+	{
+		if (initialBlockPoolIndex.load(std::memory_order_relaxed) >= initialBlockPoolSize) {
+			return nullptr;
+		}
+		
+		auto index = initialBlockPoolIndex.fetch_add(1, std::memory_order_relaxed);
+		
+		return index < initialBlockPoolSize ? (initialBlockPool + index) : nullptr;
+	}
+	
+	inline void add_block_to_free_list(Block* block)
+	{
+#ifdef MCDBGQ_TRACKMEM
+		block->owner = nullptr;
+#endif
+		if (!Traits::RECYCLE_ALLOCATED_BLOCKS && block->dynamicallyAllocated) {
+			destroy(block);
+		}
+		else {
+			freeList.add(block);
+		}
+	}
+	
+	inline void add_blocks_to_free_list(Block* block)
+	{
+		while (block != nullptr) {
+			auto next = block->next;
+			add_block_to_free_list(block);
+			block = next;
+		}
+	}
+	
+	inline Block* try_get_block_from_free_list()
+	{
+		return freeList.try_get();
+	}
+	
+	// Gets a free block from one of the memory pools, or allocates a new one (if applicable)
+	template<AllocationMode canAlloc>
+	Block* requisition_block()
+	{
+		auto block = try_get_block_from_initial_pool();
+		if (block != nullptr) {
+			return block;
+		}
+		
+		block = try_get_block_from_free_list();
+		if (block != nullptr) {
+			return block;
+		}
+		
+		MOODYCAMEL_CONSTEXPR_IF (canAlloc == CanAlloc) {
+			return create<Block>();
+		}
+		else {
+			return nullptr;
+		}
+	}
+	
+
+#ifdef MCDBGQ_TRACKMEM
+	public:
+		struct MemStats {
+			size_t allocatedBlocks;
+			size_t usedBlocks;
+			size_t freeBlocks;
+			size_t ownedBlocksExplicit;
+			size_t ownedBlocksImplicit;
+			size_t implicitProducers;
+			size_t explicitProducers;
+			size_t elementsEnqueued;
+			size_t blockClassBytes;
+			size_t queueClassBytes;
+			size_t implicitBlockIndexBytes;
+			size_t explicitBlockIndexBytes;
+			
+			friend class ConcurrentQueue;
+			
+		private:
+			static MemStats getFor(ConcurrentQueue* q)
+			{
+				MemStats stats = { 0 };
+				
+				stats.elementsEnqueued = q->size_approx();
+			
+				auto block = q->freeList.head_unsafe();
+				while (block != nullptr) {
+					++stats.allocatedBlocks;
+					++stats.freeBlocks;
+					block = block->freeListNext.load(std::memory_order_relaxed);
+				}
+				
+				for (auto ptr = q->producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {
+					bool implicit = dynamic_cast<ImplicitProducer*>(ptr) != nullptr;
+					stats.implicitProducers += implicit ? 1 : 0;
+					stats.explicitProducers += implicit ? 0 : 1;
+					
+					if (implicit) {
+						auto prod = static_cast<ImplicitProducer*>(ptr);
+						stats.queueClassBytes += sizeof(ImplicitProducer);
+						auto head = prod->headIndex.load(std::memory_order_relaxed);
+						auto tail = prod->tailIndex.load(std::memory_order_relaxed);
+						auto hash = prod->blockIndex.load(std::memory_order_relaxed);
+						if (hash != nullptr) {
+							for (size_t i = 0; i != hash->capacity; ++i) {
+								if (hash->index[i]->key.load(std::memory_order_relaxed) != ImplicitProducer::INVALID_BLOCK_BASE && hash->index[i]->value.load(std::memory_order_relaxed) != nullptr) {
+									++stats.allocatedBlocks;
+									++stats.ownedBlocksImplicit;
+								}
+							}
+							stats.implicitBlockIndexBytes += hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry);
+							for (; hash != nullptr; hash = hash->prev) {
+								stats.implicitBlockIndexBytes += sizeof(typename ImplicitProducer::BlockIndexHeader) + hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry*);
+							}
+						}
+						for (; details::circular_less_than<index_t>(head, tail); head += BLOCK_SIZE) {
+							//auto block = prod->get_block_index_entry_for_index(head);
+							++stats.usedBlocks;
+						}
+					}
+					else {
+						auto prod = static_cast<ExplicitProducer*>(ptr);
+						stats.queueClassBytes += sizeof(ExplicitProducer);
+						auto tailBlock = prod->tailBlock;
+						bool wasNonEmpty = false;
+						if (tailBlock != nullptr) {
+							auto block = tailBlock;
+							do {
+								++stats.allocatedBlocks;
+								if (!block->ConcurrentQueue::Block::template is_empty<explicit_context>() || wasNonEmpty) {
+									++stats.usedBlocks;
+									wasNonEmpty = wasNonEmpty || block != tailBlock;
+								}
+								++stats.ownedBlocksExplicit;
+								block = block->next;
+							} while (block != tailBlock);
+						}
+						auto index = prod->blockIndex.load(std::memory_order_relaxed);
+						while (index != nullptr) {
+							stats.explicitBlockIndexBytes += sizeof(typename ExplicitProducer::BlockIndexHeader) + index->size * sizeof(typename ExplicitProducer::BlockIndexEntry);
+							index = static_cast<typename ExplicitProducer::BlockIndexHeader*>(index->prev);
+						}
+					}
+				}
+				
+				auto freeOnInitialPool = q->initialBlockPoolIndex.load(std::memory_order_relaxed) >= q->initialBlockPoolSize ? 0 : q->initialBlockPoolSize - q->initialBlockPoolIndex.load(std::memory_order_relaxed);
+				stats.allocatedBlocks += freeOnInitialPool;
+				stats.freeBlocks += freeOnInitialPool;
+				
+				stats.blockClassBytes = sizeof(Block) * stats.allocatedBlocks;
+				stats.queueClassBytes += sizeof(ConcurrentQueue);
+				
+				return stats;
+			}
+		};
+		
+		// For debugging only. Not thread-safe.
+		MemStats getMemStats()
+		{
+			return MemStats::getFor(this);
+		}
+	private:
+		friend struct MemStats;
+#endif
+	
+	
+	//////////////////////////////////
+	// Producer list manipulation
+	//////////////////////////////////	
+	
+	ProducerBase* recycle_or_create_producer(bool isExplicit)
+	{
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH
+		debug::DebugLock lock(implicitProdMutex);
+#endif
+		// Try to re-use one first
+		for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {
+			if (ptr->inactive.load(std::memory_order_relaxed) && ptr->isExplicit == isExplicit) {
+				bool expected = true;
+				if (ptr->inactive.compare_exchange_strong(expected, /* desired */ false, std::memory_order_acquire, std::memory_order_relaxed)) {
+					// We caught one! It's been marked as activated, the caller can have it
+					return ptr;
+				}
+			}
+		}
+
+		return add_producer(isExplicit ? static_cast<ProducerBase*>(create<ExplicitProducer>(this)) : create<ImplicitProducer>(this));
+	}
+	
+	ProducerBase* add_producer(ProducerBase* producer)
+	{
+		// Handle failed memory allocation
+		if (producer == nullptr) {
+			return nullptr;
+		}
+		
+		producerCount.fetch_add(1, std::memory_order_relaxed);
+		
+		// Add it to the lock-free list
+		auto prevTail = producerListTail.load(std::memory_order_relaxed);
+		do {
+			producer->next = prevTail;
+		} while (!producerListTail.compare_exchange_weak(prevTail, producer, std::memory_order_release, std::memory_order_relaxed));
+		
+#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG
+		if (producer->isExplicit) {
+			auto prevTailExplicit = explicitProducers.load(std::memory_order_relaxed);
+			do {
+				static_cast<ExplicitProducer*>(producer)->nextExplicitProducer = prevTailExplicit;
+			} while (!explicitProducers.compare_exchange_weak(prevTailExplicit, static_cast<ExplicitProducer*>(producer), std::memory_order_release, std::memory_order_relaxed));
+		}
+		else {
+			auto prevTailImplicit = implicitProducers.load(std::memory_order_relaxed);
+			do {
+				static_cast<ImplicitProducer*>(producer)->nextImplicitProducer = prevTailImplicit;
+			} while (!implicitProducers.compare_exchange_weak(prevTailImplicit, static_cast<ImplicitProducer*>(producer), std::memory_order_release, std::memory_order_relaxed));
+		}
+#endif
+		
+		return producer;
+	}
+	
+	void reown_producers()
+	{
+		// After another instance is moved-into/swapped-with this one, all the
+		// producers we stole still think their parents are the other queue.
+		// So fix them up!
+		for (auto ptr = producerListTail.load(std::memory_order_relaxed); ptr != nullptr; ptr = ptr->next_prod()) {
+			ptr->parent = this;
+		}
+	}
+	
+	
+	//////////////////////////////////
+	// Implicit producer hash
+	//////////////////////////////////
+	
+	struct ImplicitProducerKVP
+	{
+		std::atomic<details::thread_id_t> key;
+		ImplicitProducer* value;		// No need for atomicity since it's only read by the thread that sets it in the first place
+		
+		ImplicitProducerKVP() : value(nullptr) { }
+		
+		ImplicitProducerKVP(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT
+		{
+			key.store(other.key.load(std::memory_order_relaxed), std::memory_order_relaxed);
+			value = other.value;
+		}
+		
+		inline ImplicitProducerKVP& operator=(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT
+		{
+			swap(other);
+			return *this;
+		}
+		
+		inline void swap(ImplicitProducerKVP& other) MOODYCAMEL_NOEXCEPT
+		{
+			if (this != &other) {
+				details::swap_relaxed(key, other.key);
+				std::swap(value, other.value);
+			}
+		}
+	};
+	
+	template<typename XT, typename XTraits>
+	friend void moodycamel::swap(typename ConcurrentQueue<XT, XTraits>::ImplicitProducerKVP&, typename ConcurrentQueue<XT, XTraits>::ImplicitProducerKVP&) MOODYCAMEL_NOEXCEPT;
+	
+	struct ImplicitProducerHash
+	{
+		size_t capacity;
+		ImplicitProducerKVP* entries;
+		ImplicitProducerHash* prev;
+	};
+	
+	inline void populate_initial_implicit_producer_hash()
+	{
+		MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) {
+			return;
+		}
+		else {
+			implicitProducerHashCount.store(0, std::memory_order_relaxed);
+			auto hash = &initialImplicitProducerHash;
+			hash->capacity = INITIAL_IMPLICIT_PRODUCER_HASH_SIZE;
+			hash->entries = &initialImplicitProducerHashEntries[0];
+			for (size_t i = 0; i != INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; ++i) {
+				initialImplicitProducerHashEntries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed);
+			}
+			hash->prev = nullptr;
+			implicitProducerHash.store(hash, std::memory_order_relaxed);
+		}
+	}
+	
+	void swap_implicit_producer_hashes(ConcurrentQueue& other)
+	{
+		MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) {
+			return;
+		}
+		else {
+			// Swap (assumes our implicit producer hash is initialized)
+			initialImplicitProducerHashEntries.swap(other.initialImplicitProducerHashEntries);
+			initialImplicitProducerHash.entries = &initialImplicitProducerHashEntries[0];
+			other.initialImplicitProducerHash.entries = &other.initialImplicitProducerHashEntries[0];
+			
+			details::swap_relaxed(implicitProducerHashCount, other.implicitProducerHashCount);
+			
+			details::swap_relaxed(implicitProducerHash, other.implicitProducerHash);
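+			// The initial hash tables are embedded in each queue and cannot be swapped by address, so
+			// re-point whichever link (the list head or a prev pointer) now refers to the other queue's
+			// embedded table back at our own table, and vice versa.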
+			if (implicitProducerHash.load(std::memory_order_relaxed) == &other.initialImplicitProducerHash) {
+				implicitProducerHash.store(&initialImplicitProducerHash, std::memory_order_relaxed);
+			}
+			else {
+				ImplicitProducerHash* hash;
+				for (hash = implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &other.initialImplicitProducerHash; hash = hash->prev) {
+					continue;
+				}
+				hash->prev = &initialImplicitProducerHash;
+			}
+			if (other.implicitProducerHash.load(std::memory_order_relaxed) == &initialImplicitProducerHash) {
+				other.implicitProducerHash.store(&other.initialImplicitProducerHash, std::memory_order_relaxed);
+			}
+			else {
+				ImplicitProducerHash* hash;
+				for (hash = other.implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &initialImplicitProducerHash; hash = hash->prev) {
+					continue;
+				}
+				hash->prev = &other.initialImplicitProducerHash;
+			}
+		}
+	}
+	
+	// Only fails (returns nullptr) if memory allocation fails
+	ImplicitProducer* get_or_add_implicit_producer()
+	{
+		// Note that since the data is essentially thread-local (key is thread ID),
+		// there's a reduced need for fences (memory ordering is already consistent
+		// for any individual thread), except for the current table itself.
+		
+		// Start by looking for the thread ID in the current and all previous hash tables.
+		// If it's not found, it must not be in there yet, since this same thread would
+		// have added it previously to one of the tables that we traversed.
+		
+		// Code and algorithm adapted from http://preshing.com/20130605/the-worlds-simplest-lock-free-hash-table
+		
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH
+		debug::DebugLock lock(implicitProdMutex);
+#endif
+		
+		auto id = details::thread_id();
+		auto hashedId = details::hash_thread_id(id);
+		
+		auto mainHash = implicitProducerHash.load(std::memory_order_acquire);
+		assert(mainHash != nullptr);  // silence clang-tidy and MSVC warnings (hash cannot be null)
+		for (auto hash = mainHash; hash != nullptr; hash = hash->prev) {
+			// Look for the id in this hash
+			auto index = hashedId;
+			while (true) {		// Not an infinite loop because at least one slot is free in the hash table
+				index &= hash->capacity - 1u;
+				
+				auto probedKey = hash->entries[index].key.load(std::memory_order_relaxed);
+				if (probedKey == id) {
+					// Found it! If we had to search several hashes deep, though, we should lazily add it
+					// to the current main hash table to avoid the extended search next time.
+					// Note there's guaranteed to be room in the current hash table since every subsequent
+					// table implicitly reserves space for all previous tables (there's only one
+					// implicitProducerHashCount).
+					auto value = hash->entries[index].value;
+					if (hash != mainHash) {
+						index = hashedId;
+						while (true) {
+							index &= mainHash->capacity - 1u;
+							auto empty = details::invalid_thread_id;
+#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED
+							auto reusable = details::invalid_thread_id2;
+							if (mainHash->entries[index].key.compare_exchange_strong(empty,    id, std::memory_order_seq_cst, std::memory_order_relaxed) ||
+								mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_seq_cst, std::memory_order_relaxed)) {
+#else
+							if (mainHash->entries[index].key.compare_exchange_strong(empty,    id, std::memory_order_seq_cst, std::memory_order_relaxed)) {
+#endif
+								mainHash->entries[index].value = value;
+								break;
+							}
+							++index;
+						}
+					}
+					
+					return value;
+				}
+				if (probedKey == details::invalid_thread_id) {
+					break;		// Not in this hash table
+				}
+				++index;
+			}
+		}
+		
+		// Insert!
+		auto newCount = 1 + implicitProducerHashCount.fetch_add(1, std::memory_order_relaxed);
+		while (true) {
+			// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
+			if (newCount >= (mainHash->capacity >> 1) && !implicitProducerHashResizeInProgress.test_and_set(std::memory_order_acquire)) {
+				// We've acquired the resize lock, try to allocate a bigger hash table.
+				// Note the acquire fence synchronizes with the release fence at the end of this block, and hence when
+				// we reload implicitProducerHash it must be the most recent version (it only gets changed within this
+				// locked block).
+				mainHash = implicitProducerHash.load(std::memory_order_acquire);
+				if (newCount >= (mainHash->capacity >> 1)) {
+					size_t newCapacity = mainHash->capacity << 1;
+					while (newCount >= (newCapacity >> 1)) {
+						newCapacity <<= 1;
+					}
+					auto raw = static_cast<char*>((Traits::malloc)(sizeof(ImplicitProducerHash) + std::alignment_of<ImplicitProducerKVP>::value - 1 + sizeof(ImplicitProducerKVP) * newCapacity));
+					if (raw == nullptr) {
+						// Allocation failed
+						implicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed);
+						implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);
+						return nullptr;
+					}
+					
+					auto newHash = new (raw) ImplicitProducerHash;
+					newHash->capacity = static_cast<size_t>(newCapacity);
+					newHash->entries = reinterpret_cast<ImplicitProducerKVP*>(details::align_for<ImplicitProducerKVP>(raw + sizeof(ImplicitProducerHash)));
+					for (size_t i = 0; i != newCapacity; ++i) {
+						new (newHash->entries + i) ImplicitProducerKVP;
+						newHash->entries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed);
+					}
+					newHash->prev = mainHash;
+					implicitProducerHash.store(newHash, std::memory_order_release);
+					implicitProducerHashResizeInProgress.clear(std::memory_order_release);
+					mainHash = newHash;
+				}
+				else {
+					implicitProducerHashResizeInProgress.clear(std::memory_order_release);
+				}
+			}
+			
+			// If it's < three-quarters full, add to the old one anyway so that we don't have to wait for the next table
+			// to finish being allocated by another thread (and if we just finished allocating above, the condition will
+			// always be true)
+			if (newCount < (mainHash->capacity >> 1) + (mainHash->capacity >> 2)) {
+				auto producer = static_cast<ImplicitProducer*>(recycle_or_create_producer(false));
+				if (producer == nullptr) {
+					implicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed);
+					return nullptr;
+				}
+				
+#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED
+				producer->threadExitListener.callback = &ConcurrentQueue::implicit_producer_thread_exited_callback;
+				producer->threadExitListener.userData = producer;
+				details::ThreadExitNotifier::subscribe(&producer->threadExitListener);
+#endif
+				
+				auto index = hashedId;
+				while (true) {
+					index &= mainHash->capacity - 1u;
+					auto empty = details::invalid_thread_id;
+#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED
+					auto reusable = details::invalid_thread_id2;
+					if (mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_seq_cst, std::memory_order_relaxed)) {
+						implicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed);  // already counted as a used slot
+						mainHash->entries[index].value = producer;
+						break;
+					}
+#endif
+					if (mainHash->entries[index].key.compare_exchange_strong(empty,    id, std::memory_order_seq_cst, std::memory_order_relaxed)) {
+						mainHash->entries[index].value = producer;
+						break;
+					}
+					++index;
+				}
+				return producer;
+			}
+			
+			// Hmm, the old hash is quite full and somebody else is busy allocating a new one.
+			// We need to wait for the allocating thread to finish (if it succeeds, we add, if not,
+			// we try to allocate ourselves).
+			mainHash = implicitProducerHash.load(std::memory_order_acquire);
+		}
+	}
+	
+#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED
+	void implicit_producer_thread_exited(ImplicitProducer* producer)
+	{
+		// Remove from hash
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH
+		debug::DebugLock lock(implicitProdMutex);
+#endif
+		auto hash = implicitProducerHash.load(std::memory_order_acquire);
+		assert(hash != nullptr);		// The thread exit listener is only registered if we were added to a hash in the first place
+		auto id = details::thread_id();
+		auto hashedId = details::hash_thread_id(id);
+		details::thread_id_t probedKey;
+		
+		// We need to traverse all the hashes just in case other threads aren't on the current one yet and are
+		// trying to add an entry thinking there's a free slot (because they reused a producer)
+		for (; hash != nullptr; hash = hash->prev) {
+			auto index = hashedId;
+			do {
+				index &= hash->capacity - 1u;
+				probedKey = id;
+				if (hash->entries[index].key.compare_exchange_strong(probedKey, details::invalid_thread_id2, std::memory_order_seq_cst, std::memory_order_relaxed)) {
+					break;
+				}
+				++index;
+			} while (probedKey != details::invalid_thread_id);		// Can happen if the hash has changed but we weren't put back in it yet, or if we weren't added to this hash in the first place
+		}
+		
+		// Mark the queue as being recyclable
+		producer->inactive.store(true, std::memory_order_release);
+	}
+	
+	static void implicit_producer_thread_exited_callback(void* userData)
+	{
+		auto producer = static_cast<ImplicitProducer*>(userData);
+		auto queue = producer->parent;
+		queue->implicit_producer_thread_exited(producer);
+	}
+#endif
+	
+	//////////////////////////////////
+	// Utility functions
+	//////////////////////////////////
+
+	template<typename TAlign>
+	static inline void* aligned_malloc(size_t size)
+	{
+		MOODYCAMEL_CONSTEXPR_IF (std::alignment_of<TAlign>::value <= std::alignment_of<details::max_align_t>::value)
+			return (Traits::malloc)(size);
+		else {
+			size_t alignment = std::alignment_of<TAlign>::value;
+			void* raw = (Traits::malloc)(size + alignment - 1 + sizeof(void*));
+			if (!raw)
+				return nullptr;
+			char* ptr = details::align_for<TAlign>(reinterpret_cast<char*>(raw) + sizeof(void*));
+			*(reinterpret_cast<void**>(ptr) - 1) = raw;
+			return ptr;
+		}
+	}
+
+	template<typename TAlign>
+	static inline void aligned_free(void* ptr)
+	{
+		MOODYCAMEL_CONSTEXPR_IF (std::alignment_of<TAlign>::value <= std::alignment_of<details::max_align_t>::value)
+			return (Traits::free)(ptr);
+		else
+			(Traits::free)(ptr ? *(reinterpret_cast<void**>(ptr) - 1) : nullptr);
+	}
+
+	template<typename U>
+	static inline U* create_array(size_t count)
+	{
+		assert(count > 0);
+		U* p = static_cast<U*>(aligned_malloc<U>(sizeof(U) * count));
+		if (p == nullptr)
+			return nullptr;
+
+		for (size_t i = 0; i != count; ++i)
+			new (p + i) U();
+		return p;
+	}
+
+	template<typename U>
+	static inline void destroy_array(U* p, size_t count)
+	{
+		if (p != nullptr) {
+			assert(count > 0);
+			for (size_t i = count; i != 0; )
+				(p + --i)->~U();
+		}
+		aligned_free<U>(p);
+	}
+
+	template<typename U>
+	static inline U* create()
+	{
+		void* p = aligned_malloc<U>(sizeof(U));
+		return p != nullptr ? new (p) U : nullptr;
+	}
+
+	template<typename U, typename A1>
+	static inline U* create(A1&& a1)
+	{
+		void* p = aligned_malloc<U>(sizeof(U));
+		return p != nullptr ? new (p) U(std::forward<A1>(a1)) : nullptr;
+	}
+
+	template<typename U>
+	static inline void destroy(U* p)
+	{
+		if (p != nullptr)
+			p->~U();
+		aligned_free<U>(p);
+	}
+
+private:
+	std::atomic<ProducerBase*> producerListTail;
+	std::atomic<std::uint32_t> producerCount;
+	
+	std::atomic<size_t> initialBlockPoolIndex;
+	Block* initialBlockPool;
+	size_t initialBlockPoolSize;
+	
+#ifndef MCDBGQ_USEDEBUGFREELIST
+	FreeList<Block> freeList;
+#else
+	debug::DebugFreeList<Block> freeList;
+#endif
+	
+	std::atomic<ImplicitProducerHash*> implicitProducerHash;
+	std::atomic<size_t> implicitProducerHashCount;		// Number of slots logically used
+	ImplicitProducerHash initialImplicitProducerHash;
+	std::array<ImplicitProducerKVP, INITIAL_IMPLICIT_PRODUCER_HASH_SIZE> initialImplicitProducerHashEntries;
+	std::atomic_flag implicitProducerHashResizeInProgress;
+	
+	std::atomic<std::uint32_t> nextExplicitConsumerId;
+	std::atomic<std::uint32_t> globalExplicitConsumerOffset;
+	
+#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH
+	debug::DebugMutex implicitProdMutex;
+#endif
+	
+#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG
+	std::atomic<ExplicitProducer*> explicitProducers;
+	std::atomic<ImplicitProducer*> implicitProducers;
+#endif
+};
+
+
+template<typename T, typename Traits>
+ProducerToken::ProducerToken(ConcurrentQueue<T, Traits>& queue)
+	: producer(queue.recycle_or_create_producer(true))
+{
+	if (producer != nullptr) {
+		producer->token = this;
+	}
+}
+
+template<typename T, typename Traits>
+ProducerToken::ProducerToken(BlockingConcurrentQueue<T, Traits>& queue)
+	: producer(reinterpret_cast<ConcurrentQueue<T, Traits>*>(&queue)->recycle_or_create_producer(true))
+{
+	if (producer != nullptr) {
+		producer->token = this;
+	}
+}
+
+template<typename T, typename Traits>
+ConsumerToken::ConsumerToken(ConcurrentQueue<T, Traits>& queue)
+	: itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr)
+{
+	initialOffset = queue.nextExplicitConsumerId.fetch_add(1, std::memory_order_release);
+	lastKnownGlobalOffset = static_cast<std::uint32_t>(-1);
+}
+
+template<typename T, typename Traits>
+ConsumerToken::ConsumerToken(BlockingConcurrentQueue<T, Traits>& queue)
+	: itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr)
+{
+	initialOffset = reinterpret_cast<ConcurrentQueue<T, Traits>*>(&queue)->nextExplicitConsumerId.fetch_add(1, std::memory_order_release);
+	lastKnownGlobalOffset = static_cast<std::uint32_t>(-1);
+}
+
+template<typename T, typename Traits>
+inline void swap(ConcurrentQueue<T, Traits>& a, ConcurrentQueue<T, Traits>& b) MOODYCAMEL_NOEXCEPT
+{
+	a.swap(b);
+}
+
+inline void swap(ProducerToken& a, ProducerToken& b) MOODYCAMEL_NOEXCEPT
+{
+	a.swap(b);
+}
+
+inline void swap(ConsumerToken& a, ConsumerToken& b) MOODYCAMEL_NOEXCEPT
+{
+	a.swap(b);
+}
+
+template<typename T, typename Traits>
+inline void swap(typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& a, typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT
+{
+	a.swap(b);
+}
+
+}
+
+#if defined(_MSC_VER) && (!defined(_HAS_CXX17) || !_HAS_CXX17)
+#pragma warning(pop)
+#endif
+
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#pragma GCC diagnostic pop
+#endif
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/concurrentqueueConfig.cmake.in b/archive/2025/summer/bsc_karidas/external/concurrentqueue/concurrentqueueConfig.cmake.in
new file mode 100644
index 000000000..b8fad198b
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/concurrentqueueConfig.cmake.in
@@ -0,0 +1,3 @@
+@PACKAGE_INIT@
+
+include(${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@Targets.cmake)
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/internal/concurrentqueue_internal_debug.h b/archive/2025/summer/bsc_karidas/external/concurrentqueue/internal/concurrentqueue_internal_debug.h
new file mode 100644
index 000000000..6db4e226b
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/internal/concurrentqueue_internal_debug.h
@@ -0,0 +1,87 @@
+#pragma once
+
+//#define MCDBGQ_TRACKMEM 1
+//#define MCDBGQ_NOLOCKFREE_FREELIST 1
+//#define MCDBGQ_USEDEBUGFREELIST 1
+//#define MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX 1
+//#define MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH 1
+
+#if defined(_WIN32) || defined(__WINDOWS__) || defined(__WIN32__)
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+namespace moodycamel { namespace debug {
+	struct DebugMutex {
+		DebugMutex() { InitializeCriticalSectionAndSpinCount(&cs, 0x400); }
+		~DebugMutex() { DeleteCriticalSection(&cs); }
+		
+		void lock() { EnterCriticalSection(&cs); }
+		void unlock() { LeaveCriticalSection(&cs); }
+		
+	private:
+		CRITICAL_SECTION cs;
+	};
+} }
+#else
+#include <mutex>
+namespace moodycamel { namespace debug {
+	struct DebugMutex {
+		void lock() { m.lock(); }
+		void unlock() { m.unlock(); }
+		
+	private:
+		std::mutex m;
+	};
+} }
+
+#endif
+
+namespace moodycamel { namespace debug {
+	struct DebugLock {
+		explicit DebugLock(DebugMutex& mutex)
+			: mutex(mutex)
+		{
+			mutex.lock();
+		}
+		
+		~DebugLock()
+		{
+			mutex.unlock();
+		}
+		
+	private:
+		DebugMutex& mutex;
+	};
+	
+	
+	template<typename N>
+	struct DebugFreeList {
+		DebugFreeList() : head(nullptr) { }
+		DebugFreeList(DebugFreeList&& other) : head(other.head) { other.head = nullptr; }
+		void swap(DebugFreeList& other) { std::swap(head, other.head); }
+		
+		inline void add(N* node)
+		{
+			DebugLock lock(mutex);
+			node->freeListNext = head;
+			head = node;
+		}
+		
+		inline N* try_get()
+		{
+			DebugLock lock(mutex);
+			if (head == nullptr) {
+				return nullptr;
+			}
+			
+			auto prevHead = head;
+			head = head->freeListNext;
+			return prevHead;
+		}
+		
+		N* head_unsafe() const { return head; }
+		
+	private:
+		N* head;
+		DebugMutex mutex;
+	};
+} }
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/lightweightsemaphore.h b/archive/2025/summer/bsc_karidas/external/concurrentqueue/lightweightsemaphore.h
new file mode 100644
index 000000000..a04147519
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/lightweightsemaphore.h
@@ -0,0 +1,427 @@
+// Provides an efficient implementation of a semaphore (LightweightSemaphore).
+// This is an extension of Jeff Preshing's semaphore implementation (licensed 
+// under the terms of its separate zlib license) that has been adapted and
+// extended by Cameron Desrochers.
+
+#pragma once
+
+#include <cstddef> // For std::size_t
+#include <atomic>
+#include <type_traits> // For std::make_signed<T>
+
+#if defined(_WIN32)
+// Avoid including windows.h in a header; we only need a handful of
+// items, so we'll redeclare them here (this is relatively safe since
+// the API generally has to remain stable between Windows versions).
+// I know this is an ugly hack but it still beats polluting the global
+// namespace with thousands of generic names or adding a .cpp for nothing.
+extern "C" {
+	struct _SECURITY_ATTRIBUTES;
+	__declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName);
+	__declspec(dllimport) int __stdcall CloseHandle(void* hObject);
+	__declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds);
+	__declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount);
+}
+#elif defined(__MACH__)
+#include <mach/mach.h>
+#elif defined(__MVS__)
+#include <zos-semaphore.h>
+#elif defined(__unix__)
+#include <semaphore.h>
+
+#if defined(__GLIBC_PREREQ) && defined(_GNU_SOURCE)
+#if __GLIBC_PREREQ(2,30)
+#define MOODYCAMEL_LIGHTWEIGHTSEMAPHORE_MONOTONIC
+#endif
+#endif
+#endif
+
+namespace moodycamel
+{
+namespace details
+{
+
+// Code in the mpmc_sema namespace below is an adaptation of Jeff Preshing's
+// portable + lightweight semaphore implementations, originally from
+// https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h
+// LICENSE:
+// Copyright (c) 2015 Jeff Preshing
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+//	claim that you wrote the original software. If you use this software
+//	in a product, an acknowledgement in the product documentation would be
+//	appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+//	misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+#if defined(_WIN32)
+class Semaphore
+{
+private:
+	void* m_hSema;
+	
+	Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;
+	Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;
+
+public:
+	Semaphore(int initialCount = 0)
+	{
+		assert(initialCount >= 0);
+		const long maxLong = 0x7fffffff;
+		m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);
+		assert(m_hSema);
+	}
+
+	~Semaphore()
+	{
+		CloseHandle(m_hSema);
+	}
+
+	bool wait()
+	{
+		const unsigned long infinite = 0xffffffff;
+		return WaitForSingleObject(m_hSema, infinite) == 0;
+	}
+	
+	bool try_wait()
+	{
+		return WaitForSingleObject(m_hSema, 0) == 0;
+	}
+	
+	bool timed_wait(std::uint64_t usecs)
+	{
+		return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) == 0;
+	}
+
+	void signal(int count = 1)
+	{
+		while (!ReleaseSemaphore(m_hSema, count, nullptr));
+	}
+};
+#elif defined(__MACH__)
+//---------------------------------------------------------
+// Semaphore (Apple iOS and OSX)
+// Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
+//---------------------------------------------------------
+class Semaphore
+{
+private:
+	semaphore_t m_sema;
+
+	Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;
+	Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;
+
+public:
+	Semaphore(int initialCount = 0)
+	{
+		assert(initialCount >= 0);
+		kern_return_t rc = semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
+		assert(rc == KERN_SUCCESS);
+		(void)rc;
+	}
+
+	~Semaphore()
+	{
+		semaphore_destroy(mach_task_self(), m_sema);
+	}
+
+	bool wait()
+	{
+		return semaphore_wait(m_sema) == KERN_SUCCESS;
+	}
+	
+	bool try_wait()
+	{
+		return timed_wait(0);
+	}
+	
+	bool timed_wait(std::uint64_t timeout_usecs)
+	{
+		mach_timespec_t ts;
+		ts.tv_sec = static_cast<unsigned int>(timeout_usecs / 1000000);
+		ts.tv_nsec = static_cast<int>((timeout_usecs % 1000000) * 1000);
+
+		// added in OSX 10.10: https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html
+		kern_return_t rc = semaphore_timedwait(m_sema, ts);
+		return rc == KERN_SUCCESS;
+	}
+
+	void signal()
+	{
+		while (semaphore_signal(m_sema) != KERN_SUCCESS);
+	}
+
+	void signal(int count)
+	{
+		while (count-- > 0)
+		{
+			while (semaphore_signal(m_sema) != KERN_SUCCESS);
+		}
+	}
+};
+#elif defined(__unix__) || defined(__MVS__)
+//---------------------------------------------------------
+// Semaphore (POSIX, Linux, zOS)
+//---------------------------------------------------------
+class Semaphore
+{
+private:
+	sem_t m_sema;
+
+	Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;
+	Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION;
+
+public:
+	Semaphore(int initialCount = 0)
+	{
+		assert(initialCount >= 0);
+		int rc = sem_init(&m_sema, 0, static_cast<unsigned int>(initialCount));
+		assert(rc == 0);
+		(void)rc;
+	}
+
+	~Semaphore()
+	{
+		sem_destroy(&m_sema);
+	}
+
+	bool wait()
+	{
+		// http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
+		int rc;
+		do {
+			rc = sem_wait(&m_sema);
+		} while (rc == -1 && errno == EINTR);
+		return rc == 0;
+	}
+
+	bool try_wait()
+	{
+		int rc;
+		do {
+			rc = sem_trywait(&m_sema);
+		} while (rc == -1 && errno == EINTR);
+		return rc == 0;
+	}
+
+	bool timed_wait(std::uint64_t usecs)
+	{
+		struct timespec ts;
+		const int usecs_in_1_sec = 1000000;
+		const int nsecs_in_1_sec = 1000000000;
+#ifdef MOODYCAMEL_LIGHTWEIGHTSEMAPHORE_MONOTONIC
+		clock_gettime(CLOCK_MONOTONIC, &ts);
+#else
+		clock_gettime(CLOCK_REALTIME, &ts);
+#endif
+		ts.tv_sec += (time_t)(usecs / usecs_in_1_sec);
+		ts.tv_nsec += (long)(usecs % usecs_in_1_sec) * 1000;
+		// sem_timedwait bombs if you have more than 1e9 in tv_nsec
+		// so we have to clean things up before passing it in
+		if (ts.tv_nsec >= nsecs_in_1_sec) {
+			ts.tv_nsec -= nsecs_in_1_sec;
+			++ts.tv_sec;
+		}
+
+		int rc;
+		do {
+#ifdef MOODYCAMEL_LIGHTWEIGHTSEMAPHORE_MONOTONIC
+			rc = sem_clockwait(&m_sema, CLOCK_MONOTONIC, &ts);
+#else
+			rc = sem_timedwait(&m_sema, &ts);
+#endif
+		} while (rc == -1 && errno == EINTR);
+		return rc == 0;
+	}
+
+	void signal()
+	{
+		while (sem_post(&m_sema) == -1);
+	}
+
+	void signal(int count)
+	{
+		while (count-- > 0)
+		{
+			while (sem_post(&m_sema) == -1);
+		}
+	}
+};
+#else
+#error Unsupported platform! (No semaphore wrapper available)
+#endif
+
+}	// end namespace details
+
+
+//---------------------------------------------------------
+// LightweightSemaphore
+//---------------------------------------------------------
+class LightweightSemaphore
+{
+public:
+	typedef std::make_signed<std::size_t>::type ssize_t;
+
+private:
+	std::atomic<ssize_t> m_count;
+	details::Semaphore m_sema;
+	int m_maxSpins;
+
+	bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1)
+	{
+		ssize_t oldCount;
+		int spin = m_maxSpins;
+		while (--spin >= 0)
+		{
+			oldCount = m_count.load(std::memory_order_relaxed);
+			if ((oldCount > 0) && m_count.compare_exchange_strong(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed))
+				return true;
+			std::atomic_signal_fence(std::memory_order_acquire);	 // Prevent the compiler from collapsing the loop.
+		}
+		oldCount = m_count.fetch_sub(1, std::memory_order_acquire);
+		if (oldCount > 0)
+			return true;
+		if (timeout_usecs < 0)
+		{
+			if (m_sema.wait())
+				return true;
+		}
+		if (timeout_usecs > 0 && m_sema.timed_wait((std::uint64_t)timeout_usecs))
+			return true;
+		// At this point, we've timed out waiting for the semaphore, but the
+		// count is still decremented indicating we may still be waiting on
+		// it. So we have to re-adjust the count, but only if the semaphore
+		// wasn't signaled enough times for us too since then. If it was, we
+		// need to release the semaphore too.
+		while (true)
+		{
+			oldCount = m_count.load(std::memory_order_acquire);
+			if (oldCount >= 0 && m_sema.try_wait())
+				return true;
+			if (oldCount < 0 && m_count.compare_exchange_strong(oldCount, oldCount + 1, std::memory_order_relaxed, std::memory_order_relaxed))
+				return false;
+		}
+	}
+
+	ssize_t waitManyWithPartialSpinning(ssize_t max, std::int64_t timeout_usecs = -1)
+	{
+		assert(max > 0);
+		ssize_t oldCount;
+		int spin = m_maxSpins;
+		while (--spin >= 0)
+		{
+			oldCount = m_count.load(std::memory_order_relaxed);
+			if (oldCount > 0)
+			{
+				ssize_t newCount = oldCount > max ? oldCount - max : 0;
+				if (m_count.compare_exchange_strong(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed))
+					return oldCount - newCount;
+			}
+			std::atomic_signal_fence(std::memory_order_acquire);
+		}
+		oldCount = m_count.fetch_sub(1, std::memory_order_acquire);
+		if (oldCount <= 0)
+		{
+			if ((timeout_usecs == 0) || (timeout_usecs < 0 && !m_sema.wait()) || (timeout_usecs > 0 && !m_sema.timed_wait((std::uint64_t)timeout_usecs)))
+			{
+				while (true)
+				{
+					oldCount = m_count.load(std::memory_order_acquire);
+					if (oldCount >= 0 && m_sema.try_wait())
+						break;
+					if (oldCount < 0 && m_count.compare_exchange_strong(oldCount, oldCount + 1, std::memory_order_relaxed, std::memory_order_relaxed))
+						return 0;
+				}
+			}
+		}
+		if (max > 1)
+			return 1 + tryWaitMany(max - 1);
+		return 1;
+	}
+
+public:
+	LightweightSemaphore(ssize_t initialCount = 0, int maxSpins = 10000) : m_count(initialCount), m_maxSpins(maxSpins)
+	{
+		assert(initialCount >= 0);
+		assert(maxSpins >= 0);
+	}
+
+	bool tryWait()
+	{
+		ssize_t oldCount = m_count.load(std::memory_order_relaxed);
+		while (oldCount > 0)
+		{
+			if (m_count.compare_exchange_weak(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed))
+				return true;
+		}
+		return false;
+	}
+
+	bool wait()
+	{
+		return tryWait() || waitWithPartialSpinning();
+	}
+
+	bool wait(std::int64_t timeout_usecs)
+	{
+		return tryWait() || waitWithPartialSpinning(timeout_usecs);
+	}
+
+	// Acquires between 0 and (greedily) max, inclusive
+	ssize_t tryWaitMany(ssize_t max)
+	{
+		assert(max >= 0);
+		ssize_t oldCount = m_count.load(std::memory_order_relaxed);
+		while (oldCount > 0)
+		{
+			ssize_t newCount = oldCount > max ? oldCount - max : 0;
+			if (m_count.compare_exchange_weak(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed))
+				return oldCount - newCount;
+		}
+		return 0;
+	}
+
+	// Acquires at least one, and (greedily) at most max
+	ssize_t waitMany(ssize_t max, std::int64_t timeout_usecs)
+	{
+		assert(max >= 0);
+		ssize_t result = tryWaitMany(max);
+		if (result == 0 && max > 0)
+			result = waitManyWithPartialSpinning(max, timeout_usecs);
+		return result;
+	}
+	
+	ssize_t waitMany(ssize_t max)
+	{
+		ssize_t result = waitMany(max, -1);
+		assert(result > 0);
+		return result;
+	}
+
+	void signal(ssize_t count = 1)
+	{
+		assert(count >= 0);
+		ssize_t oldCount = m_count.fetch_add(count, std::memory_order_release);
+		ssize_t toRelease = -oldCount < count ? -oldCount : count;
+		if (toRelease > 0)
+		{
+			m_sema.signal((int)toRelease);
+		}
+	}
+	
+	std::size_t availableApprox() const
+	{
+		ssize_t count = m_count.load(std::memory_order_relaxed);
+		return count > 0 ? static_cast<std::size_t>(count) : 0;
+	}
+};
+
+}   // end namespace moodycamel
diff --git a/archive/2025/summer/bsc_karidas/external/concurrentqueue/samples.md b/archive/2025/summer/bsc_karidas/external/concurrentqueue/samples.md
new file mode 100644
index 000000000..52cf9b7e5
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/external/concurrentqueue/samples.md
@@ -0,0 +1,375 @@
+# Samples for moodycamel::ConcurrentQueue
+
+Here are some example usage scenarios with sample code. Note that most
+use the simplest version of each available method for demonstration purposes,
+but they can all be adapted to use tokens and/or the corresponding bulk methods for
+extra speed.
+
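+Since most of the examples below can be "adapted to use tokens", here is a minimal,
+illustrative sketch of what that adaptation looks like (single-threaded for brevity;
+in real code you would typically create the tokens once per thread and reuse them):
+```C++
+ConcurrentQueue<int> q;
+
+// Tokens amortize per-call bookkeeping; create them once, then pass them to
+// the token-taking overloads of enqueue/try_dequeue
+ProducerToken ptok(q);
+ConsumerToken ctok(q);
+
+q.enqueue(ptok, 42);
+
+int item;
+if (q.try_dequeue(ctok, item)) {
+	// item == 42
+}
+```
+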
+
+## Hello queue
+```C++
+ConcurrentQueue<int> q;
+
+for (int i = 0; i != 123; ++i)
+	q.enqueue(i);
+
+int item;
+for (int i = 0; i != 123; ++i) {
+	q.try_dequeue(item);
+	assert(item == i);
+}
+```
+
+## Hello concurrency
+
+Basic example of how to use the queue from multiple threads, with no
+particular goal (i.e. it does nothing, but in an instructive way).
+```C++
+ConcurrentQueue<int> q;
+int dequeued[100] = { 0 };
+std::thread threads[20];
+
+// Producers
+for (int i = 0; i != 10; ++i) {
+	threads[i] = std::thread([&](int i) {
+		for (int j = 0; j != 10; ++j) {
+			q.enqueue(i * 10 + j);
+		}
+	}, i);
+}
+
+// Consumers
+for (int i = 10; i != 20; ++i) {
+	threads[i] = std::thread([&]() {
+		int item;
+		for (int j = 0; j != 20; ++j) {
+			if (q.try_dequeue(item)) {
+				++dequeued[item];
+			}
+		}
+	});
+}
+
+// Wait for all threads
+for (int i = 0; i != 20; ++i) {
+	threads[i].join();
+}
+
+// Collect any leftovers (could be some if e.g. consumers finish before producers)
+int item;
+while (q.try_dequeue(item)) {
+	++dequeued[item];
+}
+
+// Make sure everything went in and came back out!
+for (int i = 0; i != 100; ++i) {
+	assert(dequeued[i] == 1);
+}
+```
+
+## Bulk up
+
+Same as previous example, but runs faster.
+```C++
+ConcurrentQueue<int> q;
+int dequeued[100] = { 0 };
+std::thread threads[20];
+
+// Producers
+for (int i = 0; i != 10; ++i) {
+	threads[i] = std::thread([&](int i) {
+		int items[10];
+		for (int j = 0; j != 10; ++j) {
+			items[j] = i * 10 + j;
+		}
+		q.enqueue_bulk(items, 10);
+	}, i);
+}
+
+// Consumers
+for (int i = 10; i != 20; ++i) {
+	threads[i] = std::thread([&]() {
+		int items[20];
+		for (std::size_t count = q.try_dequeue_bulk(items, 20); count != 0; --count) {
+			++dequeued[items[count - 1]];
+		}
+	});
+}
+
+// Wait for all threads
+for (int i = 0; i != 20; ++i) {
+	threads[i].join();
+}
+
+// Collect any leftovers (could be some if e.g. consumers finish before producers)
+int items[10];
+std::size_t count;
+while ((count = q.try_dequeue_bulk(items, 10)) != 0) {
+	for (std::size_t i = 0; i != count; ++i) {
+		++dequeued[items[i]];
+	}
+}
+
+// Make sure everything went in and came back out!
+for (int i = 0; i != 100; ++i) {
+	assert(dequeued[i] == 1);
+}
+```
+
+## Producer/consumer model (simultaneous)
+
+In this model, one set of threads is producing items,
+and the other is consuming them concurrently until all of
+them have been consumed. The counters are required to
+ensure that all items eventually get consumed.
+```C++
+ConcurrentQueue<Item> q;
+const int ProducerCount = 8;
+const int ConsumerCount = 8;
+std::thread producers[ProducerCount];
+std::thread consumers[ConsumerCount];
+std::atomic<int> doneProducers(0);
+std::atomic<int> doneConsumers(0);
+for (int i = 0; i != ProducerCount; ++i) {
+	producers[i] = std::thread([&]() {
+		while (produce) {
+			q.enqueue(produceItem());
+		}
+		doneProducers.fetch_add(1, std::memory_order_release);
+	});
+}
+for (int i = 0; i != ConsumerCount; ++i) {
+	consumers[i] = std::thread([&]() {
+		Item item;
+		bool itemsLeft;
+		do {
+			// It's important to fence (if the producers have finished) *before* dequeueing
+			itemsLeft = doneProducers.load(std::memory_order_acquire) != ProducerCount;
+			while (q.try_dequeue(item)) {
+				itemsLeft = true;
+				consumeItem(item);
+			}
+		} while (itemsLeft || doneConsumers.fetch_add(1, std::memory_order_acq_rel) + 1 == ConsumerCount);
+		// The condition above is a bit tricky, but it's necessary to ensure that the
+		// last consumer sees the memory effects of all the other consumers before it
+		// calls try_dequeue for the last time
+	});
+}
+for (int i = 0; i != ProducerCount; ++i) {
+	producers[i].join();
+}
+for (int i = 0; i != ConsumerCount; ++i) {
+	consumers[i].join();
+}
+```
+## Producer/consumer model (simultaneous, blocking)
+
+The blocking version is different, since either the number of elements being produced needs
+to be known ahead of time, or some other coordination is required to tell the consumers when
+to stop calling wait_dequeue (not shown here). This is necessary because otherwise a consumer
+could end up blocking forever -- and destroying a queue while a consumer is blocking on it leads
+to undefined behaviour.
+```C++
+BlockingConcurrentQueue<Item> q;
+const int ProducerCount = 8;
+const int ConsumerCount = 8;
+std::thread producers[ProducerCount];
+std::thread consumers[ConsumerCount];
+std::atomic<int> promisedElementsRemaining(ProducerCount * 1000);
+for (int i = 0; i != ProducerCount; ++i) {
+	producers[i] = std::thread([&]() {
+		for (int j = 0; j != 1000; ++j) {
+			q.enqueue(produceItem());
+		}
+	});
+}
+for (int i = 0; i != ConsumerCount; ++i) {
+	consumers[i] = std::thread([&]() {
+		Item item;
+		while (promisedElementsRemaining.fetch_sub(1, std::memory_order_relaxed) > 0) {
+			q.wait_dequeue(item);
+			consumeItem(item);
+		}
+	});
+}
+for (int i = 0; i != ProducerCount; ++i) {
+	producers[i].join();
+}
+for (int i = 0; i != ConsumerCount; ++i) {
+	consumers[i].join();
+}
+```
+
+## Producer/consumer model (separate stages)
+```C++
+ConcurrentQueue<Item> q;
+
+// Production stage
+std::thread threads[8];
+for (int i = 0; i != 8; ++i) {
+	threads[i] = std::thread([&]() {
+		while (produce) {
+			q.enqueue(produceItem());
+		}
+	});
+}
+for (int i = 0; i != 8; ++i) {
+	threads[i].join();
+}
+
+// Consumption stage
+std::atomic<int> doneConsumers(0);
+for (int i = 0; i != 8; ++i) {
+	threads[i] = std::thread([&]() {
+		Item item;
+		do {
+			while (q.try_dequeue(item)) {
+				consumeItem(item);
+			}
+			// Loop again one last time if we're the last consumer (with the acquired
+			// memory effects of the other consumers):
+		} while (doneConsumers.fetch_add(1, std::memory_order_acq_rel) + 1 == 8);
+	});
+}
+for (int i = 0; i != 8; ++i) {
+	threads[i].join();
+}
+```
+Note that there's no point trying to use the blocking queue with this model, since
+there's no need to use the `wait` methods (all the elements are produced before any
+are consumed), and hence the complexity would be the same but with additional overhead.
+
+
+## Object pool
+
+If you don't know what threads will be using the queue in advance,
+you can't really declare any long-term tokens. The obvious solution
+is to use the implicit methods (that don't take any tokens):
+```C++
+// A pool of 'Something' objects that can be safely accessed
+// from any thread
+class SomethingPool
+{
+public:
+    Something getSomething()
+    {
+	Something obj;
+	queue.try_dequeue(obj);
+
+	// If the dequeue succeeded, obj will be an object from the
+	// thread pool, otherwise it will be the default-constructed
+	// object as declared above
+	return obj;
+    }
+
+    void recycleSomething(Something&& obj)
+    {
+	queue.enqueue(std::move(obj));
+    }
+
+private:
+    // Backing queue of pooled objects (Something must be default-constructible)
+    ConcurrentQueue<Something> queue;
+};
+```
+
+## Threadpool task queue
+```C++
+BlockingConcurrentQueue<Task> q;
+
+// To create a task from any thread:
+q.enqueue(...);
+
+// On threadpool threads:
+Task task;
+while (true) {
+	q.wait_dequeue(task);
+
+	// Process task...
+}
+```
+
+## Multithreaded game loop
+```C++
+BlockingConcurrentQueue<Task> q;
+std::atomic<int> pendingTasks(0);
+
+// On threadpool threads:
+Task task;
+while (true) {
+	q.wait_dequeue(task);
+
+	// Process task...
+
+	pendingTasks.fetch_add(-1, std::memory_order_release);
+}
+
+// Whenever a new task needs to be processed for the frame:
+pendingTasks.fetch_add(1, std::memory_order_release);
+q.enqueue(...);
+
+// To wait for all the frame's tasks to complete before rendering:
+while (pendingTasks.load(std::memory_order_acquire) != 0)
+	continue;
+
+// Alternatively you could help out the thread pool while waiting:
+while (pendingTasks.load(std::memory_order_acquire) != 0) {
+	if (!q.try_dequeue(task)) {
+		continue;
+	}
+
+	// Process task...
+
+	pendingTasks.fetch_add(-1, std::memory_order_release);
+}
+```
+
+## Pump until empty
+
+This might be useful if, for example, you want to process any remaining items
+in the queue before it's destroyed. Note that it is your responsibility
+to ensure that the memory effects of any enqueue operations you wish to see on
+the dequeue thread are visible (i.e. if you're waiting for a certain set of elements,
+you need to use memory fences to ensure that those elements are visible to the dequeue
+thread after they've been enqueued).
+```C++
+ConcurrentQueue<Item> q;
+
+// Single-threaded pumping:
+Item item;
+while (q.try_dequeue(item)) {
+	// Process item...
+}
+// q is guaranteed to be empty here, unless there is another thread enqueueing still or
+// there was another thread dequeueing at one point and its memory effects have not
+// yet been propagated to this thread.
+
+// Multi-threaded pumping:
+std::thread threads[8];
+std::atomic<int> doneConsumers(0);
+for (int i = 0; i != 8; ++i) {
+	threads[i] = std::thread([&]() {
+		Item item;
+		do {
+			while (q.try_dequeue(item)) {
+				// Process item...
+			}
+		} while (doneConsumers.fetch_add(1, std::memory_order_acq_rel) + 1 == 8);
+		// If there are still enqueue operations happening on other threads,
+		// then the queue may not be empty at this point. However, if all enqueue
+		// operations completed before we finished pumping (and the propagation of
+		// their memory effects too), and all dequeue operations apart from those
+		// our threads did above completed before we finished pumping (and the
+		// propagation of their memory effects too), then the queue is guaranteed
+		// to be empty at this point.
+	});
+}
+for (int i = 0; i != 8; ++i) {
+	threads[i].join();
+}
+```
+
+## Wait for a queue to become empty (without dequeueing)
+
+You can't (robustly) :-) However, you can set up your own atomic counter and
+poll that instead (see the game loop example). If you're satisfied with merely an estimate, you can use
+`size_approx()`. Note that `size_approx()` may return 0 even if the queue is
+not completely empty, unless the queue has already stabilized first (no threads
+are enqueueing or dequeueing, and all memory effects of any previous operations
+have been propagated to the thread before it calls `size_approx()`).
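+
+Purely as an illustrative sketch (this is the same counter idea as the game loop
+example above, not a library feature), the polling pattern looks like this:
+```C++
+ConcurrentQueue<Item> q;
+std::atomic<int> pending(0);
+
+// Producer side: count each item in before (or as) it is enqueued
+pending.fetch_add(1, std::memory_order_release);
+q.enqueue(produceItem());
+
+// Consumer side (any number of threads): count it back out only after processing
+Item item;
+if (q.try_dequeue(item)) {
+	consumeItem(item);
+	pending.fetch_sub(1, std::memory_order_release);
+}
+
+// "Queue is drained" check: everything counted in has been counted back out
+while (pending.load(std::memory_order_acquire) != 0)
+	continue;
+```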
diff --git a/archive/2025/summer/bsc_karidas/include/BufferQueue.hpp b/archive/2025/summer/bsc_karidas/include/BufferQueue.hpp
new file mode 100644
index 000000000..3a5ab3c08
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/include/BufferQueue.hpp
@@ -0,0 +1,49 @@
+#ifndef BUFFER_QUEUE_HPP
+#define BUFFER_QUEUE_HPP
+
+#include "QueueItem.hpp"
+#include "concurrentqueue.h"
+#include <atomic>
+#include <vector>
+#include <memory>
+#include <condition_variable>
+#include <chrono>
+
+class BufferQueue
+{
+public:
+    using ProducerToken = moodycamel::ProducerToken;
+    using ConsumerToken = moodycamel::ConsumerToken;
+
+private:
+    moodycamel::ConcurrentQueue<QueueItem> m_queue;
+
+public:
+    explicit BufferQueue(size_t capacity, size_t maxExplicitProducers);
+
+    ProducerToken createProducerToken() { return ProducerToken(m_queue); }
+    ConsumerToken createConsumerToken() { return ConsumerToken(m_queue); }
+
+    bool enqueueBlocking(QueueItem item,
+                         ProducerToken &token,
+                         std::chrono::milliseconds timeout = std::chrono::milliseconds::max());
+    bool enqueueBatchBlocking(std::vector<QueueItem> items,
+                              ProducerToken &token,
+                              std::chrono::milliseconds timeout = std::chrono::milliseconds::max());
+    bool tryDequeue(QueueItem &item, ConsumerToken &token);
+    size_t tryDequeueBatch(std::vector<QueueItem> &items, size_t maxItems, ConsumerToken &token);
+    bool flush();
+    size_t size() const;
+
+    // delete copy/move
+    BufferQueue(const BufferQueue &) = delete;
+    BufferQueue &operator=(const BufferQueue &) = delete;
+    BufferQueue(BufferQueue &&) = delete;
+    BufferQueue &operator=(BufferQueue &&) = delete;
+
+private:
+    bool enqueue(QueueItem item, ProducerToken &token);
+    bool enqueueBatch(std::vector<QueueItem> items, ProducerToken &token);
+};
+
+#endif
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/include/Compression.hpp b/archive/2025/summer/bsc_karidas/include/Compression.hpp
new file mode 100644
index 000000000..43bc34da9
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/include/Compression.hpp
@@ -0,0 +1,17 @@
+#ifndef COMPRESSION_HPP
+#define COMPRESSION_HPP
+
+#include "LogEntry.hpp"
+#include <vector>
+#include <cstdint>
+#include <zlib.h>
+
+class Compression
+{
+public:
+    static std::vector<uint8_t> compress(std::vector<uint8_t> &&data, int level = Z_DEFAULT_COMPRESSION);
+
+    static std::vector<uint8_t> decompress(std::vector<uint8_t> &&compressedData);
+};
+
+#endif
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/include/Config.hpp b/archive/2025/summer/bsc_karidas/include/Config.hpp
new file mode 100644
index 000000000..265f66a78
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/include/Config.hpp
@@ -0,0 +1,28 @@
+#ifndef CONFIG_HPP
+#define CONFIG_HPP
+
+#include <string>
+#include <chrono>
+
+struct LoggingConfig
+{
+    // api
+    std::chrono::milliseconds appendTimeout = std::chrono::milliseconds(30000);
+    // queue
+    size_t queueCapacity = 8192;
+    size_t maxExplicitProducers = 16; // maximum number of producers creating a producer token
+    // writers
+    size_t batchSize = 100;
+    size_t numWriterThreads = 2;
+    bool useEncryption = true;
+    int compressionLevel = 9; // 0 = no compression, 1-9 = compression levels
+    // segmented storage
+    std::string basePath = "./logs";
+    std::string baseFilename = "default";
+    size_t maxSegmentSize = 100 * 1024 * 1024; // 100 MB
+    size_t maxAttempts = 10;
+    std::chrono::milliseconds baseRetryDelay = std::chrono::milliseconds(1);
+    size_t maxOpenFiles = 512;
+};
+
+#endif
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/include/Crypto.hpp b/archive/2025/summer/bsc_karidas/include/Crypto.hpp
new file mode 100644
index 000000000..53e5fa12e
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/include/Crypto.hpp
@@ -0,0 +1,33 @@
+#ifndef CRYPTO_HPP
+#define CRYPTO_HPP
+
+#include <vector>
+#include <string>
+#include <cstdint>
+#include <memory>
+#include <openssl/evp.h>
+
+class Crypto
+{
+private:
+    EVP_CIPHER_CTX *m_encryptCtx;
+    EVP_CIPHER_CTX *m_decryptCtx;
+
+public:
+    Crypto();
+    ~Crypto();
+
+    static constexpr size_t KEY_SIZE = 32;     // 256 bits
+    static constexpr size_t GCM_IV_SIZE = 12;  // 96 bits (recommended for GCM)
+    static constexpr size_t GCM_TAG_SIZE = 16; // 128 bits
+
+    std::vector<uint8_t> encrypt(std::vector<uint8_t> &&plaintext,
+                                 const std::vector<uint8_t> &key,
+                                 const std::vector<uint8_t> &iv);
+
+    std::vector<uint8_t> decrypt(const std::vector<uint8_t> &encryptedData,
+                                 const std::vector<uint8_t> &key,
+                                 const std::vector<uint8_t> &iv);
+};
+
+#endif
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/include/LogEntry.hpp b/archive/2025/summer/bsc_karidas/include/LogEntry.hpp
new file mode 100644
index 000000000..4af355741
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/include/LogEntry.hpp
@@ -0,0 +1,61 @@
+#ifndef LOG_ENTRY_HPP
+#define LOG_ENTRY_HPP
+
+#include <string>
+#include <chrono>
+#include <vector>
+#include <memory>
+#include <cstdint>
+
+class LogEntry
+{
+public:
+    enum class ActionType
+    {
+        CREATE,
+        READ,
+        UPDATE,
+        DELETE,
+    };
+
+    LogEntry();
+
+    LogEntry(ActionType actionType,
+             std::string dataLocation,
+             std::string dataControllerId,
+             std::string dataProcessorId,
+             std::string dataSubjectId,
+             std::vector<uint8_t> payload = std::vector<uint8_t>());
+
+    std::vector<uint8_t> serialize() &&;
+    std::vector<uint8_t> serialize() const &;
+    bool deserialize(std::vector<uint8_t> &&data);
+
+    static std::vector<uint8_t> serializeBatch(std::vector<LogEntry> &&entries);
+    static std::vector<LogEntry> deserializeBatch(std::vector<uint8_t> &&batchData);
+
+    ActionType getActionType() const { return m_actionType; }
+    std::string getDataLocation() const { return m_dataLocation; }
+    std::string getDataControllerId() const { return m_dataControllerId; }
+    std::string getDataProcessorId() const { return m_dataProcessorId; }
+    std::string getDataSubjectId() const { return m_dataSubjectId; }
+    std::chrono::system_clock::time_point getTimestamp() const { return m_timestamp; }
+    const std::vector<uint8_t> &getPayload() const { return m_payload; }
+
+private:
+    // Helper methods for binary serialization
+    void appendToVector(std::vector<uint8_t> &vec, const void *data, size_t size) const;
+    void appendStringToVector(std::vector<uint8_t> &vec, const std::string &str) const;
+    void appendStringToVector(std::vector<uint8_t> &vec, std::string &&str);
+    bool extractStringFromVector(std::vector<uint8_t> &vec, size_t &offset, std::string &str);
+
+    ActionType m_actionType;                           // Type of GDPR operation
+    std::string m_dataLocation;                        // Location of the data being operated on
+    std::string m_dataControllerId;                    // ID of the entity controlling the data
+    std::string m_dataProcessorId;                     // ID of the entity performing the operation
+    std::string m_dataSubjectId;                       // ID of the data subject
+    std::chrono::system_clock::time_point m_timestamp; // When the operation occurred
+    std::vector<uint8_t> m_payload;                    // optional extra bytes
+};
+
+#endif
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/include/Logger.hpp b/archive/2025/summer/bsc_karidas/include/Logger.hpp
new file mode 100644
index 000000000..d4119d364
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/include/Logger.hpp
@@ -0,0 +1,59 @@
+#ifndef LOGGER_HPP
+#define LOGGER_HPP
+
+#include "LogEntry.hpp"
+#include "BufferQueue.hpp"
+#include "QueueItem.hpp"
+#include <string>
+#include <chrono>
+#include <memory>
+#include <vector>
+#include <shared_mutex>
+#include <functional>
+#include <optional>
+
+class Logger
+{
+    friend class LoggerTest;
+
+public:
+    static Logger &getInstance();
+
+    bool initialize(std::shared_ptr<BufferQueue> queue,
+                    std::chrono::milliseconds appendTimeout = std::chrono::milliseconds::max());
+
+    BufferQueue::ProducerToken createProducerToken();
+    bool append(LogEntry entry,
+                BufferQueue::ProducerToken &token,
+                const std::optional<std::string> &filename = std::nullopt);
+    bool appendBatch(std::vector<LogEntry> entries,
+                     BufferQueue::ProducerToken &token,
+                     const std::optional<std::string> &filename = std::nullopt);
+
+    bool exportLogs(const std::string &outputPath,
+                    std::chrono::system_clock::time_point fromTimestamp = std::chrono::system_clock::time_point(),
+                    std::chrono::system_clock::time_point toTimestamp = std::chrono::system_clock::time_point());
+
+    bool reset();
+
+    ~Logger();
+
+private:
+    Logger();
+    Logger(const Logger &) = delete;
+    Logger &operator=(const Logger &) = delete;
+    // Singleton instance
+    static std::unique_ptr<Logger> s_instance;
+    static std::mutex s_instanceMutex;
+
+    std::shared_ptr<BufferQueue> m_logQueue;
+    std::chrono::milliseconds m_appendTimeout;
+
+    // State tracking
+    bool m_initialized;
+
+    // Helper to report errors
+    void reportError(const std::string &message);
+};
+
+#endif
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/include/LoggingManager.hpp b/archive/2025/summer/bsc_karidas/include/LoggingManager.hpp
new file mode 100644
index 000000000..782c04d23
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/include/LoggingManager.hpp
@@ -0,0 +1,53 @@
+#ifndef LOGGING_SYSTEM_HPP
+#define LOGGING_SYSTEM_HPP
+
+#include "Config.hpp"
+#include "Logger.hpp"
+#include "BufferQueue.hpp"
+#include "SegmentedStorage.hpp"
+#include "Writer.hpp"
+#include "LogEntry.hpp"
+#include <memory>
+#include <vector>
+#include <atomic>
+#include <mutex>
+#include <chrono>
+#include <string>
+#include <optional>
+
+class LoggingManager
+{
+public:
+    explicit LoggingManager(const LoggingConfig &config);
+    ~LoggingManager();
+
+    bool start();
+    bool stop();
+
+    BufferQueue::ProducerToken createProducerToken();
+    bool append(LogEntry entry,
+                BufferQueue::ProducerToken &token,
+                const std::optional<std::string> &filename = std::nullopt);
+    bool appendBatch(std::vector<LogEntry> entries,
+                     BufferQueue::ProducerToken &token,
+                     const std::optional<std::string> &filename = std::nullopt);
+
+    bool exportLogs(const std::string &outputPath,
+                    std::chrono::system_clock::time_point fromTimestamp = std::chrono::system_clock::time_point(),
+                    std::chrono::system_clock::time_point toTimestamp = std::chrono::system_clock::time_point());
+
+private:
+    std::shared_ptr<BufferQueue> m_queue;           // Thread-safe queue for queue items
+    std::shared_ptr<SegmentedStorage> m_storage;    // Manages append-only log segments
+    std::vector<std::unique_ptr<Writer>> m_writers; // Multiple writer threads
+    std::atomic<bool> m_running{false};             // System running state
+    std::atomic<bool> m_acceptingEntries{false};    // Controls whether new entries are accepted
+    std::mutex m_systemMutex;                       // For system-wide operations
+
+    size_t m_numWriterThreads; // Number of writer threads
+    size_t m_batchSize;        // Batch size for writers
+    bool m_useEncryption;
+    int m_compressionLevel;
+};
+
+#endif
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/include/QueueItem.hpp b/archive/2025/summer/bsc_karidas/include/QueueItem.hpp
new file mode 100644
index 000000000..18c19de00
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/include/QueueItem.hpp
@@ -0,0 +1,25 @@
+#ifndef QUEUE_ITEM_HPP
+#define QUEUE_ITEM_HPP
+
+#include "LogEntry.hpp"
+#include <optional>
+#include <string>
+
+struct QueueItem
+{
+    LogEntry entry;
+    std::optional<std::string> targetFilename = std::nullopt;
+
+    QueueItem() = default;
+    QueueItem(LogEntry &&logEntry)
+        : entry(std::move(logEntry)), targetFilename(std::nullopt) {}
+    QueueItem(LogEntry &&logEntry, const std::optional<std::string> &filename)
+        : entry(std::move(logEntry)), targetFilename(filename) {}
+
+    QueueItem(const QueueItem &) = default;
+    QueueItem(QueueItem &&) = default;
+    QueueItem &operator=(const QueueItem &) = default;
+    QueueItem &operator=(QueueItem &&) = default;
+};
+
+#endif
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/include/SegmentedStorage.hpp b/archive/2025/summer/bsc_karidas/include/SegmentedStorage.hpp
new file mode 100644
index 000000000..3984aea79
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/include/SegmentedStorage.hpp
@@ -0,0 +1,144 @@
+#ifndef SEGMENTED_STORAGE_HPP
+#define SEGMENTED_STORAGE_HPP
+
+#include <string>
+#include <vector>
+#include <atomic>
+#include <mutex>
+#include <shared_mutex>
+#include <filesystem>
+#include <cstdint>
+#include <unordered_map>
+#include <fcntl.h>  // for open flags
+#include <unistd.h> // for close, pwrite, fsync
+#include <chrono>
+#include <thread>
+#include <stdexcept>
+#include <list> // For LRU cache
+
+class SegmentedStorage
+{
+public:
+    SegmentedStorage(const std::string &basePath,
+                     const std::string &baseFilename,
+                     size_t maxSegmentSize = 100 * 1024 * 1024, // 100 MB default
+                     size_t maxAttempts = 5,
+                     std::chrono::milliseconds baseRetryDelay = std::chrono::milliseconds(1),
+                     size_t maxOpenFiles = 512);
+
+    ~SegmentedStorage();
+
+    size_t write(std::vector<uint8_t> &&data);
+    size_t writeToFile(const std::string &filename, std::vector<uint8_t> &&data);
+    void flush();
+
+private:
+    std::string m_basePath;
+    std::string m_baseFilename;
+    size_t m_maxSegmentSize;
+    size_t m_maxAttempts;
+    std::chrono::milliseconds m_baseRetryDelay;
+    size_t m_maxOpenFiles; // Max number of cache entries
+
+    struct CacheEntry
+    {
+        int fd{-1};
+        std::atomic<size_t> segmentIndex{0};
+        std::atomic<size_t> currentOffset{0};
+        std::string currentSegmentPath;
+        mutable std::shared_mutex fileMutex; // shared for writes, exclusive for rotate/flush
+    };
+
+    // Unified LRU Cache for both file descriptors and segment information
+    class LRUCache
+    {
+    public:
+        LRUCache(size_t capacity, SegmentedStorage *parent) : m_capacity(capacity), m_parent(parent) {}
+
+        std::shared_ptr<CacheEntry> get(const std::string &filename);
+        void flush(const std::string &filename);
+        void flushAll();
+        void closeAll();
+
+    private:
+        size_t m_capacity;
+        SegmentedStorage *m_parent;
+
+        // LRU list of filenames
+        std::list<std::string> m_lruList;
+        // Map from filename to cache entry and iterator in LRU list
+        struct CacheData
+        {
+            std::shared_ptr<CacheEntry> entry;
+            std::list<std::string>::iterator lruIt;
+        };
+        std::unordered_map<std::string, CacheData> m_cache;
+        mutable std::mutex m_mutex; // Protects m_lruList and m_cache
+
+        void evictLRU();
+        std::shared_ptr<CacheEntry> reconstructState(const std::string &filename);
+    };
+
+    LRUCache m_cache;
+
+    std::string rotateSegment(const std::string &filename, std::shared_ptr<CacheEntry> entry);
+    std::string generateSegmentPath(const std::string &filename, size_t segmentIndex) const;
+    size_t getFileSize(const std::string &path) const;
+    size_t findLatestSegmentIndex(const std::string &filename) const;
+
+    // Retry helpers use member-configured parameters
+    template <typename Func>
+    auto retryWithBackoff(Func &&f)
+    {
+        for (size_t attempt = 1;; ++attempt)
+        {
+            try
+            {
+                return f();
+            }
+            catch (const std::runtime_error &)
+            {
+                if (attempt >= m_maxAttempts)
+                    throw;
+                auto delay = m_baseRetryDelay * (1 << (attempt - 1));
+                std::this_thread::sleep_for(delay);
+            }
+        }
+    }
+
+    int openWithRetry(const char *path, int flags, mode_t mode)
+    {
+        return retryWithBackoff([&]()
+                                {
+            int fd = ::open(path, flags, mode);
+            if (fd < 0) throw std::runtime_error("open failed");
+            return fd; });
+    }
+
+    size_t pwriteFull(int fd, const uint8_t *buf, size_t count, off_t offset)
+    {
+        size_t total = 0;
+        while (total < count)
+        {
+            ssize_t written = ::pwrite(fd, buf + total, count - total, offset + total);
+            if (written < 0)
+            {
+                if (errno == EINTR)
+                    continue;
+                throw std::runtime_error("pwrite failed");
+            }
+            total += written;
+        }
+        return total;
+    }
+
+    void fsyncRetry(int fd)
+    {
+        retryWithBackoff([&]()
+                         {
+            if (::fsync(fd) < 0) throw std::runtime_error("fsync failed");
+            return 0; });
+    }
+};
+
+#endif
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/include/Writer.hpp b/archive/2025/summer/bsc_karidas/include/Writer.hpp
new file mode 100644
index 000000000..3bc9e1672
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/include/Writer.hpp
@@ -0,0 +1,40 @@
+#ifndef WRITER_HPP
+#define WRITER_HPP
+
+#include <thread>
+#include <atomic>
+#include <memory>
+#include <vector>
+#include "QueueItem.hpp"
+#include "BufferQueue.hpp"
+#include "SegmentedStorage.hpp"
+
+class Writer
+{
+public:
+    explicit Writer(BufferQueue &queue,
+                    std::shared_ptr<SegmentedStorage> storage,
+                    size_t batchSize = 100,
+                    bool useEncryption = true,
+                    int compressionLevel = 9);
+
+    ~Writer();
+
+    void start();
+    void stop();
+    bool isRunning() const;
+
+private:
+    void processLogEntries();
+
+    BufferQueue &m_queue;
+    std::shared_ptr<SegmentedStorage> m_storage;
+    std::unique_ptr<std::thread> m_writerThread;
+    std::atomic<bool> m_running{false};
+    const size_t m_batchSize;
+    const bool m_useEncryption;
+    const int m_compressionLevel;
+
+    BufferQueue::ConsumerToken m_consumerToken;
+};
+#endif
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/shell.nix b/archive/2025/summer/bsc_karidas/shell.nix
new file mode 100644
index 000000000..3d480aff4
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/shell.nix
@@ -0,0 +1,12 @@
+{ pkgs ? import <nixpkgs> {} }:
+
+pkgs.mkShell {
+  buildInputs = with pkgs; [
+    gcc
+    cmake
+    gnumake
+    openssl
+    gtest
+    zlib
+  ];
+}
diff --git a/archive/2025/summer/bsc_karidas/src/BufferQueue.cpp b/archive/2025/summer/bsc_karidas/src/BufferQueue.cpp
new file mode 100644
index 000000000..3d9af1c0c
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/src/BufferQueue.cpp
@@ -0,0 +1,131 @@
+#include "BufferQueue.hpp"
+#include <algorithm>
+#include <thread>
+#include <iostream>
+#include <chrono>
+#include <cmath>
+
+BufferQueue::BufferQueue(size_t capacity, size_t maxExplicitProducers)
+{
+    m_queue = moodycamel::ConcurrentQueue<QueueItem>(capacity, maxExplicitProducers, 0);
+}
+
+bool BufferQueue::enqueue(QueueItem item, ProducerToken &token)
+{
+    return m_queue.try_enqueue(token, std::move(item));
+}
+
+bool BufferQueue::enqueueBlocking(QueueItem item, ProducerToken &token, std::chrono::milliseconds timeout)
+{
+    auto start = std::chrono::steady_clock::now();
+    int backoffMs = 1;
+    const int maxBackoffMs = 100;
+
+    while (true)
+    {
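+        // enqueue() takes the item by value, so the argument is consumed whether
+        // or not the push succeeds; work on a copy each attempt and keep the
+        // original intact for the next retry.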
+        QueueItem itemCopy = item;
+        if (enqueue(std::move(itemCopy), token))
+        {
+            return true;
+        }
+
+        auto elapsed = std::chrono::steady_clock::now() - start;
+        if (elapsed >= timeout)
+        {
+            return false;
+        }
+
+        int sleepTime = backoffMs;
+
+        // Make sure we don't sleep longer than our remaining timeout
+        if (timeout != std::chrono::milliseconds::max())
+        {
+            auto remainingTime = timeout - elapsed;
+            if (remainingTime <= std::chrono::milliseconds(sleepTime))
+            {
+                sleepTime = std::max(1, static_cast<int>(std::chrono::duration_cast<std::chrono::milliseconds>(remainingTime).count()));
+            }
+        }
+
+        std::this_thread::sleep_for(std::chrono::milliseconds(sleepTime));
+        backoffMs = std::min(backoffMs * 2, maxBackoffMs);
+    }
+}
+
+bool BufferQueue::enqueueBatch(std::vector<QueueItem> items, ProducerToken &token)
+{
+    return m_queue.try_enqueue_bulk(token, std::make_move_iterator(items.begin()), items.size());
+}
+
+bool BufferQueue::enqueueBatchBlocking(std::vector<QueueItem> items, ProducerToken &token,
+                                       std::chrono::milliseconds timeout)
+{
+    auto start = std::chrono::steady_clock::now();
+    int backoffMs = 1;
+    const int maxBackoffMs = 100;
+
+    while (true)
+    {
+        std::vector<QueueItem> itemsCopy = items;
+        if (enqueueBatch(std::move(itemsCopy), token))
+        {
+            return true;
+        }
+
+        auto elapsed = std::chrono::steady_clock::now() - start;
+        if (elapsed >= timeout)
+        {
+            return false;
+        }
+
+        int sleepTime = backoffMs;
+
+        // Make sure we don't sleep longer than our remaining timeout
+        if (timeout != std::chrono::milliseconds::max())
+        {
+            auto remainingTime = timeout - elapsed;
+            if (remainingTime <= std::chrono::milliseconds(sleepTime))
+            {
+                sleepTime = std::max(1, static_cast<int>(std::chrono::duration_cast<std::chrono::milliseconds>(remainingTime).count()));
+            }
+        }
+
+        std::this_thread::sleep_for(std::chrono::milliseconds(sleepTime));
+        backoffMs = std::min(backoffMs * 2, maxBackoffMs);
+    }
+}
+
+bool BufferQueue::tryDequeue(QueueItem &item, ConsumerToken &token)
+{
+    return m_queue.try_dequeue(token, item);
+}
+
+size_t BufferQueue::tryDequeueBatch(std::vector<QueueItem> &items, size_t maxItems, ConsumerToken &token)
+{
+    items.clear();
+    items.resize(maxItems);
+
+    size_t dequeued = m_queue.try_dequeue_bulk(token, items.begin(), maxItems);
+    items.resize(dequeued);
+
+    return dequeued;
+}
+
+bool BufferQueue::flush()
+{
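+    // Best-effort drain: poll size_approx() until the consumer threads have
+    // emptied the queue.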
+    do
+    {
+        std::this_thread::sleep_for(std::chrono::milliseconds(500));
+    } while (m_queue.size_approx() != 0);
+
+    return true;
+}
+
+size_t BufferQueue::size() const
+{
+    return m_queue.size_approx();
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/src/Compression.cpp b/archive/2025/summer/bsc_karidas/src/Compression.cpp
new file mode 100644
index 000000000..cd25c9310
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/src/Compression.cpp
@@ -0,0 +1,109 @@
+#include "Compression.hpp"
+#include <stdexcept>
+#include <cstring>
+#include <iostream>
+
+// Helper function to compress raw data using zlib
+std::vector<uint8_t> Compression::compress(std::vector<uint8_t> &&data, int level)
+{
+    if (data.empty())
+    {
+        return std::vector<uint8_t>();
+    }
+
+    z_stream zs;
+    std::memset(&zs, 0, sizeof(zs));
+
+    // Initialize deflate with the caller-supplied compression level
+    if (deflateInit(&zs, level) != Z_OK)
+    {
+        throw std::runtime_error("Failed to initialize zlib deflate");
+    }
+
+    zs.next_in = const_cast<Bytef *>(data.data());
+    zs.avail_in = data.size();
+
+    int ret;
+    char outbuffer[32768];
+    std::vector<uint8_t> compressedData;
+
+    // Compress data in chunks
+    do
+    {
+        zs.next_out = reinterpret_cast<Bytef *>(outbuffer);
+        zs.avail_out = sizeof(outbuffer);
+
+        ret = deflate(&zs, Z_FINISH);
+
+        if (compressedData.size() < zs.total_out)
+        {
+            compressedData.insert(compressedData.end(),
+                                  outbuffer,
+                                  outbuffer + (zs.total_out - compressedData.size()));
+        }
+    } while (ret == Z_OK);
+
+    deflateEnd(&zs);
+
+    if (ret != Z_STREAM_END)
+    {
+        throw std::runtime_error("Exception during zlib compression");
+    }
+
+    return compressedData;
+}
+
+// Helper function to decompress raw data using zlib
+std::vector<uint8_t> Compression::decompress(std::vector<uint8_t> &&compressedData)
+{
+    if (compressedData.empty())
+    {
+        return std::vector<uint8_t>();
+    }
+
+    z_stream zs;
+    std::memset(&zs, 0, sizeof(zs));
+
+    if (inflateInit(&zs) != Z_OK)
+    {
+        throw std::runtime_error("Failed to initialize zlib inflate");
+    }
+
+    zs.next_in = const_cast<Bytef *>(compressedData.data());
+    zs.avail_in = compressedData.size();
+
+    int ret;
+    char outbuffer[32768];
+    std::vector<uint8_t> decompressedData;
+
+    // Decompress data in chunks
+    do
+    {
+        zs.next_out = reinterpret_cast<Bytef *>(outbuffer);
+        zs.avail_out = sizeof(outbuffer);
+
+        ret = inflate(&zs, Z_NO_FLUSH);
+
+        if (ret == Z_NEED_DICT || ret == Z_DATA_ERROR || ret == Z_MEM_ERROR)
+        {
+            inflateEnd(&zs);
+            throw std::runtime_error("Exception during zlib decompression");
+        }
+
+        if (decompressedData.size() < zs.total_out)
+        {
+            decompressedData.insert(decompressedData.end(),
+                                    outbuffer,
+                                    outbuffer + (zs.total_out - decompressedData.size()));
+        }
+    } while (ret == Z_OK);
+
+    inflateEnd(&zs);
+
+    if (ret != Z_STREAM_END)
+    {
+        throw std::runtime_error("Exception during zlib decompression");
+    }
+
+    return decompressedData;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/src/Crypto.cpp b/archive/2025/summer/bsc_karidas/src/Crypto.cpp
new file mode 100644
index 000000000..0e7912721
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/src/Crypto.cpp
@@ -0,0 +1,211 @@
+#include "Crypto.hpp"
+#include <openssl/evp.h>
+#include <openssl/rand.h>
+#include <openssl/err.h>
+#include <stdexcept>
+#include <cstring>
+#include <iostream>
+
+Crypto::Crypto()
+{
+    // Initialize OpenSSL
+    OpenSSL_add_all_algorithms();
+    m_encryptCtx = EVP_CIPHER_CTX_new();
+    if (!m_encryptCtx)
+    {
+        throw std::runtime_error("Failed to create encryption context");
+    }
+
+    m_decryptCtx = EVP_CIPHER_CTX_new();
+    if (!m_decryptCtx)
+    {
+        EVP_CIPHER_CTX_free(m_encryptCtx);
+        throw std::runtime_error("Failed to create decryption context");
+    }
+}
+
+Crypto::~Crypto()
+{
+    // Free contexts
+    if (m_encryptCtx)
+    {
+        EVP_CIPHER_CTX_free(m_encryptCtx);
+    }
+
+    if (m_decryptCtx)
+    {
+        EVP_CIPHER_CTX_free(m_decryptCtx);
+    }
+
+    // Clean up OpenSSL
+    EVP_cleanup();
+}
+
+// Encrypt data using AES-256-GCM with provided IV
+std::vector<uint8_t> Crypto::encrypt(std::vector<uint8_t> &&plaintext,
+                                     const std::vector<uint8_t> &key,
+                                     const std::vector<uint8_t> &iv)
+{
+    if (plaintext.empty())
+        return {};
+    if (key.size() != KEY_SIZE)
+        throw std::runtime_error("Invalid key size");
+    if (iv.size() != GCM_IV_SIZE)
+        throw std::runtime_error("Invalid IV size");
+
+    // Reset the existing context instead of creating a new one
+    EVP_CIPHER_CTX_reset(m_encryptCtx);
+
+    // Initialize encryption operation
+    if (EVP_EncryptInit_ex(m_encryptCtx, EVP_aes_256_gcm(), nullptr, key.data(), iv.data()) != 1)
+    {
+        throw std::runtime_error("Failed to initialize encryption");
+    }
+
+    // Calculate the exact output size: size_field + ciphertext + tag
+    // For GCM mode, ciphertext size equals plaintext size (no padding)
+    const size_t sizeFieldSize = sizeof(uint32_t);
+    const size_t ciphertextSize = plaintext.size();
+    const size_t totalSize = sizeFieldSize + ciphertextSize + GCM_TAG_SIZE;
+
+    // Pre-allocate result buffer with exact final size
+    std::vector<uint8_t> result(totalSize);
+
+    // Reserve space for data size field
+    uint32_t dataSize = ciphertextSize;
+    std::memcpy(result.data(), &dataSize, sizeFieldSize);
+
+    // Perform encryption directly into the result buffer (after the size field)
+    int encryptedLen = 0;
+    if (EVP_EncryptUpdate(m_encryptCtx, result.data() + sizeFieldSize, &encryptedLen,
+                          plaintext.data(), plaintext.size()) != 1)
+    {
+        throw std::runtime_error("Failed during encryption update");
+    }
+
+    // Finalize encryption (writing to the buffer right after the existing encrypted data)
+    int finalLen = 0;
+    if (EVP_EncryptFinal_ex(m_encryptCtx, result.data() + sizeFieldSize + encryptedLen, &finalLen) != 1)
+    {
+        throw std::runtime_error("Failed to finalize encryption");
+    }
+
+    // Sanity check: for GCM, encryptedLen + finalLen should equal plaintext.size()
+    if (encryptedLen + finalLen != static_cast<int>(plaintext.size()))
+    {
+        throw std::runtime_error("Unexpected encryption output size");
+    }
+
+    // Get the authentication tag and write it directly to the result buffer
+    if (EVP_CIPHER_CTX_ctrl(m_encryptCtx, EVP_CTRL_GCM_GET_TAG, GCM_TAG_SIZE,
+                            result.data() + sizeFieldSize + ciphertextSize) != 1)
+    {
+        throw std::runtime_error("Failed to get authentication tag");
+    }
+
+    return result;
+}
+
+// Decrypt data using AES-256-GCM with provided IV
+std::vector<uint8_t> Crypto::decrypt(const std::vector<uint8_t> &encryptedData,
+                                     const std::vector<uint8_t> &key,
+                                     const std::vector<uint8_t> &iv)
+{
+    try
+    {
+        if (encryptedData.empty())
+        {
+            return std::vector<uint8_t>();
+        }
+
+        // Validate key size
+        if (key.size() != KEY_SIZE)
+        {
+            throw std::runtime_error("Invalid key size. Expected 32 bytes for AES-256");
+        }
+
+        // Validate IV size
+        if (iv.size() != GCM_IV_SIZE)
+        {
+            throw std::runtime_error("Invalid IV size. Expected 12 bytes for GCM");
+        }
+
+        // Ensure we have at least enough data for the data size field
+        if (encryptedData.size() < sizeof(uint32_t))
+        {
+            throw std::runtime_error("Encrypted data too small - missing data size");
+        }
+
+        // Extract the encrypted data size
+        uint32_t dataSize;
+        std::memcpy(&dataSize, encryptedData.data(), sizeof(dataSize));
+        size_t position = sizeof(dataSize);
+
+        // Validate data size
+        if (position + dataSize > encryptedData.size())
+        {
+            throw std::runtime_error("Encrypted data too small - missing complete data");
+        }
+
+        // Extract the encrypted data
+        std::vector<uint8_t> ciphertext(dataSize);
+        std::memcpy(ciphertext.data(), encryptedData.data() + position, dataSize);
+        position += dataSize;
+
+        // Extract the authentication tag
+        if (position + GCM_TAG_SIZE > encryptedData.size())
+        {
+            throw std::runtime_error("Encrypted data too small - missing authentication tag");
+        }
+
+        std::vector<uint8_t> tag(GCM_TAG_SIZE);
+        std::memcpy(tag.data(), encryptedData.data() + position, GCM_TAG_SIZE);
+
+        // Reset the existing context instead of creating a new one
+        EVP_CIPHER_CTX_reset(m_decryptCtx);
+
+        // Initialize decryption operation
+        if (EVP_DecryptInit_ex(m_decryptCtx, EVP_aes_256_gcm(), nullptr, key.data(), iv.data()) != 1)
+        {
+            throw std::runtime_error("Failed to initialize decryption");
+        }
+
+        // Set expected tag value
+        if (EVP_CIPHER_CTX_ctrl(m_decryptCtx, EVP_CTRL_GCM_SET_TAG, GCM_TAG_SIZE, tag.data()) != 1)
+        {
+            throw std::runtime_error("Failed to set authentication tag");
+        }
+
+        // Prepare output buffer for plaintext
+        std::vector<uint8_t> decryptedData(ciphertext.size());
+        int decryptedLen = 0;
+
+        // Perform decryption
+        if (EVP_DecryptUpdate(m_decryptCtx, decryptedData.data(), &decryptedLen,
+                              ciphertext.data(), ciphertext.size()) != 1)
+        {
+            throw std::runtime_error("Failed during decryption update");
+        }
+
+        // Finalize decryption and verify tag
+        int finalLen = 0;
+        int ret = EVP_DecryptFinal_ex(m_decryptCtx, decryptedData.data() + decryptedLen, &finalLen);
+
+        if (ret != 1)
+        {
+            throw std::runtime_error("Authentication failed: data may have been tampered with");
+        }
+
+        // Resize the decrypted data to the actual length
+        decryptedData.resize(decryptedLen + finalLen);
+
+        return decryptedData;
+    }
+    catch (const std::exception &e)
+    {
+        std::cerr << "Error decrypting data: " << e.what() << std::endl;
+        // Print OpenSSL error queue
+        ERR_print_errors_fp(stderr);
+        return std::vector<uint8_t>();
+    }
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/src/LogEntry.cpp b/archive/2025/summer/bsc_karidas/src/LogEntry.cpp
new file mode 100644
index 000000000..af487a29d
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/src/LogEntry.cpp
@@ -0,0 +1,375 @@
+#include "LogEntry.hpp"
+#include <cstring>
+#include <stdexcept>
+#include <iostream>
+
+LogEntry::LogEntry()
+    : m_actionType(ActionType::CREATE),
+      m_dataLocation(""),
+      m_dataControllerId(""),
+      m_dataProcessorId(""),
+      m_dataSubjectId(""),
+      m_timestamp(std::chrono::system_clock::now()),
+      m_payload() {}
+
+LogEntry::LogEntry(ActionType actionType,
+                   std::string dataLocation,
+                   std::string dataControllerId,
+                   std::string dataProcessorId,
+                   std::string dataSubjectId,
+                   std::vector<uint8_t> payload)
+    : m_actionType(actionType),
+      m_dataLocation(std::move(dataLocation)),
+      m_dataControllerId(std::move(dataControllerId)),
+      m_dataProcessorId(std::move(dataProcessorId)),
+      m_dataSubjectId(std::move(dataSubjectId)),
+      m_timestamp(std::chrono::system_clock::now()),
+      m_payload(std::move(payload))
+{
+}
+
+// Move version that consumes the LogEntry
+std::vector<uint8_t> LogEntry::serialize() &&
+{
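+    // Wire format: [action type (int)] [len|location] [len|controller]
+    //              [len|processor] [len|subject] [timestamp ms (int64)] [len|payload]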
+    // Calculate required size upfront
+    size_t totalSize =
+        sizeof(int) +                                  // ActionType
+        sizeof(uint32_t) + m_dataLocation.size() +     // Size + data location
+        sizeof(uint32_t) + m_dataControllerId.size() + // Size + data controller ID
+        sizeof(uint32_t) + m_dataProcessorId.size() +  // Size + data processor ID
+        sizeof(uint32_t) + m_dataSubjectId.size() +    // Size + data subject ID
+        sizeof(int64_t) +                              // Timestamp
+        sizeof(uint32_t) + m_payload.size();           // Size + payload data
+
+    // Pre-allocate the vector
+    std::vector<uint8_t> result;
+    result.reserve(totalSize);
+
+    // Push ActionType
+    int actionType = static_cast<int>(m_actionType);
+    appendToVector(result, &actionType, sizeof(actionType));
+
+    // Move strings
+    appendStringToVector(result, std::move(m_dataLocation));
+    appendStringToVector(result, std::move(m_dataControllerId));
+    appendStringToVector(result, std::move(m_dataProcessorId));
+    appendStringToVector(result, std::move(m_dataSubjectId));
+
+    // Push timestamp
+    int64_t timestamp = std::chrono::duration_cast<std::chrono::milliseconds>(
+                            m_timestamp.time_since_epoch())
+                            .count();
+    appendToVector(result, &timestamp, sizeof(timestamp));
+
+    // Move payload
+    uint32_t payloadSize = static_cast<uint32_t>(m_payload.size());
+    appendToVector(result, &payloadSize, sizeof(payloadSize));
+    if (!m_payload.empty())
+    {
+        result.insert(result.end(),
+                      std::make_move_iterator(m_payload.begin()),
+                      std::make_move_iterator(m_payload.end()));
+    }
+
+    return result;
+}
+
+// Const version for when you need to keep the LogEntry
+std::vector<uint8_t> LogEntry::serialize() const &
+{
+    // Calculate required size upfront
+    size_t totalSize =
+        sizeof(int) +                                  // ActionType
+        sizeof(uint32_t) + m_dataLocation.size() +     // Size + data location
+        sizeof(uint32_t) + m_dataControllerId.size() + // Size + data controller ID
+        sizeof(uint32_t) + m_dataProcessorId.size() +  // Size + data processor ID
+        sizeof(uint32_t) + m_dataSubjectId.size() +    // Size + data subject ID
+        sizeof(int64_t) +                              // Timestamp
+        sizeof(uint32_t) + m_payload.size();           // Size + payload data
+
+    // Pre-allocate the vector
+    std::vector<uint8_t> result;
+    result.reserve(totalSize);
+
+    // Push ActionType
+    int actionType = static_cast<int>(m_actionType);
+    appendToVector(result, &actionType, sizeof(actionType));
+
+    // Copy strings
+    appendStringToVector(result, m_dataLocation);
+    appendStringToVector(result, m_dataControllerId);
+    appendStringToVector(result, m_dataProcessorId);
+    appendStringToVector(result, m_dataSubjectId);
+
+    // Push timestamp
+    int64_t timestamp = std::chrono::duration_cast<std::chrono::milliseconds>(
+                            m_timestamp.time_since_epoch())
+                            .count();
+    appendToVector(result, &timestamp, sizeof(timestamp));
+
+    // Copy payload
+    uint32_t payloadSize = static_cast<uint32_t>(m_payload.size());
+    appendToVector(result, &payloadSize, sizeof(payloadSize));
+    if (!m_payload.empty())
+    {
+        appendToVector(result, m_payload.data(), m_payload.size());
+    }
+
+    return result;
+}
+
+bool LogEntry::deserialize(std::vector<uint8_t> &&data)
+{
+    try
+    {
+        size_t offset = 0;
+
+        // Check if we have enough data for the basic structure
+        if (data.size() < sizeof(int))
+            return false;
+
+        // Extract action type
+        int actionType;
+        std::memcpy(&actionType, data.data() + offset, sizeof(actionType));
+        offset += sizeof(actionType);
+        m_actionType = static_cast<ActionType>(actionType);
+
+        // Extract data location
+        if (!extractStringFromVector(data, offset, m_dataLocation))
+            return false;
+
+        // Extract data controller ID
+        if (!extractStringFromVector(data, offset, m_dataControllerId))
+            return false;
+
+        // Extract data processor ID
+        if (!extractStringFromVector(data, offset, m_dataProcessorId))
+            return false;
+
+        // Extract data subject ID
+        if (!extractStringFromVector(data, offset, m_dataSubjectId))
+            return false;
+
+        // Extract timestamp
+        if (offset + sizeof(int64_t) > data.size())
+            return false;
+
+        int64_t timestamp;
+        std::memcpy(&timestamp, data.data() + offset, sizeof(timestamp));
+        offset += sizeof(timestamp);
+        m_timestamp = std::chrono::system_clock::time_point(std::chrono::milliseconds(timestamp));
+
+        // Extract payload
+        if (offset + sizeof(uint32_t) > data.size())
+            return false;
+
+        uint32_t payloadSize;
+        std::memcpy(&payloadSize, data.data() + offset, sizeof(payloadSize));
+        offset += sizeof(payloadSize);
+
+        if (offset + payloadSize > data.size())
+            return false;
+
+        if (payloadSize > 0)
+        {
+            m_payload.clear();
+            m_payload.reserve(payloadSize);
+
+            auto start_it = data.begin() + offset;
+            auto end_it = start_it + payloadSize;
+            m_payload.assign(std::make_move_iterator(start_it),
+                             std::make_move_iterator(end_it));
+            offset += payloadSize;
+        }
+        else
+        {
+            m_payload.clear();
+        }
+
+        return true;
+    }
+    catch (const std::exception &)
+    {
+        return false;
+    }
+}
+
+std::vector<uint8_t> LogEntry::serializeBatch(std::vector<LogEntry> &&entries)
+{
+    if (entries.empty())
+    {
+        // Just return a vector with count = 0
+        std::vector<uint8_t> batchData(sizeof(uint32_t));
+        uint32_t numEntries = 0;
+        std::memcpy(batchData.data(), &numEntries, sizeof(numEntries));
+        return batchData;
+    }
+
+    // Pre-calculate approximate total size to minimize reallocations
+    size_t estimatedSize = sizeof(uint32_t); // Number of entries
+    for (const auto &entry : entries)
+    {
+        // Rough estimate: header size + string sizes + payload size
+        estimatedSize += sizeof(uint32_t) +     // Entry size field
+                         sizeof(int) +          // ActionType
+                         4 * sizeof(uint32_t) + // 4 string length fields
+                         entry.getDataLocation().size() +
+                         entry.getDataControllerId().size() +
+                         entry.getDataProcessorId().size() +
+                         entry.getDataSubjectId().size() +
+                         sizeof(int64_t) +  // Timestamp
+                         sizeof(uint32_t) + // Payload size
+                         entry.getPayload().size();
+    }
+
+    std::vector<uint8_t> batchData;
+    batchData.reserve(estimatedSize);
+
+    // Store the number of entries
+    uint32_t numEntries = static_cast<uint32_t>(entries.size());
+    batchData.resize(sizeof(numEntries));
+    std::memcpy(batchData.data(), &numEntries, sizeof(numEntries));
+
+    // Serialize and append each entry using move semantics
+    for (auto &entry : entries)
+    {
+        // Move-serialize the entry
+        std::vector<uint8_t> entryData = std::move(entry).serialize();
+
+        // Store the size of the serialized entry
+        uint32_t entrySize = static_cast<uint32_t>(entryData.size());
+        size_t currentSize = batchData.size();
+        batchData.resize(currentSize + sizeof(entrySize));
+        std::memcpy(batchData.data() + currentSize, &entrySize, sizeof(entrySize));
+
+        // Move the serialized entry data
+        batchData.insert(batchData.end(),
+                         std::make_move_iterator(entryData.begin()),
+                         std::make_move_iterator(entryData.end()));
+    }
+
+    return batchData;
+}
+
+std::vector<LogEntry> LogEntry::deserializeBatch(std::vector<uint8_t> &&batchData)
+{
+    std::vector<LogEntry> entries;
+
+    try
+    {
+        // Read the number of entries
+        if (batchData.size() < sizeof(uint32_t))
+        {
+            throw std::runtime_error("Batch data too small to contain entry count");
+        }
+
+        uint32_t numEntries;
+        std::memcpy(&numEntries, batchData.data(), sizeof(numEntries));
+
+        // Reserve space for entries to avoid reallocations
+        entries.reserve(numEntries);
+
+        // Position in the batch data
+        size_t position = sizeof(numEntries);
+
+        // Extract each entry
+        for (uint32_t i = 0; i < numEntries; ++i)
+        {
+            // Check if we have enough data left to read the entry size
+            if (position + sizeof(uint32_t) > batchData.size())
+            {
+                throw std::runtime_error("Unexpected end of batch data");
+            }
+
+            // Read the size of the entry
+            uint32_t entrySize;
+            std::memcpy(&entrySize, batchData.data() + position, sizeof(entrySize));
+            position += sizeof(entrySize);
+
+            // Check if we have enough data left to read the entry
+            if (position + entrySize > batchData.size())
+            {
+                throw std::runtime_error("Unexpected end of batch data");
+            }
+
+            // Create entry data by moving a slice from the batch data
+            std::vector<uint8_t> entryData;
+            entryData.reserve(entrySize);
+
+            auto start_it = batchData.begin() + position;
+            auto end_it = start_it + entrySize;
+            entryData.assign(std::make_move_iterator(start_it),
+                             std::make_move_iterator(end_it));
+            position += entrySize;
+
+            // Deserialize the entry using move semantics
+            LogEntry entry;
+            if (entry.deserialize(std::move(entryData)))
+            {
+                entries.emplace_back(std::move(entry));
+            }
+            else
+            {
+                throw std::runtime_error("Failed to deserialize log entry");
+            }
+        }
+    }
+    catch (const std::exception &e)
+    {
+        std::cerr << "Error deserializing log batch: " << e.what() << std::endl;
+    }
+
+    return entries;
+}
+
+// Helper method to append data to a vector
+void LogEntry::appendToVector(std::vector<uint8_t> &vec, const void *data, size_t size) const
+{
+    const uint8_t *bytes = static_cast<const uint8_t *>(data);
+    vec.insert(vec.end(), bytes, bytes + size);
+}
+
+// Helper method to append a string with its length (const version)
+void LogEntry::appendStringToVector(std::vector<uint8_t> &vec, const std::string &str) const
+{
+    uint32_t length = static_cast<uint32_t>(str.size());
+    appendToVector(vec, &length, sizeof(length));
+
+    if (length > 0)
+    {
+        appendToVector(vec, str.data(), str.size());
+    }
+}
+
+// Helper method to append a string with its length (move version)
+void LogEntry::appendStringToVector(std::vector<uint8_t> &vec, std::string &&str)
+{
+    uint32_t length = static_cast<uint32_t>(str.size());
+    appendToVector(vec, &length, sizeof(length));
+
+    if (length > 0)
+    {
+        vec.insert(vec.end(), str.begin(), str.end());
+    }
+}
+
+// Helper method to extract a string from a vector
+bool LogEntry::extractStringFromVector(std::vector<uint8_t> &vec, size_t &offset, std::string &str)
+{
+    // Check if we have enough data for the string length
+    if (offset + sizeof(uint32_t) > vec.size())
+        return false;
+
+    uint32_t length;
+    std::memcpy(&length, vec.data() + offset, sizeof(length));
+    offset += sizeof(length);
+
+    // Check if we have enough data for the string content
+    if (offset + length > vec.size())
+        return false;
+
+    str.assign(reinterpret_cast<const char *>(vec.data() + offset), length);
+    offset += length;
+
+    return true;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/src/Logger.cpp b/archive/2025/summer/bsc_karidas/src/Logger.cpp
new file mode 100644
index 000000000..5db75f303
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/src/Logger.cpp
@@ -0,0 +1,139 @@
+#include "Logger.hpp"
+#include "QueueItem.hpp"
+#include <iostream>
+
+// Initialize static members
+std::unique_ptr<Logger> Logger::s_instance = nullptr;
+std::mutex Logger::s_instanceMutex;
+
+Logger &Logger::getInstance()
+{
+    std::lock_guard<std::mutex> lock(s_instanceMutex);
+    if (s_instance == nullptr)
+    {
+        s_instance.reset(new Logger());
+    }
+    return *s_instance;
+}
+
+Logger::Logger()
+    : m_logQueue(nullptr),
+      m_appendTimeout(std::chrono::milliseconds::max()),
+      m_initialized(false)
+{
+}
+
+Logger::~Logger()
+{
+    if (m_initialized)
+    {
+        reset();
+    }
+}
+
+bool Logger::initialize(std::shared_ptr<BufferQueue> queue,
+                        std::chrono::milliseconds appendTimeout)
+{
+    if (m_initialized)
+    {
+        reportError("Logger already initialized");
+        return false;
+    }
+
+    if (!queue)
+    {
+        reportError("Cannot initialize with a null queue");
+        return false;
+    }
+
+    m_logQueue = std::move(queue);
+    m_appendTimeout = appendTimeout;
+    m_initialized = true;
+
+    return true;
+}
+
+BufferQueue::ProducerToken Logger::createProducerToken()
+{
+    if (!m_initialized)
+    {
+        reportError("Logger not initialized");
+        throw std::runtime_error("Logger not initialized");
+    }
+
+    return m_logQueue->createProducerToken();
+}
+
+bool Logger::append(LogEntry entry,
+                    BufferQueue::ProducerToken &token,
+                    const std::optional<std::string> &filename)
+{
+    if (!m_initialized)
+    {
+        reportError("Logger not initialized");
+        return false;
+    }
+
+    QueueItem item{std::move(entry), filename};
+    return m_logQueue->enqueueBlocking(std::move(item), token, m_appendTimeout);
+}
+
+bool Logger::appendBatch(std::vector<LogEntry> entries,
+                         BufferQueue::ProducerToken &token,
+                         const std::optional<std::string> &filename)
+{
+    if (!m_initialized)
+    {
+        reportError("Logger not initialized");
+        return false;
+    }
+
+    if (entries.empty())
+    {
+        return true;
+    }
+
+    std::vector<QueueItem> batch;
+    batch.reserve(entries.size());
+    for (auto &entry : entries)
+    {
+        batch.emplace_back(std::move(entry), filename);
+    }
+    return m_logQueue->enqueueBatchBlocking(std::move(batch), token, m_appendTimeout);
+}
+
+bool Logger::reset()
+{
+    if (!m_initialized)
+    {
+        return false;
+    }
+
+    // Reset state
+    m_initialized = false;
+    m_logQueue.reset();
+
+    return true;
+}
+
+bool Logger::exportLogs(
+    const std::string &outputPath,
+    std::chrono::system_clock::time_point fromTimestamp,
+    std::chrono::system_clock::time_point toTimestamp)
+{
+    if (!m_initialized)
+    {
+        reportError("Logger not initialized");
+        return false;
+    }
+
+    // This functionality would typically be handled by a separate component,
+    // such as a log storage or retrieval system
+    reportError("Export logs functionality not implemented in Logger");
+    return false;
+}
+
+void Logger::reportError(const std::string &message)
+{
+    std::cerr << "Logger Error: " << message << std::endl;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/src/LoggingManager.cpp b/archive/2025/summer/bsc_karidas/src/LoggingManager.cpp
new file mode 100644
index 000000000..a1d69bd5c
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/src/LoggingManager.cpp
@@ -0,0 +1,145 @@
+#include "LoggingManager.hpp"
+#include "Crypto.hpp"
+#include "Compression.hpp"
+#include <iostream>
+#include <filesystem>
+
+LoggingManager::LoggingManager(const LoggingConfig &config)
+    : m_numWriterThreads(config.numWriterThreads),
+      m_batchSize(config.batchSize),
+      m_useEncryption(config.useEncryption),
+      m_compressionLevel(config.compressionLevel)
+{
+    if (!std::filesystem::create_directories(config.basePath) &&
+        !std::filesystem::exists(config.basePath))
+    {
+        throw std::runtime_error("Failed to create log directory: " + config.basePath);
+    }
+
+    m_queue = std::make_shared<BufferQueue>(config.queueCapacity, config.maxExplicitProducers);
+    m_storage = std::make_shared<SegmentedStorage>(
+        config.basePath, config.baseFilename,
+        config.maxSegmentSize,
+        config.maxAttempts,
+        config.baseRetryDelay,
+        config.maxOpenFiles);
+
+    Logger::getInstance().initialize(m_queue, config.appendTimeout);
+
+    m_writers.reserve(m_numWriterThreads);
+}
+
+LoggingManager::~LoggingManager()
+{
+    stop();
+}
+
+bool LoggingManager::start()
+{
+    std::lock_guard<std::mutex> lock(m_systemMutex);
+
+    if (m_running.load(std::memory_order_acquire))
+    {
+        std::cerr << "LoggingSystem: Already running" << std::endl;
+        return false;
+    }
+
+    m_running.store(true, std::memory_order_release);
+    m_acceptingEntries.store(true, std::memory_order_release);
+
+    for (size_t i = 0; i < m_numWriterThreads; ++i)
+    {
+        auto writer = std::make_unique<Writer>(*m_queue, m_storage, m_batchSize, m_useEncryption, m_compressionLevel);
+        writer->start();
+        m_writers.push_back(std::move(writer));
+    }
+
+    std::cout << "LoggingSystem: Started " << m_numWriterThreads << " writer threads";
+    std::cout << " (Encryption: " << (m_useEncryption ? "Enabled" : "Disabled");
+    std::cout << ", Compression: " << (m_compressionLevel != 0 ? "Enabled" : "Disabled") << ")" << std::endl;
+    return true;
+}
+
+bool LoggingManager::stop()
+{
+    std::lock_guard<std::mutex> lock(m_systemMutex);
+
+    if (!m_running.load(std::memory_order_acquire))
+    {
+        return false;
+    }
+
+    m_acceptingEntries.store(false, std::memory_order_release);
+
+    if (m_queue)
+    {
+        std::cout << "LoggingSystem: Waiting for queue to empty..." << std::endl;
+        m_queue->flush();
+    }
+
+    for (auto &writer : m_writers)
+    {
+        writer->stop();
+    }
+    m_writers.clear();
+
+    // Flush storage to ensure all data is written
+    if (m_storage)
+    {
+        m_storage->flush();
+    }
+
+    m_running.store(false, std::memory_order_release);
+
+    Logger::getInstance().reset();
+
+    std::cout << "LoggingSystem: Stopped" << std::endl;
+    return true;
+}
+
+BufferQueue::ProducerToken LoggingManager::createProducerToken()
+{
+    return Logger::getInstance().createProducerToken();
+}
+
+bool LoggingManager::append(LogEntry entry,
+                            BufferQueue::ProducerToken &token,
+                            const std::optional<std::string> &filename)
+{
+    if (!m_acceptingEntries.load(std::memory_order_acquire))
+    {
+        std::cerr << "LoggingSystem: Not accepting entries" << std::endl;
+        return false;
+    }
+
+    return Logger::getInstance().append(std::move(entry), token, filename);
+}
+
+bool LoggingManager::appendBatch(std::vector<LogEntry> entries,
+                                 BufferQueue::ProducerToken &token,
+                                 const std::optional<std::string> &filename)
+{
+    if (!m_acceptingEntries.load(std::memory_order_acquire))
+    {
+        std::cerr << "LoggingSystem: Not accepting entries" << std::endl;
+        return false;
+    }
+
+    return Logger::getInstance().appendBatch(std::move(entries), token, filename);
+}
+
+bool LoggingManager::exportLogs(
+    const std::string &outputPath,
+    std::chrono::system_clock::time_point fromTimestamp,
+    std::chrono::system_clock::time_point toTimestamp)
+{
+    // This is a placeholder implementation for log export
+    // A complete solution would:
+    // 1. Read the encrypted segments from storage
+    // 2. Decrypt and decompress them
+    // 3. Filter by timestamp if requested
+    // 4. Write to the output path
+
+    std::cerr << "LoggingSystem: Export logs not fully implemented" << std::endl;
+    return false;
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/src/SegmentedStorage.cpp b/archive/2025/summer/bsc_karidas/src/SegmentedStorage.cpp
new file mode 100644
index 000000000..6b2a853e9
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/src/SegmentedStorage.cpp
@@ -0,0 +1,303 @@
+#include "SegmentedStorage.hpp"
+#include <iomanip>
+#include <sstream>
+#include <algorithm>
+#include <sys/stat.h>
+
+SegmentedStorage::SegmentedStorage(const std::string &basePath,
+                                   const std::string &baseFilename,
+                                   size_t maxSegmentSize,
+                                   size_t maxAttempts,
+                                   std::chrono::milliseconds baseRetryDelay,
+                                   size_t maxOpenFiles)
+    : m_basePath(basePath),
+      m_baseFilename(baseFilename),
+      m_maxSegmentSize(maxSegmentSize),
+      m_maxAttempts(maxAttempts),
+      m_baseRetryDelay(baseRetryDelay),
+      m_maxOpenFiles(maxOpenFiles),
+      m_cache(maxOpenFiles, this)
+{
+    std::filesystem::create_directories(m_basePath);
+    // Pre-warm the cache with the base filename
+    m_cache.get(m_baseFilename);
+}
+
+SegmentedStorage::~SegmentedStorage()
+{
+    m_cache.closeAll();
+}
+
+// LRUCache methods
+std::shared_ptr<SegmentedStorage::CacheEntry> SegmentedStorage::LRUCache::get(const std::string &filename)
+{
+    std::lock_guard<std::mutex> lock(m_mutex);
+
+    auto it = m_cache.find(filename);
+    if (it != m_cache.end())
+    {
+        // Found in cache, move to front (most recently used)
+        m_lruList.erase(it->second.lruIt);
+        m_lruList.push_front(filename);
+        it->second.lruIt = m_lruList.begin();
+        return it->second.entry;
+    }
+
+    // Not in cache, need to reconstruct state
+    auto entry = reconstructState(filename);
+
+    // Check if we need to evict
+    if (m_cache.size() >= m_capacity)
+    {
+        evictLRU();
+    }
+
+    // Add to cache
+    m_lruList.push_front(filename);
+    m_cache[filename] = {entry, m_lruList.begin()};
+
+    return entry;
+}
+
+void SegmentedStorage::LRUCache::evictLRU()
+{
+    // Called with m_mutex already locked
+    if (m_lruList.empty())
+        return;
+
+    const std::string &lru_filename = m_lruList.back();
+    auto it = m_cache.find(lru_filename);
+    if (it != m_cache.end())
+    {
+        // Close the file descriptor if it's open
+        if (it->second.entry->fd >= 0)
+        {
+            m_parent->fsyncRetry(it->second.entry->fd);
+            ::close(it->second.entry->fd);
+        }
+        m_cache.erase(it);
+    }
+    m_lruList.pop_back();
+}
+
+std::shared_ptr<SegmentedStorage::CacheEntry> SegmentedStorage::LRUCache::reconstructState(const std::string &filename)
+{
+    // Called with m_mutex already locked
+    auto entry = std::make_shared<CacheEntry>();
+
+    // Find the latest segment index for this filename
+    size_t latestIndex = m_parent->findLatestSegmentIndex(filename);
+    entry->segmentIndex.store(latestIndex, std::memory_order_release);
+
+    // Generate the path for the current segment
+    std::string segmentPath = m_parent->generateSegmentPath(filename, latestIndex);
+    entry->currentSegmentPath = segmentPath;
+
+    // Open the file and get its current size
+    entry->fd = m_parent->openWithRetry(segmentPath.c_str(), O_CREAT | O_RDWR | O_APPEND, 0644);
+
+    // Get the current file size to set as the offset
+    size_t fileSize = m_parent->getFileSize(segmentPath);
+    entry->currentOffset.store(fileSize, std::memory_order_release);
+
+    return entry;
+}
+
+void SegmentedStorage::LRUCache::flush(const std::string &filename)
+{
+    std::lock_guard<std::mutex> lock(m_mutex);
+    auto it = m_cache.find(filename);
+    if (it != m_cache.end() && it->second.entry->fd >= 0)
+    {
+        m_parent->fsyncRetry(it->second.entry->fd);
+    }
+}
+
+void SegmentedStorage::LRUCache::flushAll()
+{
+    std::lock_guard<std::mutex> lock(m_mutex);
+    for (const auto &pair : m_cache)
+    {
+        if (pair.second.entry->fd >= 0)
+        {
+            m_parent->fsyncRetry(pair.second.entry->fd);
+        }
+    }
+}
+
+void SegmentedStorage::LRUCache::closeAll()
+{
+    std::lock_guard<std::mutex> lock(m_mutex);
+    for (const auto &pair : m_cache)
+    {
+        if (pair.second.entry->fd >= 0)
+        {
+            m_parent->fsyncRetry(pair.second.entry->fd);
+            ::close(pair.second.entry->fd);
+        }
+    }
+    m_cache.clear();
+    m_lruList.clear();
+}
+
+size_t SegmentedStorage::findLatestSegmentIndex(const std::string &filename) const
+{
+    size_t maxIndex = 0;
+    std::string pattern = filename + "_";
+
+    try
+    {
+        for (const auto &entry : std::filesystem::directory_iterator(m_basePath))
+        {
+            if (entry.is_regular_file())
+            {
+                std::string name = entry.path().filename().string();
+                if (name.find(pattern) == 0)
+                {
+                    // Extract the index from filename format: filename_YYYYMMDD_HHMMSS_NNNNNN.log
+                    size_t lastUnderscore = name.find_last_of('_');
+                    if (lastUnderscore != std::string::npos)
+                    {
+                        size_t dotPos = name.find('.', lastUnderscore);
+                        if (dotPos != std::string::npos)
+                        {
+                            std::string indexStr = name.substr(lastUnderscore + 1, dotPos - lastUnderscore - 1);
+                            try
+                            {
+                                size_t index = std::stoull(indexStr);
+                                maxIndex = std::max(maxIndex, index);
+                            }
+                            catch (...)
+                            {
+                                // Ignore files that don't match the expected format
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+    catch (const std::filesystem::filesystem_error &)
+    {
+        // If directory doesn't exist or other filesystem error, return 0
+    }
+
+    return maxIndex;
+}
+
+size_t SegmentedStorage::getFileSize(const std::string &path) const
+{
+    struct stat st;
+    if (::stat(path.c_str(), &st) == 0)
+    {
+        return static_cast<size_t>(st.st_size);
+    }
+    return 0;
+}
+
+size_t SegmentedStorage::write(std::vector<uint8_t> &&data)
+{
+    return writeToFile(m_baseFilename, std::move(data));
+}
+
+size_t SegmentedStorage::writeToFile(const std::string &filename, std::vector<uint8_t> &&data)
+{
+    size_t size = data.size();
+    if (size == 0)
+        return 0;
+
+    std::shared_ptr<CacheEntry> entry = m_cache.get(filename);
+    size_t writeOffset;
+
+    // This loop handles race conditions around rotation
+    while (true)
+    {
+        // First check if we need to rotate WITHOUT reserving space
+        size_t currentOffset = entry->currentOffset.load(std::memory_order_acquire);
+        if (currentOffset + size > m_maxSegmentSize)
+        {
+            std::unique_lock<std::shared_mutex> rotLock(entry->fileMutex);
+            // Double-check if rotation is still needed
+            if (entry->currentOffset.load(std::memory_order_acquire) + size > m_maxSegmentSize)
+            {
+                rotateSegment(filename, entry);
+                // After rotation, entry has been updated with new fd and path
+                continue;
+            }
+        }
+
+        // Now safely reserve space
+        writeOffset = entry->currentOffset.fetch_add(size, std::memory_order_acq_rel);
+
+        // Double-check we didn't cross the boundary after reservation
+        if (writeOffset + size > m_maxSegmentSize)
+        {
+            // Another thread increased the offset past our threshold, try again
+            continue;
+        }
+
+        // We have a valid offset and can proceed with the write
+        break;
+    }
+
+    // Write under shared lock to prevent racing with rotate/close
+    {
+        std::shared_lock<std::shared_mutex> writeLock(entry->fileMutex);
+
+        // Verify the fd is still valid
+        if (entry->fd < 0)
+        {
+            // This shouldn't happen, but if it does, retry
+            return writeToFile(filename, std::move(data));
+        }
+
+        pwriteFull(entry->fd, data.data(), size, static_cast<off_t>(writeOffset));
+    }
+
+    return size;
+}
+
+void SegmentedStorage::flush()
+{
+    m_cache.flushAll();
+}
+
+std::string SegmentedStorage::rotateSegment(const std::string &filename, std::shared_ptr<CacheEntry> entry)
+{
+    // exclusive lock assumed by the caller (writeToFile)
+
+    // Close the old file descriptor
+    if (entry->fd >= 0)
+    {
+        fsyncRetry(entry->fd);
+        ::close(entry->fd);
+        entry->fd = -1;
+    }
+
+    size_t newIndex = entry->segmentIndex.fetch_add(1, std::memory_order_acq_rel) + 1;
+    entry->currentOffset.store(0, std::memory_order_release);
+    std::string newPath = generateSegmentPath(filename, newIndex);
+
+    // Update the entry's path and open the new file
+    entry->currentSegmentPath = newPath;
+    entry->fd = openWithRetry(newPath.c_str(), O_CREAT | O_RDWR | O_APPEND, 0644);
+
+    return newPath;
+}
+
+std::string SegmentedStorage::generateSegmentPath(const std::string &filename, size_t segmentIndex) const
+{
+    auto now = std::chrono::system_clock::now();
+    auto now_time_t = std::chrono::system_clock::to_time_t(now);
+    std::tm time_info;
+
+    // Linux-specific thread-safe version of localtime
+    localtime_r(&now_time_t, &time_info);
+
+    std::stringstream ss;
+    ss << m_basePath << "/";
+    ss << filename << "_";
+    ss << std::put_time(&time_info, "%Y%m%d_%H%M%S") << "_";
+    ss << std::setw(6) << std::setfill('0') << segmentIndex << ".log";
+    return ss.str();
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/src/Writer.cpp b/archive/2025/summer/bsc_karidas/src/Writer.cpp
new file mode 100644
index 000000000..bc3fe9acd
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/src/Writer.cpp
@@ -0,0 +1,103 @@
+#include "Writer.hpp"
+#include "Crypto.hpp"
+#include "Compression.hpp"
+#include <iostream>
+#include <chrono>
+#include <map>
+
+Writer::Writer(BufferQueue &queue,
+               std::shared_ptr<SegmentedStorage> storage,
+               size_t batchSize,
+               bool useEncryption,
+               int compressionLevel)
+    : m_queue(queue),
+      m_storage(std::move(storage)),
+      m_batchSize(batchSize),
+      m_useEncryption(useEncryption),
+      m_compressionLevel(compressionLevel),
+      m_consumerToken(queue.createConsumerToken())
+{
+}
+
+Writer::~Writer()
+{
+    stop();
+}
+
+void Writer::start()
+{
+    if (m_running.exchange(true))
+    {
+        return;
+    }
+
+    m_writerThread.reset(new std::thread(&Writer::processLogEntries, this));
+}
+
+void Writer::stop()
+{
+    if (m_running.exchange(false))
+    {
+        if (m_writerThread && m_writerThread->joinable())
+        {
+            m_writerThread->join();
+        }
+    }
+}
+
+bool Writer::isRunning() const
+{
+    return m_running.load();
+}
+
+void Writer::processLogEntries()
+{
+    std::vector<QueueItem> batch;
+
+    Crypto crypto;
+    std::vector<uint8_t> encryptionKey(crypto.KEY_SIZE, 0x42); // dummy key
+    std::vector<uint8_t> dummyIV(crypto.GCM_IV_SIZE, 0x24);    // dummy IV
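+    // Note: the fixed key/IV are placeholders for benchmarking only; AES-GCM
+    // requires a unique IV per encryption, so a real deployment must derive
+    // fresh IVs and manage keys properly.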
+
+    while (m_running)
+    {
+        size_t entriesDequeued = m_queue.tryDequeueBatch(batch, m_batchSize, m_consumerToken);
+        if (entriesDequeued == 0)
+        {
+            std::this_thread::sleep_for(std::chrono::milliseconds(5));
+            continue;
+        }
+
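+        // Group dequeued items by target file so each file receives a single
+        // serialized (and optionally compressed/encrypted) batch write.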
+        std::map<std::optional<std::string>, std::vector<LogEntry>> groupedEntries;
+        for (auto &item : batch)
+        {
+            groupedEntries[item.targetFilename].emplace_back(std::move(item.entry));
+        }
+
+        for (auto &[targetFilename, entries] : groupedEntries)
+        {
+            std::vector<uint8_t> processedData = LogEntry::serializeBatch(std::move(entries));
+
+            // Apply compression if enabled
+            if (m_compressionLevel > 0)
+            {
+                processedData = Compression::compress(std::move(processedData), m_compressionLevel);
+            }
+            // Apply encryption if enabled
+            if (m_useEncryption)
+            {
+                processedData = crypto.encrypt(std::move(processedData), encryptionKey, dummyIV);
+            }
+
+            if (targetFilename)
+            {
+                m_storage->writeToFile(*targetFilename, std::move(processedData));
+            }
+            else
+            {
+                m_storage->write(std::move(processedData));
+            }
+        }
+
+        batch.clear();
+    }
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/tests/integration/test_CompressionCrypto.cpp b/archive/2025/summer/bsc_karidas/tests/integration/test_CompressionCrypto.cpp
new file mode 100644
index 000000000..3d8d9c6bf
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/tests/integration/test_CompressionCrypto.cpp
@@ -0,0 +1,91 @@
+#include <gtest/gtest.h>
+#include "Compression.hpp"
+#include "Crypto.hpp"
+#include "LogEntry.hpp"
+#include <vector>
+#include <string>
+#include <memory>
+
+class CompressionCryptoTest : public ::testing::Test
+{
+protected:
+    Crypto crypto;
+
+    void SetUp() override
+    {
+        // Create sample log entries for testing
+        entry1 = LogEntry(LogEntry::ActionType::CREATE, "/data/records/1", "controller123", "processor123", "subject123");
+        entry2 = LogEntry(LogEntry::ActionType::READ, "/data/records/2", "controller456", "processor456", "subject456");
+        entry3 = LogEntry(LogEntry::ActionType::UPDATE, "/data/records/3", "controller789", "processor789", "subject789");
+
+        // Create encryption key and IV
+        key = std::vector<uint8_t>(32, 0x42);      // Fixed key for reproducibility
+        wrongKey = std::vector<uint8_t>(32, 0x24); // Different key for testing
+        dummyIV = std::vector<uint8_t>(12, 0x24);  // Fixed IV for reproducibility
+    }
+
+    // Helper function to compare two LogEntry objects
+    bool LogEntriesEqual(const LogEntry &a, const LogEntry &b)
+    {
+        return a.serialize() == b.serialize();
+    }
+
+    LogEntry entry1, entry2, entry3;
+    std::vector<uint8_t> key;
+    std::vector<uint8_t> wrongKey;
+    std::vector<uint8_t> dummyIV;
+};
+
+// Batch processing - original -> compress -> encrypt -> decrypt -> decompress -> recovered
+TEST_F(CompressionCryptoTest, BatchProcessing)
+{
+    std::vector<LogEntry> batch = {entry1, entry2, entry3};
+    std::vector<uint8_t> serializedBatch = LogEntry::serializeBatch(std::move(batch));
+    std::vector<uint8_t> compressed = Compression::compress(std::move(serializedBatch));
+    ASSERT_GT(compressed.size(), 0);
+
+    std::vector<uint8_t> encrypted = crypto.encrypt(std::move(compressed), key, dummyIV);
+    ASSERT_GT(encrypted.size(), 0);
+    EXPECT_NE(encrypted, compressed);
+
+    std::vector<uint8_t> decrypted = crypto.decrypt(encrypted, key, dummyIV);
+    ASSERT_GT(decrypted.size(), 0);
+    EXPECT_EQ(decrypted, compressed);
+
+    std::vector<uint8_t> decompressed = Compression::decompress(std::move(decrypted));
+    std::vector<LogEntry> recovered = LogEntry::deserializeBatch(std::move(decompressed));
+    ASSERT_EQ(batch.size(), recovered.size());
+
+    for (size_t i = 0; i < batch.size(); i++)
+    {
+        EXPECT_TRUE(LogEntriesEqual(batch[i], recovered[i]))
+            << "Entries at index " << i << " don't match";
+    }
+
+    // Test with empty batch
+    std::vector<LogEntry> emptyBatch;
+    std::vector<uint8_t> emptySerializedBatch = LogEntry::serializeBatch(std::move(emptyBatch));
+    std::vector<uint8_t> emptyCompressed = Compression::compress(std::move(emptySerializedBatch));
+    std::vector<uint8_t> emptyEncrypted = crypto.encrypt(std::move(emptyCompressed), key, dummyIV);
+    std::vector<uint8_t> emptyDecrypted = crypto.decrypt(emptyEncrypted, key, dummyIV);
+    std::vector<uint8_t> emptyDecompressed = Compression::decompress(std::move(emptyDecrypted));
+    std::vector<LogEntry> emptyRecovered = LogEntry::deserializeBatch(std::move(emptyDecompressed));
+    EXPECT_TRUE(emptyRecovered.empty());
+
+    // Test with single entry batch
+    std::vector<LogEntry> singleBatch = {entry1};
+    std::vector<uint8_t> singleSerializedBatch = LogEntry::serializeBatch(std::move(singleBatch));
+    std::vector<uint8_t> singleCompressed = Compression::compress(std::move(singleSerializedBatch));
+    std::vector<uint8_t> singleEncrypted = crypto.encrypt(std::move(singleCompressed), key, dummyIV);
+    std::vector<uint8_t> singleDecrypted = crypto.decrypt(singleEncrypted, key, dummyIV);
+    std::vector<uint8_t> singleDecompressed = Compression::decompress(std::move(singleDecrypted));
+    std::vector<LogEntry> singleRecovered = LogEntry::deserializeBatch(std::move(singleDecompressed));
+    ASSERT_EQ(1, singleRecovered.size());
+    EXPECT_TRUE(LogEntriesEqual(entry1, singleRecovered[0]));
+}
+
+int main(int argc, char **argv)
+{
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/tests/integration/test_WriterQueue.cpp b/archive/2025/summer/bsc_karidas/tests/integration/test_WriterQueue.cpp
new file mode 100644
index 000000000..00fff50f0
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/tests/integration/test_WriterQueue.cpp
@@ -0,0 +1,117 @@
+#include <gtest/gtest.h>
+#include "Writer.hpp"
+#include "BufferQueue.hpp"
+#include "SegmentedStorage.hpp"
+#include <chrono>
+#include <thread>
+#include <vector>
+#include <filesystem>
+
+class WriterIntegrationTest : public ::testing::Test
+{
+protected:
+    void SetUp() override
+    {
+        // Create a temporary directory for test log segments
+        testDir = "test_logs";
+        std::filesystem::create_directories(testDir);
+        logQueue = std::make_unique<BufferQueue>(1024, 4);
+
+        // Create a SegmentedStorage instance with reduced sizes for testing
+        storage = std::make_shared<SegmentedStorage>(
+            testDir,
+            "test_logsegment",
+            1024 * 1024 // Maximum segment size (1 MB for testing)
+        );
+
+        writer = std::make_unique<Writer>(*logQueue, storage);
+    }
+
+    void TearDown() override
+    {
+        if (writer)
+        {
+            writer->stop();
+        }
+        std::filesystem::remove_all(testDir);
+    }
+
+    std::unique_ptr<BufferQueue> logQueue;
+    std::unique_ptr<Writer> writer;
+    std::shared_ptr<SegmentedStorage> storage;
+    std::string testDir;
+
+    QueueItem createTestItem(int id)
+    {
+        QueueItem item;
+        item.entry = LogEntry(
+            LogEntry::ActionType::UPDATE,
+            "location" + std::to_string(id),
+            "controller" + std::to_string(id),
+            "processor" + std::to_string(id),
+            "subject" + std::to_string(id % 10));
+        return item;
+    }
+};
+
+// Test basic processing functionality
+TEST_F(WriterIntegrationTest, BasicWriteOperation)
+{
+    BufferQueue::ProducerToken producerToken = logQueue->createProducerToken();
+    const int NUM_ENTRIES = 500;
+    for (int i = 0; i < NUM_ENTRIES; ++i)
+    {
+        ASSERT_TRUE(logQueue->enqueueBlocking(createTestItem(i), producerToken, std::chrono::milliseconds(100)))
+            << "Failed to enqueue entry " << i;
+    }
+
+    EXPECT_EQ(logQueue->size(), NUM_ENTRIES);
+
+    writer->start();
+
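+    // Give the writer thread time to drain the queue (100 ms is assumed sufficient for 500 entries; increase if the test is flaky)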
+    std::this_thread::sleep_for(std::chrono::milliseconds(100));
+
+    writer->stop();
+
+    EXPECT_EQ(logQueue->size(), 0) << "Not all entries were processed";
+}
+
+// Test concurrent writing and processing
+TEST_F(WriterIntegrationTest, ConcurrentWriteAndProcess)
+{
+    const int NUM_ENTRIES = 1000;
+    const int NUM_PRODUCERS = 4;
+
+    // Function to simulate producers adding log entries
+    auto producer = [this](int start, int count)
+    {
+        BufferQueue::ProducerToken producerToken = logQueue->createProducerToken();
+        for (int i = start; i < start + count; ++i)
+        {
+            // Introduce a small, per-entry delay to simulate variability
+            // (derived from the index to avoid calling rand() concurrently from multiple threads)
+            std::this_thread::sleep_for(std::chrono::milliseconds(i % 10));
+            logQueue->enqueueBlocking(createTestItem(i), producerToken, std::chrono::milliseconds(500));
+        }
+    };
+
+    writer->start();
+
+    std::vector<std::thread> producerThreads;
+    for (int i = 0; i < NUM_PRODUCERS; ++i)
+    {
+        producerThreads.emplace_back(producer, i * (NUM_ENTRIES / NUM_PRODUCERS),
+                                     NUM_ENTRIES / NUM_PRODUCERS);
+    }
+
+    // Wait for all producer threads to finish
+    for (auto &t : producerThreads)
+    {
+        t.join();
+    }
+
+    // Allow some time for the writer to process the entries
+    std::this_thread::sleep_for(std::chrono::milliseconds(500));
+    writer->stop();
+
+    EXPECT_EQ(logQueue->size(), 0) << "Not all entries were processed";
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/tests/unit/test_BufferQueue.cpp b/archive/2025/summer/bsc_karidas/tests/unit/test_BufferQueue.cpp
new file mode 100644
index 000000000..ee573cac6
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/tests/unit/test_BufferQueue.cpp
@@ -0,0 +1,920 @@
+#include <gtest/gtest.h>
+#include "BufferQueue.hpp"
+#include <thread>
+#include <vector>
+#include <atomic>
+#include <chrono>
+#include <future>
+#include <random>
+
+// Basic functionality tests
+class BufferQueueBasicTest : public ::testing::Test
+{
+protected:
+    void SetUp() override
+    {
+        // Create a new queue for each test
+        queue = std::make_unique<BufferQueue>(QUEUE_SIZE, 1);
+    }
+
+    void TearDown() override
+    {
+        queue.reset();
+    }
+
+    // Helper to create a test queue item with log entry
+    QueueItem createTestItem(int id)
+    {
+        QueueItem item;
+        item.entry = LogEntry(
+            LogEntry::ActionType::READ,
+            "data/location/" + std::to_string(id),
+            "controller" + std::to_string(id),
+            "processor" + std::to_string(id),
+            "subject" + std::to_string(id % 10));
+        return item;
+    }
+
+    // Helper to create a test queue item with log entry and target filename
+    QueueItem createTestItemWithTarget(int id, const std::string &filename)
+    {
+        QueueItem item = createTestItem(id);
+        item.targetFilename = filename;
+        return item;
+    }
+
+    const size_t QUEUE_SIZE = 1; // leads to capacity being block_size
+    const size_t QUEUE_BLOCK_SIZE = 64;
+    const size_t QUEUE_CAPACITY = QUEUE_BLOCK_SIZE;
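+    // Capacity is managed in blocks of QUEUE_BLOCK_SIZE entries; a block is only
+    // reclaimed once all of its entries have been dequeued (see EnqueueWithConsumer below)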
+    std::unique_ptr<BufferQueue> queue;
+};
+
+// Test enqueue and dequeue operations
+TEST_F(BufferQueueBasicTest, EnqueueDequeue)
+{
+    QueueItem item = createTestItem(1);
+    QueueItem retrievedItem;
+
+    // Queue should be empty initially
+    EXPECT_EQ(queue->size(), 0);
+
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+    BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+
+    // Enqueue one item
+    EXPECT_TRUE(queue->enqueueBlocking(item, producerToken, std::chrono::milliseconds(100)));
+    EXPECT_EQ(queue->size(), 1);
+
+    // Dequeue the item
+    EXPECT_TRUE(queue->tryDequeue(retrievedItem, consumerToken));
+    EXPECT_EQ(queue->size(), 0);
+
+    // Verify the item matches
+    EXPECT_EQ(retrievedItem.entry.getDataControllerId(), item.entry.getDataControllerId());
+    EXPECT_EQ(retrievedItem.entry.getDataProcessorId(), item.entry.getDataProcessorId());
+    EXPECT_EQ(retrievedItem.entry.getDataLocation(), item.entry.getDataLocation());
+    EXPECT_EQ(retrievedItem.entry.getDataSubjectId(), item.entry.getDataSubjectId());
+    EXPECT_EQ(retrievedItem.entry.getActionType(), item.entry.getActionType());
+    EXPECT_EQ(retrievedItem.targetFilename, item.targetFilename);
+}
+
+TEST_F(BufferQueueBasicTest, EnqueueUntilFull)
+{
+    // Verify that we can enqueue up to capacity and that further enqueues fail
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+
+    // Fill the queue up to capacity
+    for (size_t i = 0; i < QUEUE_CAPACITY; i++)
+    {
+        EXPECT_TRUE(queue->enqueueBlocking(createTestItem(i), producerToken, std::chrono::milliseconds(100)));
+    }
+
+    // Queue should be full now
+    EXPECT_EQ(queue->size(), QUEUE_CAPACITY);
+
+    // Enqueue should fail since the queue is full
+    EXPECT_FALSE(queue->enqueueBlocking(createTestItem(123), producerToken));
+
+    // With a longer timeout, enqueue should block and eventually fail as well, since there is no consumer
+    auto start = std::chrono::steady_clock::now();
+    auto timeout = std::chrono::milliseconds(50);
+    EXPECT_FALSE(queue->enqueueBlocking(createTestItem(123), producerToken, timeout));
+    auto elapsed = std::chrono::steady_clock::now() - start;
+    EXPECT_GE(elapsed, timeout); // Verify that it blocked for at least the timeout period
+}
+
+// Test enqueue with consumer thread
+TEST_F(BufferQueueBasicTest, EnqueueWithConsumer)
+{
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+
+    // Fill the queue to capacity
+    for (size_t i = 0; i < QUEUE_CAPACITY; i++)
+    {
+        EXPECT_TRUE(queue->enqueueBlocking(createTestItem(i), producerToken, std::chrono::milliseconds(100)));
+    }
+
+    EXPECT_EQ(queue->size(), QUEUE_CAPACITY);
+
+    // Enqueue should fail while the queue is full
+    EXPECT_FALSE(queue->enqueueBlocking(createTestItem(123), producerToken));
+
+    BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+    QueueItem retrievedItem;
+    EXPECT_TRUE(queue->tryDequeue(retrievedItem, consumerToken));
+    EXPECT_EQ(retrievedItem.entry.getActionType(), LogEntry::ActionType::READ);
+    EXPECT_EQ(retrievedItem.entry.getDataLocation(), "data/location/0");
+
+    // A block of 64 entries only becomes free again once the entire block has been dequeued, so this enqueue still fails
+    EXPECT_FALSE(queue->enqueueBlocking(createTestItem(99999), producerToken, std::chrono::seconds(1)));
+
+    EXPECT_EQ(queue->size(), QUEUE_CAPACITY - 1);
+
+    // Batch dequeue
+    std::vector<QueueItem> items;
+    size_t count = queue->tryDequeueBatch(items, QUEUE_CAPACITY - 1, consumerToken);
+    // Verify we got all items
+    EXPECT_EQ(count, QUEUE_CAPACITY - 1);
+    EXPECT_EQ(items.size(), QUEUE_CAPACITY - 1);
+    EXPECT_EQ(queue->size(), 0);
+
+    // Now that the entire block has been freed, enqueue should succeed again
+    EXPECT_TRUE(queue->enqueueBlocking(createTestItem(99999), producerToken, std::chrono::seconds(1)));
+}
+
+// Test dequeue from empty queue
+TEST_F(BufferQueueBasicTest, DequeueFromEmpty)
+{
+    BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+    QueueItem item;
+    EXPECT_FALSE(queue->tryDequeue(item, consumerToken));
+}
+
+// Test batch dequeue
+TEST_F(BufferQueueBasicTest, BatchDequeue)
+{
+    const size_t numEntries = 5;
+
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+    BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+
+    // Enqueue several items
+    for (size_t i = 0; i < numEntries; i++)
+    {
+        EXPECT_TRUE(queue->enqueueBlocking(createTestItem(i), producerToken, std::chrono::milliseconds(100)));
+    }
+
+    // Batch dequeue
+    std::vector<QueueItem> items;
+    size_t count = queue->tryDequeueBatch(items, numEntries, consumerToken);
+
+    // Verify we got all items
+    EXPECT_EQ(count, numEntries);
+    EXPECT_EQ(items.size(), numEntries);
+    EXPECT_EQ(queue->size(), 0);
+
+    // Verify entries match what we enqueued
+    for (size_t i = 0; i < numEntries; i++)
+    {
+        EXPECT_EQ(items[i].entry.getDataLocation(), "data/location/" + std::to_string(i));
+    }
+}
+
+// Test batch dequeue with more items requested than available
+TEST_F(BufferQueueBasicTest, BatchDequeuePartial)
+{
+    const size_t numEntries = 3;
+    const size_t requestSize = 5;
+
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+    BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+
+    // Enqueue a few items
+    for (size_t i = 0; i < numEntries; i++)
+    {
+        EXPECT_TRUE(queue->enqueueBlocking(createTestItem(i), producerToken, std::chrono::milliseconds(100)));
+    }
+
+    // Try to dequeue more than available
+    std::vector<QueueItem> items;
+    size_t count = queue->tryDequeueBatch(items, requestSize, consumerToken);
+
+    // Verify we got what was available
+    EXPECT_EQ(count, numEntries);
+    EXPECT_EQ(items.size(), numEntries);
+    EXPECT_EQ(queue->size(), 0);
+}
+
+// Test batch enqueue functionality
+TEST_F(BufferQueueBasicTest, BatchEnqueue)
+{
+    const size_t numEntries = 5;
+    std::vector<QueueItem> itemsToEnqueue;
+
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+    BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+
+    for (size_t i = 0; i < numEntries; i++)
+    {
+        itemsToEnqueue.push_back(createTestItem(i));
+    }
+
+    EXPECT_TRUE(queue->enqueueBatchBlocking(itemsToEnqueue, producerToken));
+    EXPECT_EQ(queue->size(), numEntries);
+
+    std::vector<QueueItem> retrievedItems;
+    size_t dequeued = queue->tryDequeueBatch(retrievedItems, numEntries, consumerToken);
+
+    EXPECT_EQ(dequeued, numEntries);
+    EXPECT_EQ(retrievedItems.size(), numEntries);
+
+    for (size_t i = 0; i < numEntries; i++)
+    {
+        EXPECT_EQ(retrievedItems[i].entry.getDataLocation(), itemsToEnqueue[i].entry.getDataLocation());
+        EXPECT_EQ(retrievedItems[i].entry.getDataControllerId(), itemsToEnqueue[i].entry.getDataControllerId());
+        EXPECT_EQ(retrievedItems[i].entry.getDataProcessorId(), itemsToEnqueue[i].entry.getDataProcessorId());
+        EXPECT_EQ(retrievedItems[i].entry.getDataSubjectId(), itemsToEnqueue[i].entry.getDataSubjectId());
+    }
+
+    EXPECT_EQ(queue->size(), 0);
+}
+
+TEST_F(BufferQueueBasicTest, BatchEnqueueWhenAlmostFull)
+{
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+    BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+
+    // Fill most of the queue
+    for (size_t i = 0; i < QUEUE_CAPACITY - 3; i++)
+    {
+        EXPECT_TRUE(queue->enqueueBlocking(createTestItem(i), producerToken, std::chrono::milliseconds(100)));
+    }
+
+    // Queue has 3 spaces left
+    std::vector<QueueItem> smallBatch;
+    for (size_t i = 0; i < 3; i++)
+    {
+        smallBatch.push_back(createTestItem(100 + i));
+    }
+
+    // This should succeed (exactly fits available space)
+    EXPECT_TRUE(queue->enqueueBatchBlocking(smallBatch, producerToken));
+    EXPECT_EQ(queue->size(), QUEUE_CAPACITY);
+
+    // Create a batch larger than available space
+    std::vector<QueueItem> largeBatch;
+    for (size_t i = 0; i < 4; i++)
+    {
+        largeBatch.push_back(createTestItem(200 + i));
+    }
+
+    // This should fail with a short timeout
+    EXPECT_FALSE(queue->enqueueBatchBlocking(largeBatch, producerToken, std::chrono::milliseconds(1)));
+
+    // Remove ALL items to make space (to free the entire block)
+    std::vector<QueueItem> retrievedItems;
+    size_t removed = queue->tryDequeueBatch(retrievedItems, QUEUE_CAPACITY, consumerToken);
+    EXPECT_EQ(removed, QUEUE_CAPACITY);
+    EXPECT_EQ(queue->size(), 0);
+
+    // Now batch enqueue should succeed
+    EXPECT_TRUE(queue->enqueueBatchBlocking(largeBatch, producerToken, std::chrono::milliseconds(100)));
+    EXPECT_EQ(queue->size(), 4);
+}
+
+TEST_F(BufferQueueBasicTest, BatchEnqueueBlocking)
+{
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+
+    // Fill the queue to capacity
+    for (size_t i = 0; i < QUEUE_CAPACITY; i++)
+    {
+        EXPECT_TRUE(queue->enqueueBlocking(createTestItem(i), producerToken, std::chrono::milliseconds(100)));
+    }
+    EXPECT_EQ(queue->size(), QUEUE_CAPACITY);
+
+    std::vector<QueueItem> batch;
+    for (size_t i = 0; i < 3; i++)
+    {
+        batch.push_back(createTestItem(100 + i));
+    }
+
+    // Create a producer thread that will block until space is available
+    std::atomic<bool> producerSucceeded{false};
+    std::thread producerThread([this, &batch, &producerSucceeded, &producerToken]() { // Pass producerToken by reference
+        // Use the same producer token instead of creating a new one
+        if (queue->enqueueBatchBlocking(batch, producerToken, std::chrono::seconds(1)))
+        {
+            producerSucceeded.store(true);
+        }
+    });
+
+    // Give producer thread a chance to start and block
+    std::this_thread::sleep_for(std::chrono::milliseconds(5));
+
+    // Should still be false because queue is full
+    EXPECT_FALSE(producerSucceeded.load());
+
+    // Create a consumer thread to empty the entire queue
+    std::thread consumerThread([this]()
+                               {
+        BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+        std::vector<QueueItem> items;
+        // Dequeue all items to free the entire block
+        queue->tryDequeueBatch(items, QUEUE_CAPACITY, consumerToken); });
+
+    // Wait for both threads
+    consumerThread.join();
+    producerThread.join();
+
+    // Now producer should have succeeded
+    EXPECT_TRUE(producerSucceeded.load());
+    EXPECT_EQ(queue->size(), 3);
+}
+
+// Test flush method
+TEST_F(BufferQueueBasicTest, Flush)
+{
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+
+    const size_t numEntries = 5;
+    // Enqueue several items
+    for (size_t i = 0; i < numEntries; i++)
+    {
+        EXPECT_TRUE(queue->enqueueBlocking(createTestItem(i), producerToken, std::chrono::milliseconds(100)));
+    }
+
+    // Start a thread to dequeue all items
+    std::thread consumer([&]
+                         {
+        BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+        std::vector<QueueItem> items;
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        queue->tryDequeueBatch(items, numEntries, consumerToken); });
+
+    // Flush should wait until queue is empty
+    EXPECT_TRUE(queue->flush());
+    EXPECT_EQ(queue->size(), 0);
+
+    consumer.join();
+}
+
+// Test for QueueItem with targetFilename
+TEST_F(BufferQueueBasicTest, QueueItemWithTargetFilename)
+{
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+    BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+    // Create items with target filenames
+    QueueItem item1 = createTestItemWithTarget(1, "file1.log");
+    QueueItem item2 = createTestItemWithTarget(2, "file2.log");
+    QueueItem item3 = createTestItem(3); // No target filename
+
+    // Enqueue items
+    EXPECT_TRUE(queue->enqueueBlocking(item1, producerToken, std::chrono::milliseconds(100)));
+    EXPECT_TRUE(queue->enqueueBlocking(item2, producerToken, std::chrono::milliseconds(100)));
+    EXPECT_TRUE(queue->enqueueBlocking(item3, producerToken, std::chrono::milliseconds(100)));
+
+    // Dequeue and verify
+    QueueItem retrievedItem1, retrievedItem2, retrievedItem3;
+
+    EXPECT_TRUE(queue->tryDequeue(retrievedItem1, consumerToken));
+    EXPECT_TRUE(queue->tryDequeue(retrievedItem2, consumerToken));
+    EXPECT_TRUE(queue->tryDequeue(retrievedItem3, consumerToken));
+
+    // Check targetFilename is preserved correctly
+    EXPECT_TRUE(retrievedItem1.targetFilename.has_value());
+    EXPECT_EQ(*retrievedItem1.targetFilename, "file1.log");
+
+    EXPECT_TRUE(retrievedItem2.targetFilename.has_value());
+    EXPECT_EQ(*retrievedItem2.targetFilename, "file2.log");
+
+    EXPECT_FALSE(retrievedItem3.targetFilename.has_value());
+}
+
+// Thread safety tests
+class BufferQueueThreadTest : public ::testing::Test
+{
+protected:
+    void SetUp() override
+    {
+        // Create a new queue for each test with larger capacity
+        queue = std::make_unique<BufferQueue>(QUEUE_CAPACITY - 1, 8);
+    }
+
+    void TearDown() override
+    {
+        queue.reset();
+    }
+
+    // Helper to create a test queue item with log entry
+    QueueItem createTestItem(int id)
+    {
+        QueueItem item;
+        item.entry = LogEntry(
+            LogEntry::ActionType::READ,
+            "data/location/" + std::to_string(id),
+            "controller" + std::to_string(id),
+            "processor" + std::to_string(id),
+            "subject" + std::to_string(id % 10));
+        return item;
+    }
+
+    const size_t QUEUE_BLOCK_SIZE = 64;
+    const size_t QUEUE_CAPACITY = 4096;
+    std::unique_ptr<BufferQueue> queue;
+};
+
+// Test queue capacity behavior (the queue does not grow beyond its configured capacity)
+TEST_F(BufferQueueThreadTest, QueueCapacityTest)
+{
+    const size_t SMALL_CAPACITY = 128;
+    auto smallQueue = std::make_unique<BufferQueue>(SMALL_CAPACITY - QUEUE_BLOCK_SIZE, 1);
+    BufferQueue::ProducerToken smallQueueProducer = smallQueue->createProducerToken();
+    BufferQueue::ConsumerToken smallQueueConsumer = smallQueue->createConsumerToken();
+
+    // Fill the queue up to capacity
+    for (size_t i = 0; i < SMALL_CAPACITY; i++)
+    {
+        EXPECT_TRUE(smallQueue->enqueueBlocking(createTestItem(i), smallQueueProducer, std::chrono::milliseconds(100)));
+    }
+
+    EXPECT_EQ(smallQueue->size(), SMALL_CAPACITY);
+
+    // Queue is full, enqueue with short timeout should fail
+    EXPECT_FALSE(smallQueue->enqueueBlocking(createTestItem(SMALL_CAPACITY),
+                                             smallQueueProducer,
+                                             std::chrono::milliseconds(1)));
+
+    // Dequeue all items
+    std::vector<QueueItem> items;
+    size_t count = smallQueue->tryDequeueBatch(items, SMALL_CAPACITY, smallQueueConsumer);
+    EXPECT_EQ(count, SMALL_CAPACITY);
+    EXPECT_EQ(smallQueue->size(), 0);
+
+    // Now enqueue should succeed
+    EXPECT_TRUE(smallQueue->enqueueBlocking(createTestItem(SMALL_CAPACITY),
+                                            smallQueueProducer,
+                                            std::chrono::milliseconds(100)));
+    EXPECT_EQ(smallQueue->size(), 1);
+}
+
+// Test multiple producers, single consumer
+TEST_F(BufferQueueThreadTest, MultipleProducersSingleConsumer)
+{
+    const int NUM_PRODUCERS = 4;
+    const int ENTRIES_PER_PRODUCER = 1000;
+    const int TOTAL_ENTRIES = NUM_PRODUCERS * ENTRIES_PER_PRODUCER;
+
+    std::atomic<int> totalEnqueued(0);
+    std::atomic<int> totalDequeued(0);
+
+    // Start consumer thread
+    std::thread consumer([&]
+                         {
+        BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+        QueueItem item;
+        while (totalDequeued.load() < TOTAL_ENTRIES) {
+            if (queue->tryDequeue(item, consumerToken)) {
+                totalDequeued++;
+            } else {
+                std::this_thread::yield();
+            }
+        } });
+
+    // Start producer threads
+    std::vector<std::thread> producers;
+    for (int i = 0; i < NUM_PRODUCERS; i++)
+    {
+        producers.emplace_back([&, i]
+                               {
+            BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+            for (int j = 0; j < ENTRIES_PER_PRODUCER; j++) {
+                int id = i * ENTRIES_PER_PRODUCER + j;
+                QueueItem item = createTestItem(id);
+
+                // Try until enqueue succeeds
+                while (!queue->enqueueBlocking(item, producerToken, std::chrono::milliseconds(100)))
+                {
+                    std::this_thread::yield();
+                }
+
+                totalEnqueued++;
+            } });
+    }
+
+    // Wait for producers to finish
+    for (auto &t : producers)
+    {
+        t.join();
+    }
+
+    // Wait for consumer
+    consumer.join();
+
+    // Verify counts
+    EXPECT_EQ(totalEnqueued.load(), TOTAL_ENTRIES);
+    EXPECT_EQ(totalDequeued.load(), TOTAL_ENTRIES);
+    EXPECT_EQ(queue->size(), 0);
+}
+
+// Test single producer, multiple consumers
+TEST_F(BufferQueueThreadTest, SingleProducerMultipleConsumers)
+{
+    const int NUM_CONSUMERS = 4;
+    const int TOTAL_ENTRIES = 10000;
+
+    std::atomic<int> totalDequeued(0);
+
+    // Start consumer threads
+    std::vector<std::thread> consumers;
+    for (int i = 0; i < NUM_CONSUMERS; i++)
+    {
+        consumers.emplace_back([&]
+                               {
+            BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+            QueueItem item;
+            while (totalDequeued.load() < TOTAL_ENTRIES) {
+                if (queue->tryDequeue(item, consumerToken))
+                {
+                    totalDequeued++;
+                }
+                else
+                {
+                    std::this_thread::yield();
+                }
+            } });
+    }
+
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+    // Producer thread
+    for (int i = 0; i < TOTAL_ENTRIES; i++)
+    {
+        QueueItem item = createTestItem(i);
+
+        // Try until enqueue succeeds
+        while (!queue->enqueueBlocking(item, producerToken, std::chrono::milliseconds(100)))
+        {
+            std::this_thread::yield();
+        }
+    }
+
+    // Wait for consumers
+    for (auto &t : consumers)
+    {
+        t.join();
+    }
+
+    // Verify counts
+    EXPECT_EQ(totalDequeued.load(), TOTAL_ENTRIES);
+    EXPECT_EQ(queue->size(), 0);
+}
+
+// Test multiple producers with batch enqueue
+TEST_F(BufferQueueThreadTest, MultipleBatchProducers)
+{
+    const int NUM_PRODUCERS = 4;
+    const int BATCHES_PER_PRODUCER = 50;
+    const int ENTRIES_PER_BATCH = 20;
+    const int TOTAL_ENTRIES = NUM_PRODUCERS * BATCHES_PER_PRODUCER * ENTRIES_PER_BATCH;
+
+    std::atomic<int> totalEnqueued(0);
+    std::atomic<int> totalDequeued(0);
+
+    std::thread consumer([&]()
+                         {
+        BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+        std::vector<QueueItem> items;
+        while (totalDequeued.load() < TOTAL_ENTRIES) {
+            size_t count = queue->tryDequeueBatch(items, 50, consumerToken);
+            if (count > 0) {
+                totalDequeued += count;
+            } else {
+                std::this_thread::yield();
+            }
+        } });
+
+    std::vector<std::thread> producers;
+    for (int i = 0; i < NUM_PRODUCERS; i++)
+    {
+        producers.emplace_back([&, i]()
+                               {
+            BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+            std::vector<QueueItem> batchToEnqueue;
+
+            for (int b = 0; b < BATCHES_PER_PRODUCER; b++) {
+                batchToEnqueue.clear();
+                for (int j = 0; j < ENTRIES_PER_BATCH; j++) {
+                    int id = (i * BATCHES_PER_PRODUCER * ENTRIES_PER_BATCH) +
+                             (b * ENTRIES_PER_BATCH) + j;
+                    batchToEnqueue.push_back(createTestItem(id));
+                }
+
+                queue->enqueueBatchBlocking(batchToEnqueue, producerToken, std::chrono::milliseconds(500));
+
+                totalEnqueued += ENTRIES_PER_BATCH;
+            } });
+    }
+
+    for (auto &t : producers)
+    {
+        t.join();
+    }
+
+    consumer.join();
+
+    EXPECT_EQ(totalEnqueued.load(), TOTAL_ENTRIES);
+    EXPECT_EQ(totalDequeued.load(), TOTAL_ENTRIES);
+    EXPECT_EQ(queue->size(), 0);
+}
+
+// Test mixed batch and single item operations
+TEST_F(BufferQueueThreadTest, MixedBatchOperations)
+{
+    const int NUM_THREADS = 6;
+    const int OPS_PER_THREAD = 200;
+    const int MAX_BATCH_SIZE = 10;
+
+    std::atomic<int> totalEnqueued(0);
+    std::atomic<int> totalDequeued(0);
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NUM_THREADS; i++)
+    {
+        threads.emplace_back([&, i]()
+                             {
+            BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+            BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+            std::random_device rd;
+            std::mt19937 gen(rd());
+            std::uniform_int_distribution<> opType(0, 3);  // 0-1: single enqueue, 2: batch enqueue, 3: dequeue
+            std::uniform_int_distribution<> batchSize(2, MAX_BATCH_SIZE);
+
+            for (int j = 0; j < OPS_PER_THREAD; j++) {
+                int id = i * OPS_PER_THREAD + j;
+                int op = opType(gen);
+
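+                // Fall back to enqueueing when the dequeued count has caught up with the enqueued count (queue likely empty)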
+                if (op <= 1 || totalDequeued.load() >= totalEnqueued.load()) {
+                    // Single enqueue
+                    QueueItem item = createTestItem(id);
+                    if (queue->enqueueBlocking(item, producerToken, std::chrono::milliseconds(50))) {
+                        totalEnqueued++;
+                    }
+                } else if (op == 2) {
+                    // Batch enqueue
+                    int size = batchSize(gen);
+                    std::vector<QueueItem> batch;
+                    for (int k = 0; k < size; k++) {
+                        batch.push_back(createTestItem(id * 1000 + k));
+                    }
+
+                    if (queue->enqueueBatchBlocking(batch, producerToken, std::chrono::milliseconds(50))) {
+                        totalEnqueued += size;
+                    }
+                } else {
+                    if (gen() % 2 == 0) {
+                        // Single dequeue
+                        QueueItem item;
+                        if (queue->tryDequeue(item, consumerToken)) {
+                            totalDequeued++;
+                        }
+                    } else {
+                        // Batch dequeue
+                        std::vector<QueueItem> items;
+                        size_t count = queue->tryDequeueBatch(items, batchSize(gen), consumerToken);
+                        if (count > 0) {
+                            totalDequeued += count;
+                        }
+                    }
+                }
+            } });
+    }
+
+    for (auto &t : threads)
+    {
+        t.join();
+    }
+
+    // Verify that enqueued >= dequeued and size matches the difference
+    EXPECT_GE(totalEnqueued.load(), totalDequeued.load());
+    EXPECT_EQ(queue->size(), totalEnqueued.load() - totalDequeued.load());
+
+    BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+    // Dequeue remaining entries
+    std::vector<QueueItem> items;
+    while (queue->tryDequeueBatch(items, MAX_BATCH_SIZE, consumerToken) > 0)
+    {
+        totalDequeued += items.size();
+    }
+
+    EXPECT_EQ(totalEnqueued.load(), totalDequeued.load());
+    EXPECT_EQ(queue->size(), 0);
+}
+
+// Test batch dequeue with multiple threads
+TEST_F(BufferQueueThreadTest, BatchDequeueMultipleThreads)
+{
+    const int NUM_PRODUCERS = 4;
+    const int NUM_CONSUMERS = 2;
+    const int ENTRIES_PER_PRODUCER = 1000;
+    const int TOTAL_ENTRIES = NUM_PRODUCERS * ENTRIES_PER_PRODUCER;
+    const int BATCH_SIZE = 100;
+
+    std::atomic<int> totalEnqueued(0);
+    std::atomic<int> totalDequeued(0);
+
+    // Start consumer threads
+    std::vector<std::thread> consumers;
+    for (int i = 0; i < NUM_CONSUMERS; i++)
+    {
+        consumers.emplace_back([&]
+                               {
+            BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+            std::vector<QueueItem> items;
+            while (totalDequeued.load() < TOTAL_ENTRIES) {
+                size_t count = queue->tryDequeueBatch(items, BATCH_SIZE, consumerToken);
+                if (count > 0) {
+                    totalDequeued += count;
+                } else {
+                    std::this_thread::yield();
+                }
+            } });
+    }
+
+    // Start producer threads
+    std::vector<std::thread> producers;
+    for (int i = 0; i < NUM_PRODUCERS; i++)
+    {
+        producers.emplace_back([&, i]
+                               {
+            BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+            for (int j = 0; j < ENTRIES_PER_PRODUCER; j++) {
+                int id = i * ENTRIES_PER_PRODUCER + j;
+                QueueItem item = createTestItem(id);
+
+                // Try until enqueue succeeds
+                while (!queue->enqueueBlocking(item, producerToken, std::chrono::milliseconds(100)))
+                {
+                    std::this_thread::yield();
+                }
+
+                totalEnqueued++;
+            } });
+    }
+
+    // Wait for producers to finish
+    for (auto &t : producers)
+    {
+        t.join();
+    }
+
+    // Wait for consumers
+    for (auto &t : consumers)
+    {
+        t.join();
+    }
+
+    // Verify counts
+    EXPECT_EQ(totalEnqueued.load(), TOTAL_ENTRIES);
+    EXPECT_EQ(totalDequeued.load(), TOTAL_ENTRIES);
+    EXPECT_EQ(queue->size(), 0);
+}
+
+// Stress test with random operations
+TEST_F(BufferQueueThreadTest, RandomizedStressTest)
+{
+    const int NUM_THREADS = 8;
+    const int OPS_PER_THREAD = 5000;
+
+    std::atomic<int> totalEnqueued(0);
+    std::atomic<int> totalDequeued(0);
+
+    // Start worker threads
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NUM_THREADS; i++)
+    {
+        threads.emplace_back([&, i]
+                             {
+            BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+            BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+            std::random_device rd;
+            std::mt19937 gen(rd());
+            std::uniform_int_distribution<> dis(0, 1);  // 0 for enqueue, 1 for dequeue
+
+            for (int j = 0; j < OPS_PER_THREAD; j++) {
+                int id = i * OPS_PER_THREAD + j;
+
+                if (dis(gen) == 0 || totalDequeued.load() >= totalEnqueued.load()) {
+                    // Enqueue
+                    QueueItem item = createTestItem(id);
+                    if (queue->enqueueBlocking(item, producerToken, std::chrono::milliseconds(100)))
+                    {
+                        totalEnqueued++;
+                    }
+                } else {
+                    // Dequeue
+                    QueueItem item;
+                    if (queue->tryDequeue(item, consumerToken)) {
+                        totalDequeued++;
+                    }
+                }
+            } });
+    }
+
+    // Wait for all threads
+    for (auto &t : threads)
+    {
+        t.join();
+    }
+
+    // Verify that enqueued >= dequeued and size matches the difference
+    EXPECT_GE(totalEnqueued.load(), totalDequeued.load());
+    EXPECT_EQ(queue->size(), totalEnqueued.load() - totalDequeued.load());
+
+    // Dequeue remaining entries
+    BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+    QueueItem item;
+    while (queue->tryDequeue(item, consumerToken))
+    {
+        totalDequeued++;
+    }
+
+    // Final verification
+    EXPECT_EQ(totalEnqueued.load(), totalDequeued.load());
+    EXPECT_EQ(queue->size(), 0);
+}
+
+// Test timed operations
+class BufferQueueTimingTest : public ::testing::Test
+{
+protected:
+    void SetUp() override
+    {
+        // Create a new queue for each test
+        queue = std::make_unique<BufferQueue>(QUEUE_CAPACITY, 1);
+    }
+
+    void TearDown() override
+    {
+        queue.reset();
+    }
+
+    // Helper to create a test queue item with log entry
+    QueueItem createTestItem(int id)
+    {
+        QueueItem item;
+        item.entry = LogEntry(
+            LogEntry::ActionType::READ,
+            "data/location/" + std::to_string(id),
+            "controller" + std::to_string(id),
+            "processor" + std::to_string(id),
+            "subject" + std::to_string(id % 10));
+        return item;
+    }
+
+    const size_t QUEUE_CAPACITY = 1024;
+    std::unique_ptr<BufferQueue> queue;
+};
+
+// Test that flush completes (within a bounded wait) once the queue has been drained
+TEST_F(BufferQueueTimingTest, FlushWithTimeout)
+{
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+    BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+    // Enqueue some items
+    for (int i = 0; i < 10; i++)
+    {
+        QueueItem item;
+        item.entry = LogEntry(
+            LogEntry::ActionType::READ,
+            "data/location/" + std::to_string(i),
+            "controller",
+            "processor",
+            "subject");
+        queue->enqueueBlocking(item, producerToken, std::chrono::milliseconds(100));
+    }
+
+    // Start a future to call flush
+    auto future = std::async(std::launch::async, [&]
+                             { return queue->flush(); });
+
+    // Start a thread to dequeue after a delay
+    std::thread consumer([&]
+                         {
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+
+        std::vector<QueueItem> items;
+        queue->tryDequeueBatch(items, 10, consumerToken); });
+
+    // Flush should complete when queue is emptied
+    auto status = future.wait_for(std::chrono::milliseconds(750));
+    EXPECT_EQ(status, std::future_status::ready);
+    EXPECT_TRUE(future.get());
+
+    consumer.join();
+}
+
+int main(int argc, char **argv)
+{
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/tests/unit/test_Compression.cpp b/archive/2025/summer/bsc_karidas/tests/unit/test_Compression.cpp
new file mode 100644
index 000000000..d81718d6e
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/tests/unit/test_Compression.cpp
@@ -0,0 +1,133 @@
+#include <gtest/gtest.h>
+#include "Compression.hpp"
+#include "LogEntry.hpp"
+#include <vector>
+#include <string>
+#include <algorithm>
+
+class CompressionTest : public ::testing::Test
+{
+protected:
+    void SetUp() override
+    {
+        // Create a few sample log entries for testing
+        entry1 = LogEntry(LogEntry::ActionType::CREATE, "/data/records/1", "controller123", "processor123", "subject456");
+        entry2 = LogEntry(LogEntry::ActionType::READ, "/data/records/2", "controller789", "processor789", "subject456");
+        entry3 = LogEntry(LogEntry::ActionType::UPDATE, "/data/records/3", "controller123", "processor123", "subject789");
+        entry4 = LogEntry(LogEntry::ActionType::DELETE, "/data/records/4", "controller789", "processor789", "subject123");
+    }
+
+    LogEntry entry1, entry2, entry3, entry4;
+};
+
+// Helper function to compare two LogEntry objects
+bool LogEntriesEqual(const LogEntry &a, const LogEntry &b)
+{
+    // Compare serialized representations to check equality
+    auto serializedA = a.serialize();
+    auto serializedB = b.serialize();
+
+    return serializedA == serializedB;
+}
+
+// Test compressing and decompressing a batch of log entries
+TEST_F(CompressionTest, CompressDecompressBatch)
+{
+    std::vector<LogEntry> batch = {entry1, entry2, entry3, entry4};
+    std::vector<LogEntry> original = batch; // keep a copy: 'batch' is moved into serializeBatch below
+    std::vector<uint8_t> serializedBatch = LogEntry::serializeBatch(std::move(batch));
+    std::vector<uint8_t> compressed = Compression::compress(std::move(serializedBatch));
+
+    // Make sure compression produced data
+    ASSERT_GT(compressed.size(), 0);
+
+    std::vector<uint8_t> decompressed = Compression::decompress(std::move(compressed));
+    std::vector<LogEntry> recoveredBatch = LogEntry::deserializeBatch(std::move(decompressed));
+
+    // Verify we got back the same number of entries
+    ASSERT_EQ(original.size(), recoveredBatch.size());
+
+    // Verify each entry matches
+    for (size_t i = 0; i < original.size(); i++)
+    {
+        EXPECT_TRUE(LogEntriesEqual(original[i], recoveredBatch[i]))
+            << "Entries at index " << i << " don't match";
+    }
+}
+
+// Test with an empty batch
+TEST_F(CompressionTest, EmptyBatch)
+{
+    // Create an empty batch
+    std::vector<LogEntry> emptyBatch;
+    std::vector<uint8_t> serializedBatch = LogEntry::serializeBatch(std::move(emptyBatch));
+    std::vector<uint8_t> compressed = Compression::compress(std::move(serializedBatch));
+
+    std::vector<uint8_t> decompressed = Compression::decompress(std::move(compressed));
+    std::vector<LogEntry> recoveredBatch = LogEntry::deserializeBatch(std::move(decompressed));
+
+    // Verify we still have an empty vector
+    EXPECT_TRUE(recoveredBatch.empty());
+}
+
+// Test with invalid compressed data
+TEST_F(CompressionTest, InvalidCompressedData)
+{
+    // Create some invalid compressed data
+    std::vector<uint8_t> invalidData = {0x01, 0x02, 0x03, 0x04};
+
+    // Verify that decompression failed
+    EXPECT_THROW(
+        Compression::decompress(std::move(invalidData)),
+        std::runtime_error);
+}
+
+// Test batch compression ratio
+TEST_F(CompressionTest, BatchCompressionRatio)
+{
+    // Create a batch of log entries with repetitive data which should compress well
+    const int batchSize = 50;
+    std::string repetitiveData(1000, 'X');
+    LogEntry repetitiveEntry(LogEntry::ActionType::CREATE, repetitiveData, repetitiveData, repetitiveData, repetitiveData);
+
+    std::vector<LogEntry> repetitiveBatch(batchSize, repetitiveEntry);
+    std::vector<LogEntry> original = repetitiveBatch; // keep a copy: 'repetitiveBatch' is moved into serializeBatch below
+    std::vector<uint8_t> serializedBatch = LogEntry::serializeBatch(std::move(repetitiveBatch));
+    const size_t uncompressedSize = serializedBatch.size(); // record before the buffer is moved into compress
+    std::vector<uint8_t> compressed = Compression::compress(std::move(serializedBatch));
+
+    // Check that batch compression significantly reduced the size
+    double compressionRatio = static_cast<double>(compressed.size()) / static_cast<double>(uncompressedSize);
+    EXPECT_LT(compressionRatio, 0.05); // Expect at least 95% compression for the batch
+
+    std::vector<uint8_t> decompressed = Compression::decompress(std::move(compressed));
+    std::vector<LogEntry> recoveredBatch = LogEntry::deserializeBatch(std::move(decompressed));
+    // Verify the correct number of entries and their content
+    ASSERT_EQ(original.size(), recoveredBatch.size());
+    for (size_t i = 0; i < original.size(); i++)
+    {
+        EXPECT_TRUE(LogEntriesEqual(original[i], recoveredBatch[i]));
+    }
+}
+
+// Test with a large batch of entries
+TEST_F(CompressionTest, LargeBatch)
+{
+    const size_t batchSize = 100;
+    std::vector<LogEntry> largeBatch(batchSize, entry1);
+    std::vector<uint8_t> serializedBatch = LogEntry::serializeBatch(std::move(largeBatch));
+    std::vector<uint8_t> compressed = Compression::compress(std::move(serializedBatch));
+    std::vector<uint8_t> decompressed = Compression::decompress(std::move(compressed));
+    std::vector<LogEntry> recoveredBatch = LogEntry::deserializeBatch(std::move(decompressed));
+
+    // Verify the correct number of entries
+    ASSERT_EQ(batchSize, recoveredBatch.size());
+
+    // Verify the entries match (every entry in the batch was a copy of entry1)
+    for (size_t i = 0; i < recoveredBatch.size(); i++)
+    {
+        EXPECT_TRUE(LogEntriesEqual(entry1, recoveredBatch[i]));
+    }
+}
+
+int main(int argc, char **argv)
+{
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/tests/unit/test_Crypto.cpp b/archive/2025/summer/bsc_karidas/tests/unit/test_Crypto.cpp
new file mode 100644
index 000000000..1dc30da93
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/tests/unit/test_Crypto.cpp
@@ -0,0 +1,328 @@
+#include <gtest/gtest.h>
+#include "Crypto.hpp"
+#include <string>
+#include <vector>
+#include <algorithm>
+
+class CryptoTest : public ::testing::Test
+{
+protected:
+    Crypto crypto;
+
+    // Helper method to create a random key of proper size
+    std::vector<uint8_t> createRandomKey()
+    {
+        std::vector<uint8_t> key(Crypto::KEY_SIZE);
+        for (size_t i = 0; i < key.size(); ++i)
+        {
+            key[i] = static_cast<uint8_t>(rand() % 256);
+        }
+        return key;
+    }
+
+    // Helper method to create a dummy IV
+    std::vector<uint8_t> createDummyIV()
+    {
+        return std::vector<uint8_t>(Crypto::GCM_IV_SIZE, 0x24);
+    }
+
+    // Helper method to convert string to byte vector
+    std::vector<uint8_t> stringToBytes(const std::string &str)
+    {
+        return std::vector<uint8_t>(str.begin(), str.end());
+    }
+
+    // Helper method to convert byte vector to string
+    std::string bytesToString(const std::vector<uint8_t> &bytes)
+    {
+        return std::string(bytes.begin(), bytes.end());
+    }
+
+    void SetUp() override
+    {
+        // Seed random number generator for consistent test results
+        srand(42);
+    }
+};
+
+// Test empty data encryption and decryption
+TEST_F(CryptoTest, EmptyData)
+{
+    std::vector<uint8_t> emptyData;
+    std::vector<uint8_t> key = createRandomKey();
+    std::vector<uint8_t> iv = createDummyIV();
+
+    // Encrypt empty data
+    std::vector<uint8_t> encrypted = crypto.encrypt(std::move(emptyData), key, iv);
+    EXPECT_TRUE(encrypted.empty());
+
+    // Decrypt empty data
+    std::vector<uint8_t> decrypted = crypto.decrypt(encrypted, key, iv);
+    EXPECT_TRUE(decrypted.empty());
+}
+
+// Test basic encryption and decryption
+TEST_F(CryptoTest, BasicEncryptDecrypt)
+{
+    std::string testMessage = "This is a test message for encryption";
+    std::vector<uint8_t> data = stringToBytes(testMessage);
+    std::vector<uint8_t> original = data; // keep a copy: 'data' is moved into encrypt below
+    std::vector<uint8_t> key = createRandomKey();
+    std::vector<uint8_t> iv = createDummyIV();
+
+    // Encrypt the data
+    std::vector<uint8_t> encrypted = crypto.encrypt(std::move(data), key, iv);
+    EXPECT_FALSE(encrypted.empty());
+
+    // The encrypted data should be different from the original
+    EXPECT_NE(original, encrypted);
+
+    // Decrypt the data
+    std::vector<uint8_t> decrypted = crypto.decrypt(encrypted, key, iv);
+
+    // The decrypted data should match the original
+    EXPECT_EQ(original, decrypted);
+    EXPECT_EQ(testMessage, bytesToString(decrypted));
+}
+
+// Test encryption with various data sizes
+TEST_F(CryptoTest, VariousDataSizes)
+{
+    std::vector<size_t> sizes = {10, 100, 1000, 10000};
+    std::vector<uint8_t> key = createRandomKey();
+    std::vector<uint8_t> iv = createDummyIV();
+
+    for (size_t size : sizes)
+    {
+        // Create data of specified size
+        std::vector<uint8_t> data(size);
+        for (size_t i = 0; i < size; ++i)
+        {
+            data[i] = static_cast<uint8_t>(i % 256);
+        }
+        std::vector<uint8_t> original = data; // keep a copy: 'data' is moved into encrypt below
+
+        // Encrypt the data
+        std::vector<uint8_t> encrypted = crypto.encrypt(std::move(data), key, iv);
+        EXPECT_FALSE(encrypted.empty());
+
+        // Decrypt the data
+        std::vector<uint8_t> decrypted = crypto.decrypt(encrypted, key, iv);
+
+        // The decrypted data should match the original
+        EXPECT_EQ(original, decrypted);
+    }
+}
+
+// Test encryption with invalid key size
+TEST_F(CryptoTest, InvalidKeySize)
+{
+    std::string testMessage = "Testing invalid key size";
+    std::vector<uint8_t> data = stringToBytes(testMessage);
+    std::vector<uint8_t> iv = createDummyIV();
+
+    // Create keys with invalid sizes
+    std::vector<uint8_t> shortKey(16); // Too short
+    std::vector<uint8_t> longKey(64);  // Too long
+
+    // Encryption with short key should throw
+    EXPECT_THROW(crypto.encrypt(std::move(data), shortKey, iv), std::runtime_error);
+
+    // Encryption with long key should throw
+    EXPECT_THROW(crypto.encrypt(std::move(data), longKey, iv), std::runtime_error);
+}
+
+// Test encryption with invalid IV size
+TEST_F(CryptoTest, InvalidIVSize)
+{
+    std::string testMessage = "Testing invalid IV size";
+    std::vector<uint8_t> data = stringToBytes(testMessage);
+    std::vector<uint8_t> key = createRandomKey();
+
+    // Create IVs with invalid sizes
+    std::vector<uint8_t> shortIV(8); // Too short
+    std::vector<uint8_t> longIV(16); // Too long
+
+    // Encryption with short IV should throw
+    EXPECT_THROW(crypto.encrypt(std::move(data), key, shortIV), std::runtime_error);
+
+    // Encryption with long IV should throw
+    EXPECT_THROW(crypto.encrypt(std::move(data), key, longIV), std::runtime_error);
+}
+
+// Test decryption with wrong key
+TEST_F(CryptoTest, WrongKey)
+{
+    std::string testMessage = "This should not decrypt correctly with wrong key";
+    std::vector<uint8_t> data = stringToBytes(testMessage);
+    std::vector<uint8_t> iv = createDummyIV();
+
+    // Create two different keys
+    std::vector<uint8_t> correctKey = createRandomKey();
+    std::vector<uint8_t> wrongKey = createRandomKey();
+
+    // Make sure the keys are different
+    ASSERT_NE(correctKey, wrongKey);
+
+    // Encrypt with the correct key
+    std::vector<uint8_t> encrypted = crypto.encrypt(std::move(data), correctKey, iv);
+
+    // Attempt to decrypt with the wrong key
+    std::vector<uint8_t> decrypted = crypto.decrypt(encrypted, wrongKey, iv);
+
+    // The decryption should fail (return empty vector) or the result should be different
+    // from the original data
+    EXPECT_TRUE(decrypted.empty() || decrypted != data);
+}
+
+// Test decryption with wrong IV
+TEST_F(CryptoTest, WrongIV)
+{
+    std::string testMessage = "This should not decrypt correctly with wrong IV";
+    std::vector<uint8_t> data = stringToBytes(testMessage);
+    std::vector<uint8_t> key = createRandomKey();
+
+    // Create two different IVs
+    std::vector<uint8_t> correctIV = createDummyIV();
+    std::vector<uint8_t> wrongIV(Crypto::GCM_IV_SIZE, 0x42); // Different value
+
+    // Make sure the IVs are different
+    ASSERT_NE(correctIV, wrongIV);
+
+    // Encrypt with the correct IV
+    std::vector<uint8_t> encrypted = crypto.encrypt(std::move(data), key, correctIV);
+
+    // Attempt to decrypt with the wrong IV
+    std::vector<uint8_t> decrypted = crypto.decrypt(encrypted, key, wrongIV);
+
+    // The decryption should fail (return empty vector) or the result should be different
+    // from the original data
+    EXPECT_TRUE(decrypted.empty() || decrypted != data);
+}
+
+// Test tampering detection
+TEST_F(CryptoTest, TamperingDetection)
+{
+    std::string testMessage = "This message should be protected against tampering";
+    std::vector<uint8_t> data = stringToBytes(testMessage);
+    std::vector<uint8_t> key = createRandomKey();
+    std::vector<uint8_t> iv = createDummyIV();
+
+    // Encrypt the data
+    std::vector<uint8_t> encrypted = crypto.encrypt(std::move(data), key, iv);
+    ASSERT_FALSE(encrypted.empty());
+
+    // Tamper with the encrypted data (modify a byte in the middle)
+    if (encrypted.size() > 20)
+    {
+        encrypted[encrypted.size() / 2] ^= 0xFF; // Flip all bits in one byte
+
+        // Decryption should now fail or produce incorrect results
+        std::vector<uint8_t> decrypted = crypto.decrypt(encrypted, key, iv);
+        EXPECT_TRUE(decrypted.empty() || decrypted != data);
+    }
+}
+
+// Test binary data encryption and decryption
+TEST_F(CryptoTest, BinaryData)
+{
+    // Create binary data with all possible byte values
+    std::vector<uint8_t> binaryData(256);
+    for (int i = 0; i < 256; ++i)
+    {
+        binaryData[i] = static_cast<uint8_t>(i);
+    }
+    std::vector<uint8_t> original = binaryData; // keep a copy: 'binaryData' is moved into encrypt below
+
+    std::vector<uint8_t> key = createRandomKey();
+    std::vector<uint8_t> iv = createDummyIV();
+
+    // Encrypt the binary data
+    std::vector<uint8_t> encrypted = crypto.encrypt(std::move(binaryData), key, iv);
+    EXPECT_FALSE(encrypted.empty());
+
+    // Decrypt the data
+    std::vector<uint8_t> decrypted = crypto.decrypt(encrypted, key, iv);
+
+    // The decrypted data should match the original
+    EXPECT_EQ(original, decrypted);
+}
+
+// Test large data encryption and decryption
+TEST_F(CryptoTest, LargeData)
+{
+    // Create a large data set (1 MB)
+    const size_t size = 1024 * 1024;
+    std::vector<uint8_t> largeData(size);
+    for (size_t i = 0; i < size; ++i)
+    {
+        largeData[i] = static_cast<uint8_t>(i % 256);
+    }
+    std::vector<uint8_t> original = largeData; // keep a copy: 'largeData' is moved into encrypt below
+
+    std::vector<uint8_t> key = createRandomKey();
+    std::vector<uint8_t> iv = createDummyIV();
+
+    // Encrypt the large data
+    std::vector<uint8_t> encrypted = crypto.encrypt(std::move(largeData), key, iv);
+    EXPECT_FALSE(encrypted.empty());
+
+    // Decrypt the data
+    std::vector<uint8_t> decrypted = crypto.decrypt(encrypted, key, iv);
+
+    // The decrypted data should match the original
+    EXPECT_EQ(original, decrypted);
+}
+
+// Test encryption and decryption with a fixed key and IV (for reproducibility)
+TEST_F(CryptoTest, FixedKeyAndIV)
+{
+    std::string testMessage = "Testing with fixed key and IV";
+    std::vector<uint8_t> data = stringToBytes(testMessage);
+
+    // Create a fixed key and IV
+    std::vector<uint8_t> fixedKey(Crypto::KEY_SIZE, 0x42);   // Fill with the value 0x42
+    std::vector<uint8_t> fixedIV(Crypto::GCM_IV_SIZE, 0x24); // Fill with the value 0x24
+
+    // Encrypt with the fixed key and IV
+    std::vector<uint8_t> encrypted1 = crypto.encrypt(std::move(data), fixedKey, fixedIV);
+    EXPECT_FALSE(encrypted1.empty());
+
+    // Decrypt with the same key and IV
+    std::vector<uint8_t> decrypted = crypto.decrypt(encrypted1, fixedKey, fixedIV);
+    EXPECT_EQ(data, decrypted);
+
+    // The same data encrypted with the same key and IV should produce the same ciphertexts
+    // unlike the previous version with random IVs
+    std::vector<uint8_t> encrypted2 = crypto.encrypt(std::move(data), fixedKey, fixedIV);
+    EXPECT_EQ(encrypted1, encrypted2); // This test should now PASS with fixed IV
+}
+
+// Test that different IVs produce different ciphertexts
+TEST_F(CryptoTest, DifferentIVs)
+{
+    std::string testMessage = "Testing with different IVs";
+    std::vector<uint8_t> data = stringToBytes(testMessage);
+    std::vector<uint8_t> key = createRandomKey();
+
+    // Create two different IVs
+    std::vector<uint8_t> iv1(Crypto::GCM_IV_SIZE, 0x24);
+    std::vector<uint8_t> iv2(Crypto::GCM_IV_SIZE, 0x42);
+
+    // Encrypt with different IVs
+    std::vector<uint8_t> encrypted1 = crypto.encrypt(std::move(data), key, iv1);
+    std::vector<uint8_t> encrypted2 = crypto.encrypt(std::move(data), key, iv2);
+
+    // The ciphertexts should be different
+    EXPECT_NE(encrypted1, encrypted2);
+
+    // But both should decrypt correctly with their respective IVs
+    std::vector<uint8_t> decrypted1 = crypto.decrypt(encrypted1, key, iv1);
+    std::vector<uint8_t> decrypted2 = crypto.decrypt(encrypted2, key, iv2);
+
+    EXPECT_EQ(data, decrypted1);
+    EXPECT_EQ(data, decrypted2);
+}
+
+// Main function that runs all the tests
+int main(int argc, char **argv)
+{
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/tests/unit/test_LogEntry.cpp b/archive/2025/summer/bsc_karidas/tests/unit/test_LogEntry.cpp
new file mode 100644
index 000000000..b3fafea8a
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/tests/unit/test_LogEntry.cpp
@@ -0,0 +1,196 @@
+#include <gtest/gtest.h>
+#include "LogEntry.hpp"
+#include <vector>
+#include <iostream>
+#include <chrono>
+
+// Test default constructor
+TEST(LogEntryTest1, DefaultConstructor_InitializesCorrectly)
+{
+    LogEntry entry;
+
+    EXPECT_EQ(entry.getActionType(), LogEntry::ActionType::CREATE);
+    EXPECT_EQ(entry.getDataLocation(), "");
+    EXPECT_EQ(entry.getDataControllerId(), "");
+    EXPECT_EQ(entry.getDataProcessorId(), "");
+    EXPECT_EQ(entry.getDataSubjectId(), "");
+    EXPECT_EQ(entry.getPayload().size(), 0);
+
+    auto now = std::chrono::system_clock::now();
+    EXPECT_NEAR(std::chrono::system_clock::to_time_t(entry.getTimestamp()),
+                std::chrono::system_clock::to_time_t(now), 1);
+}
+
+// Test parameterized constructor
+TEST(LogEntryTest2, ParameterizedConstructor_SetsFieldsCorrectly)
+{
+    std::vector<uint8_t> testPayload(128, 0xAA); // 128 bytes of 0xAA
+    LogEntry entry(LogEntry::ActionType::UPDATE, "database/users", "controller123", "processor789", "subject456", testPayload);
+
+    EXPECT_EQ(entry.getActionType(), LogEntry::ActionType::UPDATE);
+    EXPECT_EQ(entry.getDataLocation(), "database/users");
+    EXPECT_EQ(entry.getDataControllerId(), "controller123");
+    EXPECT_EQ(entry.getDataProcessorId(), "processor789");
+    EXPECT_EQ(entry.getDataSubjectId(), "subject456");
+    EXPECT_EQ(entry.getPayload().size(), testPayload.size());
+
+    // Check content matches
+    const auto &payload = entry.getPayload();
+    bool contentMatches = true;
+    for (size_t i = 0; i < payload.size(); ++i)
+    {
+        if (payload[i] != testPayload[i])
+        {
+            contentMatches = false;
+            break;
+        }
+    }
+    EXPECT_TRUE(contentMatches);
+
+    auto now = std::chrono::system_clock::now();
+    EXPECT_NEAR(std::chrono::system_clock::to_time_t(entry.getTimestamp()),
+                std::chrono::system_clock::to_time_t(now), 1);
+}
+
+// Test serialization and deserialization with empty payload
+TEST(LogEntryTest4, SerializationDeserialization_WorksCorrectly)
+{
+    LogEntry entry(LogEntry::ActionType::READ, "storage/files", "controllerABC", "processorDEF", "subjectXYZ");
+
+    std::vector<uint8_t> serializedData = entry.serialize();
+    LogEntry newEntry;
+    bool success = newEntry.deserialize(std::move(serializedData));
+
+    EXPECT_TRUE(success);
+    EXPECT_EQ(newEntry.getActionType(), LogEntry::ActionType::READ);
+    EXPECT_EQ(newEntry.getDataLocation(), "storage/files");
+    EXPECT_EQ(newEntry.getDataControllerId(), "controllerABC");
+    EXPECT_EQ(newEntry.getDataProcessorId(), "processorDEF");
+    EXPECT_EQ(newEntry.getDataSubjectId(), "subjectXYZ");
+    EXPECT_EQ(newEntry.getPayload().size(), 0); // Payload should still be empty
+
+    std::vector<uint8_t> serializedData2 = entry.serialize();
+    success = newEntry.deserialize(std::move(serializedData2));
+
+    EXPECT_TRUE(success);
+
+    EXPECT_EQ(newEntry.getActionType(), LogEntry::ActionType::READ);
+    EXPECT_EQ(newEntry.getDataLocation(), "storage/files");
+    EXPECT_EQ(newEntry.getDataControllerId(), "controllerABC");
+    EXPECT_EQ(newEntry.getDataProcessorId(), "processorDEF");
+    EXPECT_EQ(newEntry.getDataSubjectId(), "subjectXYZ");
+    EXPECT_NEAR(std::chrono::system_clock::to_time_t(newEntry.getTimestamp()),
+                std::chrono::system_clock::to_time_t(entry.getTimestamp()), 1);
+}
+
+// Test serialization and deserialization with payload
+TEST(LogEntryTest4A, SerializationDeserializationWithPayload_WorksCorrectly)
+{
+    // Create test payload
+    std::vector<uint8_t> testPayload(64);
+    for (size_t i = 0; i < testPayload.size(); ++i)
+    {
+        testPayload[i] = static_cast<uint8_t>(i & 0xFF);
+    }
+
+    LogEntry entry(LogEntry::ActionType::READ, "storage/files", "controllerABC", "processorDEF", "subjectXYZ", testPayload);
+
+    // Serialize and deserialize
+    std::vector<uint8_t> serializedData = entry.serialize();
+    LogEntry newEntry;
+    bool success = newEntry.deserialize(std::move(serializedData));
+
+    // Verify deserialization worked
+    EXPECT_TRUE(success);
+    EXPECT_EQ(newEntry.getActionType(), LogEntry::ActionType::READ);
+    EXPECT_EQ(newEntry.getDataLocation(), "storage/files");
+    EXPECT_EQ(newEntry.getDataControllerId(), "controllerABC");
+    EXPECT_EQ(newEntry.getDataProcessorId(), "processorDEF");
+    EXPECT_EQ(newEntry.getDataSubjectId(), "subjectXYZ");
+
+    // Verify payload
+    EXPECT_EQ(newEntry.getPayload().size(), testPayload.size());
+
+    // Check payload content
+    const auto &recoveredPayload = newEntry.getPayload();
+    bool payloadMatches = true;
+    for (size_t i = 0; i < testPayload.size(); ++i)
+    {
+        if (recoveredPayload[i] != testPayload[i])
+        {
+            payloadMatches = false;
+            break;
+        }
+    }
+    EXPECT_TRUE(payloadMatches);
+}
+
+// Test batch serialization and deserialization with payloads
+TEST(LogEntryTest5, BatchSerializationDeserialization_WorksCorrectly)
+{
+    // Create a batch of log entries
+    std::vector<LogEntry> originalEntries;
+
+    // Entry with no payload
+    originalEntries.push_back(LogEntry(LogEntry::ActionType::CREATE, "db/users", "controller1", "processor1", "subject1"));
+
+    // Entry with small payload
+    std::vector<uint8_t> payload2(16, 0x22); // 16 bytes of 0x22
+    originalEntries.push_back(LogEntry(LogEntry::ActionType::READ, "files/documents", "controller2", "processor2", "subject2", payload2));
+
+    // Entry with medium payload
+    std::vector<uint8_t> payload3(128, 0x33); // 128 bytes of 0x33
+    originalEntries.push_back(LogEntry(LogEntry::ActionType::UPDATE, "cache/profiles", "controller3", "processor3", "subject3", payload3));
+
+    // Entry with large payload
+    std::vector<uint8_t> payload4(1024, 0x44); // 1024 bytes of 0x44
+    originalEntries.push_back(LogEntry(LogEntry::ActionType::DELETE, "archive/logs", "controller4", "processor4", "subject4", payload4));
+
+    // Keep a copy for verification, since serializeBatch consumes the entries
+    std::vector<LogEntry> expectedEntries = originalEntries;
+
+    // Serialize the batch
+    std::vector<uint8_t> batchData = LogEntry::serializeBatch(std::move(originalEntries));
+
+    // Check that the batch has reasonable size
+    EXPECT_GT(batchData.size(), sizeof(uint32_t)); // At least space for entry count
+
+    // Deserialize the batch
+    std::vector<LogEntry> recoveredEntries = LogEntry::deserializeBatch(std::move(batchData));
+
+    // Verify the number of entries
+    EXPECT_EQ(recoveredEntries.size(), expectedEntries.size());
+
+    // Verify each entry's data
+    for (size_t i = 0; i < expectedEntries.size() && i < recoveredEntries.size(); ++i)
+    {
+        EXPECT_EQ(recoveredEntries[i].getActionType(), expectedEntries[i].getActionType());
+        EXPECT_EQ(recoveredEntries[i].getDataLocation(), expectedEntries[i].getDataLocation());
+        EXPECT_EQ(recoveredEntries[i].getDataControllerId(), expectedEntries[i].getDataControllerId());
+        EXPECT_EQ(recoveredEntries[i].getDataProcessorId(), expectedEntries[i].getDataProcessorId());
+        EXPECT_EQ(recoveredEntries[i].getDataSubjectId(), expectedEntries[i].getDataSubjectId());
+
+        // Verify payload size
+        EXPECT_EQ(recoveredEntries[i].getPayload().size(), expectedEntries[i].getPayload().size());
+
+        // Check payload content if it's not empty
+        if (!expectedEntries[i].getPayload().empty())
+        {
+            const auto &originalPayload = expectedEntries[i].getPayload();
+            const auto &recoveredPayload = recoveredEntries[i].getPayload();
+
+            bool payloadMatches = true;
+            for (size_t j = 0; j < originalPayload.size(); ++j)
+            {
+                if (recoveredPayload[j] != originalPayload[j])
+                {
+                    payloadMatches = false;
+                    break;
+                }
+            }
+            EXPECT_TRUE(payloadMatches) << "Payload mismatch at entry " << i;
+        }
+
+        // Compare timestamps (allowing 1 second difference for potential precision issues)
+        EXPECT_NEAR(
+            std::chrono::system_clock::to_time_t(recoveredEntries[i].getTimestamp()),
+            std::chrono::system_clock::to_time_t(expectedEntries[i].getTimestamp()),
+            1);
+    }
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/tests/unit/test_Logger.cpp b/archive/2025/summer/bsc_karidas/tests/unit/test_Logger.cpp
new file mode 100644
index 000000000..1cc147836
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/tests/unit/test_Logger.cpp
@@ -0,0 +1,294 @@
+#include <gtest/gtest.h>
+#include "Logger.hpp"
+#include "BufferQueue.hpp"
+#include <chrono>
+#include <thread>
+
+class LoggerTest : public ::testing::Test
+{
+protected:
+    void SetUp() override
+    {
+        // Create a fresh instance for each test
+        Logger::s_instance.reset();
+
+        // Create a BufferQueue instance
+        queue = std::make_shared<BufferQueue>(1024, 10);
+    }
+
+    void TearDown() override
+    {
+        // Clean up the singleton
+        Logger::s_instance.reset();
+    }
+
+    std::shared_ptr<BufferQueue> queue;
+};
+
+// Test getInstance returns the same instance
+TEST_F(LoggerTest, GetInstanceReturnsSingleton)
+{
+    Logger &instance1 = Logger::getInstance();
+    Logger &instance2 = Logger::getInstance();
+
+    EXPECT_EQ(&instance1, &instance2);
+}
+
+// Test initialization with valid queue
+TEST_F(LoggerTest, InitializeWithValidQueue)
+{
+    Logger &logger = Logger::getInstance();
+
+    EXPECT_TRUE(logger.initialize(queue));
+    EXPECT_TRUE(logger.reset());
+}
+
+// Test initialization with null queue
+TEST_F(LoggerTest, InitializeWithNullQueue)
+{
+    Logger &logger = Logger::getInstance();
+
+    EXPECT_FALSE(logger.initialize(nullptr));
+}
+
+// Test double initialization
+TEST_F(LoggerTest, DoubleInitialization)
+{
+    Logger &logger = Logger::getInstance();
+
+    EXPECT_TRUE(logger.initialize(queue));
+    EXPECT_FALSE(logger.initialize(queue));
+
+    EXPECT_TRUE(logger.reset());
+}
+
+// Test creating producer token
+TEST_F(LoggerTest, CreateProducerToken)
+{
+    Logger &logger = Logger::getInstance();
+
+    // Should throw when not initialized
+    EXPECT_THROW(logger.createProducerToken(), std::runtime_error);
+
+    EXPECT_TRUE(logger.initialize(queue));
+
+    // Should not throw when initialized
+    EXPECT_NO_THROW({
+        BufferQueue::ProducerToken token = logger.createProducerToken();
+    });
+
+    EXPECT_TRUE(logger.reset());
+}
+
+// Test appending log entry after initialization
+TEST_F(LoggerTest, AppendAfterInitialization)
+{
+    Logger &logger = Logger::getInstance();
+    EXPECT_TRUE(logger.initialize(queue));
+
+    BufferQueue::ProducerToken token = logger.createProducerToken();
+    LogEntry entry(LogEntry::ActionType::READ, "location", "controller", "processor", "subject");
+
+    EXPECT_TRUE(logger.append(std::move(entry), token));
+    EXPECT_TRUE(logger.reset());
+}
+
+// Test appending when the logger is backed by a very small queue
+TEST_F(LoggerTest, BlockingAppendWithConsumption)
+{
+    Logger &logger = Logger::getInstance();
+    auto smallQueue = std::make_shared<BufferQueue>(2, 1);
+    EXPECT_TRUE(logger.initialize(smallQueue, std::chrono::milliseconds(1000)));
+
+    BufferQueue::ProducerToken token = logger.createProducerToken();
+
+    // The queue grows dynamically, so both appends below should succeed without blocking
+    LogEntry entry1(LogEntry::ActionType::READ, "location1", "controller1", "processor1", "subject1");
+    EXPECT_TRUE(logger.append(std::move(entry1), token));
+
+    LogEntry entry2(LogEntry::ActionType::READ, "location2", "controller2", "processor2", "subject2");
+    // With dynamic queue, this will succeed immediately
+    auto start = std::chrono::steady_clock::now();
+    EXPECT_TRUE(logger.append(std::move(entry2), token));
+    auto end = std::chrono::steady_clock::now();
+
+    // Verify it doesn't block since queue can grow
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
+    EXPECT_LT(duration, 100); // Should be very fast
+
+    // Verify both items are in the queue
+    EXPECT_EQ(smallQueue->size(), 2);
+
+    EXPECT_TRUE(logger.reset());
+}
+
+// Test that append returns promptly even when a short timeout is configured
+TEST_F(LoggerTest, AppendTimeoutBehavior)
+{
+    Logger &logger = Logger::getInstance();
+    auto localQueue = std::make_shared<BufferQueue>(1024, 1); // avoid shadowing the fixture's queue member
+
+    // Initialize with a very short timeout
+    EXPECT_TRUE(logger.initialize(localQueue, std::chrono::milliseconds(50)));
+
+    BufferQueue::ProducerToken token = logger.createProducerToken();
+    LogEntry entry(LogEntry::ActionType::READ, "location", "controller", "processor", "subject");
+
+    auto start = std::chrono::steady_clock::now();
+    EXPECT_TRUE(logger.append(std::move(entry), token)); // Should succeed immediately since queue grows
+    auto end = std::chrono::steady_clock::now();
+
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
+    EXPECT_LT(duration, 10); // Very fast operation
+
+    EXPECT_TRUE(logger.reset());
+}
+
+// Test batch append functionality
+TEST_F(LoggerTest, AppendBatch)
+{
+    Logger &logger = Logger::getInstance();
+    EXPECT_TRUE(logger.initialize(queue));
+
+    BufferQueue::ProducerToken token = logger.createProducerToken();
+
+    std::vector<LogEntry> entries;
+    for (int i = 0; i < 5; i++)
+    {
+        entries.emplace_back(
+            LogEntry::ActionType::READ,
+            "location_" + std::to_string(i),
+            "controller",
+            "processor",
+            "subject_" + std::to_string(i));
+    }
+
+    EXPECT_TRUE(logger.appendBatch(std::move(entries), token));
+    EXPECT_EQ(queue->size(), 5);
+
+    // Test empty batch
+    std::vector<LogEntry> emptyEntries;
+    EXPECT_TRUE(logger.appendBatch(std::move(emptyEntries), token));
+
+    EXPECT_TRUE(logger.reset());
+}
+
+// Test shutdown without initialization
+TEST_F(LoggerTest, ShutdownWithoutInitialization)
+{
+    Logger &logger = Logger::getInstance();
+
+    EXPECT_FALSE(logger.reset());
+}
+
+// Test shutdown with wait for completion
+TEST_F(LoggerTest, ShutdownWithWait)
+{
+    Logger &logger = Logger::getInstance();
+    EXPECT_TRUE(logger.initialize(queue));
+
+    BufferQueue::ProducerToken token = logger.createProducerToken();
+    LogEntry entry(LogEntry::ActionType::READ, "location", "controller", "processor", "subject");
+    EXPECT_TRUE(logger.append(std::move(entry), token));
+
+    // Launch an asynchronous consumer that waits briefly before draining the queue.
+    std::thread consumer([this]()
+                         {
+        std::this_thread::sleep_for(std::chrono::milliseconds(500)); // simulate delay
+        BufferQueue::ConsumerToken consumerToken = queue->createConsumerToken();
+        QueueItem dummyItem;
+        while (queue->tryDequeue(dummyItem, consumerToken))
+        {
+        } });
+
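+    // reset() should wait for completion, blocking until the consumer above has drained the queue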
+    EXPECT_TRUE(logger.reset());
+    consumer.join();
+    EXPECT_EQ(queue->size(), 0);
+}
+
+// Test export logs without initialization
+TEST_F(LoggerTest, ExportLogsWithoutInitialization)
+{
+    Logger &logger = Logger::getInstance();
+
+    auto now = std::chrono::system_clock::now();
+    EXPECT_FALSE(logger.exportLogs("output.log", now, now));
+}
+
+// Test export logs after initialization (unimplemented)
+TEST_F(LoggerTest, ExportLogsAfterInitialization)
+{
+    Logger &logger = Logger::getInstance();
+    EXPECT_TRUE(logger.initialize(queue));
+
+    auto now = std::chrono::system_clock::now();
+    EXPECT_FALSE(logger.exportLogs("output.log", now, now));
+
+    EXPECT_TRUE(logger.reset());
+}
+
+// Test thread safety of singleton
+TEST_F(LoggerTest, ThreadSafetySingleton)
+{
+    std::vector<std::thread> threads;
+    std::vector<Logger *> instances(10);
+
+    for (int i = 0; i < 10; i++)
+    {
+        threads.emplace_back([i, &instances]()
+                             { instances[i] = &Logger::getInstance(); });
+    }
+
+    for (auto &t : threads)
+    {
+        t.join();
+    }
+
+    // All threads should get the same instance
+    for (int i = 1; i < 10; i++)
+    {
+        EXPECT_EQ(instances[0], instances[i]);
+    }
+}
+
+// Test thread safety of API operations
+TEST_F(LoggerTest, ThreadSafetyOperations)
+{
+    Logger &logger = Logger::getInstance();
+    EXPECT_TRUE(logger.initialize(queue));
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < 10; i++)
+    {
+        threads.emplace_back([&logger, i]()
+                             {
+                                 // Create producer token for this thread
+                                 BufferQueue::ProducerToken token = logger.createProducerToken();
+
+                                 // Each thread appends 10 entries
+                                 for (int j = 0; j < 10; j++) {
+                                     LogEntry entry(
+                                         LogEntry::ActionType::READ,
+                                         "location_" + std::to_string(i),
+                                         "controller_" + std::to_string(i),
+                                         "processor_" + std::to_string(i),
+                                         "subject_" + std::to_string(j)
+                                        );
+                                     EXPECT_TRUE(logger.append(std::move(entry), token));
+                                 } });
+    }
+
+    for (auto &t : threads)
+    {
+        t.join();
+    }
+
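+    // 10 threads x 10 entries each -> exactly 100 items should be in the queue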
+    EXPECT_EQ(queue->size(), 100);
+}
+
+// Main function that runs all the tests
+int main(int argc, char **argv)
+{
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/tests/unit/test_SegmentedStorage.cpp b/archive/2025/summer/bsc_karidas/tests/unit/test_SegmentedStorage.cpp
new file mode 100644
index 000000000..daa8c6da9
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/tests/unit/test_SegmentedStorage.cpp
@@ -0,0 +1,637 @@
+#include <gtest/gtest.h>
+#include "SegmentedStorage.hpp"
+#include <thread>
+#include <vector>
+#include <fstream>
+#include <filesystem>
+#include <cstring>
+#include <random>
+#include <algorithm>
+#include <chrono>
+
+class SegmentedStorageTest : public ::testing::Test
+{
+protected:
+    std::string testPath;
+    std::string baseFilename;
+
+    void SetUp() override
+    {
+        testPath = "./test_storage_";
+        baseFilename = "test_file";
+
+        if (std::filesystem::exists(testPath))
+        {
+            std::filesystem::remove_all(testPath);
+        }
+    }
+
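+    // Format the current wall-clock time as "YYYYMMDDHHMMSS"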
+    std::string getCurrentTimestamp()
+    {
+        auto now = std::chrono::system_clock::now();
+        auto now_time_t = std::chrono::system_clock::to_time_t(now);
+        std::stringstream ss;
+        ss << std::put_time(std::localtime(&now_time_t), "%Y%m%d%H%M%S");
+        return ss.str();
+    }
+
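+    // Fill a buffer of the requested size with uniformly distributed random bytes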
+    std::vector<uint8_t> generateRandomData(size_t size)
+    {
+        std::vector<uint8_t> data(size);
+        std::random_device rd;
+        std::mt19937 gen(rd());
+        std::uniform_int_distribution<> distrib(0, 255);
+
+        std::generate(data.begin(), data.end(), [&]()
+                      { return distrib(gen); });
+        return data;
+    }
+
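+    // Collect all files under basePath whose names start with baseFilename and contain ".log", sorted lexicographically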
+    std::vector<std::string> getSegmentFiles(const std::string &basePath, const std::string &baseFilename)
+    {
+        std::vector<std::string> files;
+        for (const auto &entry : std::filesystem::directory_iterator(basePath))
+        {
+            std::string filename = entry.path().filename().string();
+            if (filename.find(baseFilename) == 0 && filename.find(".log") != std::string::npos)
+            {
+                files.push_back(entry.path().string());
+            }
+        }
+        std::sort(files.begin(), files.end());
+        return files;
+    }
+
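+    // Return the size of a file in bytes, or 0 if it cannot be opened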
+    size_t getFileSize(const std::string &filepath)
+    {
+        std::ifstream file(filepath, std::ios::binary | std::ios::ate);
+        if (!file.is_open())
+        {
+            return 0;
+        }
+        return file.tellg();
+    }
+
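+    // Read an entire file into a byte buffer; returns an empty vector if the file cannot be opened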
+    std::vector<uint8_t> readFile(const std::string &filepath)
+    {
+        std::ifstream file(filepath, std::ios::binary | std::ios::ate);
+        if (!file.is_open())
+        {
+            return {};
+        }
+
+        size_t fileSize = file.tellg();
+        std::vector<uint8_t> buffer(fileSize);
+
+        file.seekg(0);
+        file.read(reinterpret_cast<char *>(buffer.data()), fileSize);
+
+        return buffer;
+    }
+};
+
+// Test basic writing functionality
+TEST_F(SegmentedStorageTest, BasicWriteTest)
+{
+    SegmentedStorage storage(testPath, baseFilename);
+
+    std::vector<uint8_t> data = {'H', 'e', 'l', 'l', 'o', ',', ' ', 'W', 'o', 'r', 'l', 'd', '!'};
+    // Keep a copy for verification
+    std::vector<uint8_t> dataCopy = data;
+    size_t bytesWritten = storage.write(std::move(data));
+
+    ASSERT_EQ(bytesWritten, dataCopy.size());
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_EQ(files.size(), 1) << "Only one file should be created";
+
+    auto fileContents = readFile(files[0]);
+    ASSERT_EQ(fileContents.size(), dataCopy.size());
+    ASSERT_TRUE(std::equal(dataCopy.begin(), dataCopy.end(), fileContents.begin()));
+}
+
+// Test segment rotation based on size limit
+TEST_F(SegmentedStorageTest, SegmentRotationTest)
+{
+    size_t maxSegmentSize = 1024;
+    SegmentedStorage storage(testPath, baseFilename, maxSegmentSize);
+
+    std::vector<uint8_t> data1 = generateRandomData(maxSegmentSize - 100);
+    std::vector<uint8_t> data1Copy = data1; // Copy for verification
+    size_t bytesWritten1 = storage.write(std::move(data1));
+    ASSERT_EQ(bytesWritten1, data1Copy.size());
+
+    std::vector<uint8_t> data2 = generateRandomData(200);
+    std::vector<uint8_t> data2Copy = data2; // Copy for verification
+    size_t bytesWritten2 = storage.write(std::move(data2));
+    ASSERT_EQ(bytesWritten2, data2Copy.size());
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_EQ(files.size(), 2) << "Two files should be created due to rotation";
+
+    auto file1Contents = readFile(files[0]);
+    ASSERT_EQ(file1Contents.size(), data1Copy.size());
+    ASSERT_TRUE(std::equal(data1Copy.begin(), data1Copy.end(), file1Contents.begin()));
+
+    auto file2Contents = readFile(files[1]);
+    ASSERT_EQ(file2Contents.size(), data2Copy.size());
+    ASSERT_TRUE(std::equal(data2Copy.begin(), data2Copy.end(), file2Contents.begin()));
+}
+
+// Test writing to a specific file
+TEST_F(SegmentedStorageTest, WriteToSpecificFileTest)
+{
+    SegmentedStorage storage(testPath, baseFilename);
+
+    std::string customFilename = "custom_file";
+    std::vector<uint8_t> data = {'C', 'u', 's', 't', 'o', 'm', ' ', 'F', 'i', 'l', 'e'};
+    std::vector<uint8_t> dataCopy = data; // Copy for verification
+    size_t bytesWritten = storage.writeToFile(customFilename, std::move(data));
+    ASSERT_EQ(bytesWritten, dataCopy.size());
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, customFilename);
+    ASSERT_EQ(files.size(), 1) << "One custom file should be created";
+
+    auto fileContents = readFile(files[0]);
+    ASSERT_EQ(fileContents.size(), dataCopy.size());
+    ASSERT_TRUE(std::equal(dataCopy.begin(), dataCopy.end(), fileContents.begin()));
+}
+
+// Test concurrent writing to the same file
+TEST_F(SegmentedStorageTest, ConcurrentWriteTest)
+{
+    SegmentedStorage storage(testPath, baseFilename);
+
+    size_t numThreads = 10;
+    size_t dataSize = 1000;
+    size_t totalSize = numThreads * dataSize;
+
+    std::vector<std::thread> threads;
+    std::vector<std::vector<uint8_t>> dataBlocks;
+    // Reserve capacity up front so push_back never reallocates while already-running threads hold references into dataBlocks
+    dataBlocks.reserve(numThreads);
+
+    for (size_t i = 0; i < numThreads; i++)
+    {
+        dataBlocks.push_back(generateRandomData(dataSize));
+        threads.emplace_back([&storage, &dataBlocks, i]()
+                             { storage.write(std::move(dataBlocks[i])); });
+    }
+
+    for (auto &t : threads)
+    {
+        t.join();
+    }
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_EQ(files.size(), 1) << "Only one file should be created";
+
+    size_t fileSize = getFileSize(files[0]);
+    ASSERT_EQ(fileSize, totalSize) << "File size should match total written data";
+}
+
+// Test concurrent writing with rotation
+TEST_F(SegmentedStorageTest, ConcurrentWriteWithRotationTest)
+{
+    size_t maxSegmentSize = 5000;
+    SegmentedStorage storage(testPath, baseFilename, maxSegmentSize);
+
+    size_t numThreads = 20;
+    size_t dataSize = 1000;
+
+    std::vector<std::thread> threads;
+    std::vector<std::vector<uint8_t>> dataBlocks;
+    // Reserve capacity up front so push_back never reallocates while already-running threads hold references into dataBlocks
+    dataBlocks.reserve(numThreads);
+
+    for (size_t i = 0; i < numThreads; i++)
+    {
+        dataBlocks.push_back(generateRandomData(dataSize));
+        threads.emplace_back([&storage, &dataBlocks, i]()
+                             { storage.write(std::move(dataBlocks[i])); });
+    }
+
+    for (auto &t : threads)
+    {
+        t.join();
+    }
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_GT(files.size(), 1) << "Multiple files should be created due to rotation";
+
+    size_t totalFileSize = 0;
+    for (const auto &file : files)
+    {
+        totalFileSize += getFileSize(file);
+    }
+
+    ASSERT_EQ(totalFileSize, numThreads * dataSize) << "Total file sizes should match total written data";
+}
+
+// Test flush functionality
+TEST_F(SegmentedStorageTest, FlushTest)
+{
+    SegmentedStorage storage(testPath, baseFilename);
+
+    std::vector<uint8_t> data = generateRandomData(1000);
+    std::vector<uint8_t> dataCopy = data; // Copy for verification
+    storage.write(std::move(data));
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_EQ(files.size(), 1);
+
+    auto fileContents = readFile(files[0]);
+    ASSERT_EQ(fileContents.size(), dataCopy.size());
+    ASSERT_TRUE(std::equal(dataCopy.begin(), dataCopy.end(), fileContents.begin()));
+}
+
+// Test multiple segment files with the same base path
+TEST_F(SegmentedStorageTest, MultipleSegmentFilesTest)
+{
+    SegmentedStorage storage(testPath, baseFilename);
+
+    std::string file1 = "file1";
+    std::string file2 = "file2";
+    std::string file3 = "file3";
+
+    std::vector<uint8_t> data1 = {'F', 'i', 'l', 'e', '1'};
+    std::vector<uint8_t> data2 = {'F', 'i', 'l', 'e', '2'};
+    std::vector<uint8_t> data3 = {'F', 'i', 'l', 'e', '3'};
+    std::vector<uint8_t> data1Copy = data1; // Copies for verification
+    std::vector<uint8_t> data2Copy = data2;
+    std::vector<uint8_t> data3Copy = data3;
+
+    storage.writeToFile(file1, std::move(data1));
+    storage.writeToFile(file2, std::move(data2));
+    storage.writeToFile(file3, std::move(data3));
+
+    storage.flush();
+
+    ASSERT_EQ(getSegmentFiles(testPath, file1).size(), 1);
+    ASSERT_EQ(getSegmentFiles(testPath, file2).size(), 1);
+    ASSERT_EQ(getSegmentFiles(testPath, file3).size(), 1);
+
+    auto files1 = getSegmentFiles(testPath, file1);
+    auto files2 = getSegmentFiles(testPath, file2);
+    auto files3 = getSegmentFiles(testPath, file3);
+
+    auto content1 = readFile(files1[0]);
+    auto content2 = readFile(files2[0]);
+    auto content3 = readFile(files3[0]);
+
+    ASSERT_TRUE(std::equal(data1Copy.begin(), data1Copy.end(), content1.begin()));
+    ASSERT_TRUE(std::equal(data2Copy.begin(), data2Copy.end(), content2.begin()));
+    ASSERT_TRUE(std::equal(data3Copy.begin(), data3Copy.end(), content3.begin()));
+}
+
+// Test large files
+TEST_F(SegmentedStorageTest, LargeFileTest)
+{
+    SegmentedStorage storage(testPath, baseFilename);
+
+    size_t dataSize = 5 * 1024 * 1024;
+    std::vector<uint8_t> largeData = generateRandomData(dataSize);
+    size_t bytesWritten = storage.write(std::move(largeData));
+    ASSERT_EQ(bytesWritten, dataSize);
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_EQ(files.size(), 1);
+
+    size_t fileSize = getFileSize(files[0]);
+    ASSERT_EQ(fileSize, dataSize);
+}
+
+// Test destructor closes files properly
+TEST_F(SegmentedStorageTest, DestructorTest)
+{
+    {
+        SegmentedStorage storage(testPath, baseFilename);
+        std::vector<uint8_t> data = {'T', 'e', 's', 't'};
+        storage.write(std::move(data));
+        storage.flush();
+    }
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_EQ(files.size(), 1);
+
+    auto fileContents = readFile(files[0]);
+    ASSERT_EQ(fileContents.size(), 4);
+    ASSERT_EQ(fileContents[0], 'T');
+    ASSERT_EQ(fileContents[1], 'e');
+    ASSERT_EQ(fileContents[2], 's');
+    ASSERT_EQ(fileContents[3], 't');
+}
+
+// Test exact rotation boundary case
+TEST_F(SegmentedStorageTest, ExactRotationBoundaryTest)
+{
+    size_t maxSegmentSize = 1000;
+    SegmentedStorage storage(testPath, baseFilename, maxSegmentSize);
+
+    std::vector<uint8_t> data1 = generateRandomData(maxSegmentSize);
+    std::vector<uint8_t> data1Copy = data1; // Copy for verification
+    size_t bytesWritten1 = storage.write(std::move(data1));
+    ASSERT_EQ(bytesWritten1, data1Copy.size());
+
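+    // A single extra byte should land in a freshly rotated segment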
+    std::vector<uint8_t> data2 = {42};
+    std::vector<uint8_t> data2Copy = data2; // Copy for verification
+    size_t bytesWritten2 = storage.write(std::move(data2));
+    ASSERT_EQ(bytesWritten2, data2Copy.size());
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_EQ(files.size(), 2) << "Two files should be created with exact boundary";
+
+    ASSERT_EQ(getFileSize(files[0]), maxSegmentSize);
+    ASSERT_EQ(getFileSize(files[1]), 1);
+}
+
+// Test concurrent writing with realistic thread count at rotation boundaries
+TEST_F(SegmentedStorageTest, RealisticConcurrencyRotationTest)
+{
+    size_t maxSegmentSize = 1000;
+    SegmentedStorage storage(testPath, baseFilename, maxSegmentSize);
+
+    size_t numThreads = 8;
+    size_t dataSize = 200;
+
+    std::vector<std::thread> threads;
+    std::vector<std::vector<uint8_t>> dataBlocks;
+    // Reserve capacity up front so push_back never reallocates while already-running threads hold references into dataBlocks
+    dataBlocks.reserve(numThreads);
+
+    for (size_t i = 0; i < numThreads; i++)
+    {
+        dataBlocks.push_back(generateRandomData(dataSize));
+        threads.emplace_back([&storage, &dataBlocks, i]()
+                             { storage.write(std::move(dataBlocks[i])); });
+    }
+
+    for (auto &t : threads)
+    {
+        t.join();
+    }
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_GT(files.size(), 1) << "Multiple files should be created due to rotation";
+
+    size_t totalFileSize = 0;
+    for (const auto &file : files)
+    {
+        totalFileSize += getFileSize(file);
+    }
+
+    ASSERT_EQ(totalFileSize, numThreads * dataSize) << "Total file sizes should match total written data";
+}
+
+// Test rotation with realistic thread count writing near segment boundary
+TEST_F(SegmentedStorageTest, RealisticRotationBoundaryTest)
+{
+    size_t maxSegmentSize = 1000;
+    SegmentedStorage storage(testPath, baseFilename, maxSegmentSize);
+
+    size_t numThreads = 6;
+    size_t dataSize = maxSegmentSize - 50;
+
+    std::vector<std::thread> threads;
+    std::vector<std::vector<uint8_t>> dataBlocks;
+
+    for (size_t i = 0; i < numThreads; i++)
+    {
+        dataBlocks.push_back(generateRandomData(dataSize));
+    }
+
+    for (size_t i = 0; i < numThreads; i++)
+    {
+        threads.emplace_back([&storage, &dataBlocks, i]()
+                             { storage.write(std::move(dataBlocks[i])); });
+    }
+
+    for (auto &t : threads)
+    {
+        t.join();
+    }
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_GT(files.size(), 1) << "Multiple files should be created due to rotation";
+
+    size_t totalFileSize = 0;
+    for (const auto &file : files)
+    {
+        totalFileSize += getFileSize(file);
+    }
+
+    ASSERT_EQ(totalFileSize, numThreads * dataSize) << "Total file sizes should match total written data";
+}
+
+// Test writing zero bytes
+TEST_F(SegmentedStorageTest, ZeroByteWriteTest)
+{
+    SegmentedStorage storage(testPath, baseFilename);
+
+    std::vector<uint8_t> emptyData;
+    size_t bytesWritten = storage.write(std::move(emptyData));
+
+    ASSERT_EQ(bytesWritten, 0) << "Zero bytes should be written for empty data";
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_EQ(files.size(), 1) << "One file should still be created";
+    ASSERT_EQ(getFileSize(files[0]), 0) << "File should be empty";
+}
+
+// Test concurrent writes to different files
+TEST_F(SegmentedStorageTest, ConcurrentMultiFileWriteTest)
+{
+    SegmentedStorage storage(testPath, baseFilename);
+
+    size_t numFiles = 10;
+    size_t threadsPerFile = 5;
+    size_t dataSize = 100;
+
+    std::vector<std::thread> threads;
+    std::vector<std::string> filenames;
+    std::vector<std::vector<uint8_t>> dataBlocks;
+
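+    // Pre-generate all filenames and payloads before spawning threads so workers only read from fully populated vectors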
+    for (size_t i = 0; i < numFiles; i++)
+    {
+        filenames.push_back("file_" + std::to_string(i));
+    }
+
+    for (size_t i = 0; i < numFiles * threadsPerFile; i++)
+    {
+        dataBlocks.push_back(generateRandomData(dataSize));
+    }
+
+    for (size_t i = 0; i < numFiles; i++)
+    {
+        for (size_t j = 0; j < threadsPerFile; j++)
+        {
+            size_t dataIndex = i * threadsPerFile + j;
+            threads.emplace_back([&storage, &filenames, &dataBlocks, i, dataIndex]()
+                                 { storage.writeToFile(filenames[i], std::move(dataBlocks[dataIndex])); });
+        }
+    }
+
+    for (auto &t : threads)
+    {
+        t.join();
+    }
+
+    storage.flush();
+
+    for (const auto &filename : filenames)
+    {
+        auto files = getSegmentFiles(testPath, filename);
+        ASSERT_EQ(files.size(), 1) << "One file should be created per filename";
+
+        size_t fileSize = getFileSize(files[0]);
+        ASSERT_EQ(fileSize, threadsPerFile * dataSize) << "Each file should contain data from all its threads";
+    }
+}
+
+// Test rapid succession of writes near rotation boundary
+TEST_F(SegmentedStorageTest, RapidWritesNearRotationTest)
+{
+    size_t maxSegmentSize = 1000;
+    SegmentedStorage storage(testPath, baseFilename, maxSegmentSize);
+
+    std::vector<uint8_t> initialData = generateRandomData(maxSegmentSize - 100);
+    size_t initialSize = initialData.size();
+    storage.write(std::move(initialData));
+
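+    // The first segment is now just below its size limit; the small writes below should force a rotation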
+    size_t numWrites = 20;
+    size_t smallChunkSize = 10;
+
+    std::vector<std::vector<uint8_t>> dataChunks;
+    for (size_t i = 0; i < numWrites; i++)
+    {
+        dataChunks.push_back(generateRandomData(smallChunkSize));
+    }
+
+    for (auto &chunk : dataChunks)
+    {
+        storage.write(std::move(chunk));
+    }
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_GE(files.size(), 2) << "At least two files should be created due to rotation";
+
+    size_t totalFileSize = 0;
+    for (const auto &file : files)
+    {
+        totalFileSize += getFileSize(file);
+    }
+
+    size_t expectedTotalSize = initialSize + (numWrites * smallChunkSize);
+    ASSERT_EQ(totalFileSize, expectedTotalSize) << "Total file sizes should match total written data";
+}
+
+// Test with extremely small segment size to force frequent rotations
+TEST_F(SegmentedStorageTest, FrequentRotationTest)
+{
+    size_t maxSegmentSize = 50;
+    SegmentedStorage storage(testPath, baseFilename, maxSegmentSize);
+
+    size_t numWrites = 20;
+    size_t dataSize = 30;
+
+    std::vector<std::vector<uint8_t>> dataBlocks;
+    for (size_t i = 0; i < numWrites; i++)
+    {
+        dataBlocks.push_back(generateRandomData(dataSize));
+        storage.write(std::move(dataBlocks[i]));
+    }
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_GE(files.size(), numWrites / 2) << "Many files should be created due to frequent rotation";
+
+    size_t totalFileSize = 0;
+    for (const auto &file : files)
+    {
+        totalFileSize += getFileSize(file);
+        ASSERT_LE(getFileSize(file), maxSegmentSize);
+    }
+
+    ASSERT_EQ(totalFileSize, numWrites * dataSize) << "Total file sizes should match total written data";
+}
+
+// Test that consecutive writes land back-to-back in the same segment (no write failure is actually injected)
+TEST_F(SegmentedStorageTest, WriteErrorRecoveryTest)
+{
+    SegmentedStorage storage(testPath, baseFilename);
+
+    std::vector<uint8_t> data1 = {'I', 'n', 'i', 't', 'i', 'a', 'l'};
+    std::vector<uint8_t> data1Copy = data1; // Copy for verification
+    storage.write(std::move(data1));
+
+    std::vector<uint8_t> data2 = {'R', 'e', 'c', 'o', 'v', 'e', 'r', 'y'};
+    std::vector<uint8_t> data2Copy = data2; // Copy for verification
+    storage.write(std::move(data2));
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_EQ(files.size(), 1);
+
+    auto fileContents = readFile(files[0]);
+    ASSERT_EQ(fileContents.size(), data1Copy.size() + data2Copy.size());
+
+    for (size_t i = 0; i < data1Copy.size(); i++)
+    {
+        ASSERT_EQ(fileContents[i], data1Copy[i]);
+    }
+
+    for (size_t i = 0; i < data2Copy.size(); i++)
+    {
+        ASSERT_EQ(fileContents[data1Copy.size() + i], data2Copy[i]);
+    }
+}
+
+// Test boundary case for multiple segments
+TEST_F(SegmentedStorageTest, MultiSegmentBoundaryTest)
+{
+    size_t maxSegmentSize = 100;
+    SegmentedStorage storage(testPath, baseFilename, maxSegmentSize);
+
+    for (int i = 0; i < 3; i++)
+    {
+        auto data = generateRandomData(maxSegmentSize);
+        storage.write(std::move(data));
+    }
+
+    storage.flush();
+
+    auto files = getSegmentFiles(testPath, baseFilename);
+    ASSERT_EQ(files.size(), 3) << "Should have exactly 3 segments";
+
+    for (const auto &file : files)
+    {
+        ASSERT_EQ(getFileSize(file), maxSegmentSize);
+    }
+}
+
+int main(int argc, char **argv)
+{
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
\ No newline at end of file
diff --git a/archive/2025/summer/bsc_karidas/tests/unit/test_Writer.cpp b/archive/2025/summer/bsc_karidas/tests/unit/test_Writer.cpp
new file mode 100644
index 000000000..2b555c87e
--- /dev/null
+++ b/archive/2025/summer/bsc_karidas/tests/unit/test_Writer.cpp
@@ -0,0 +1,108 @@
+#include <gtest/gtest.h>
+#include "Writer.hpp"
+#include "BufferQueue.hpp"
+#include "SegmentedStorage.hpp"
+#include <chrono>
+#include <thread>
+#include <filesystem>
+
+class WriterTest : public ::testing::Test
+{
+protected:
+    void SetUp() override
+    {
+        // Create a temporary directory for testing log segments
+        testDir = "test_logs";
+        std::filesystem::create_directories(testDir);
+        queue = std::make_unique<BufferQueue>(8192, 1);
+        // Create a SegmentedStorage instance with small segment size for testing
+        storage = std::make_shared<SegmentedStorage>(
+            testDir,
+            "test_logsegment",
+            1024 * 1024 // max segment size (e.g., 1 MB for test)
+        );
+    }
+
+    void TearDown() override
+    {
+        if (writer)
+        {
+            writer->stop();
+        }
+        // Cleanup test directory if desired
+        std::filesystem::remove_all(testDir);
+    }
+
+    std::unique_ptr<BufferQueue> queue;
+    std::shared_ptr<SegmentedStorage> storage;
+    std::unique_ptr<Writer> writer;
+    std::string testDir;
+};
+
+// Test that the writer starts and stops correctly
+TEST_F(WriterTest, StartAndStop)
+{
+    writer = std::make_unique<Writer>(*queue, storage);
+    EXPECT_FALSE(writer->isRunning());
+
+    writer->start();
+    EXPECT_TRUE(writer->isRunning());
+
+    writer->stop();
+    EXPECT_FALSE(writer->isRunning());
+}
+
+// Test multiple start calls
+TEST_F(WriterTest, MultipleStartCalls)
+{
+    writer = std::make_unique<Writer>(*queue, storage);
+    writer->start();
+    EXPECT_TRUE(writer->isRunning());
+
+    writer->start(); // multiple start calls should not affect the running state
+    EXPECT_TRUE(writer->isRunning());
+
+    writer->stop();
+    EXPECT_FALSE(writer->isRunning());
+}
+
+// Test batch processing with some entries
+TEST_F(WriterTest, ProcessBatchEntries)
+{
+    std::vector<QueueItem> testItems = {
+        QueueItem{LogEntry{LogEntry::ActionType::READ, "location1", "controller1", "processor1", "subject1"}},
+        QueueItem{LogEntry{LogEntry::ActionType::CREATE, "location2", "controller2", "processor2", "subject2"}},
+        QueueItem{LogEntry{LogEntry::ActionType::UPDATE, "location3", "controller3", "processor3", "subject3"}}};
+
+    BufferQueue::ProducerToken producerToken = queue->createProducerToken();
+
+    // Enqueue test entries
+    queue->enqueueBatchBlocking(testItems, producerToken, std::chrono::milliseconds(100));
+
+    // Instantiate writer with a batch size equal to number of test items
+    writer = std::make_unique<Writer>(*queue, storage, testItems.size());
+    writer->start();
+
+    // Give some time for the writer thread to process the entries.
+    std::this_thread::sleep_for(std::chrono::milliseconds(200));
+
+    // Verify that the queue is empty after processing.
+    EXPECT_EQ(queue->size(), 0);
+
+    writer->stop();
+}
+
+// Test behavior when the queue is empty
+TEST_F(WriterTest, EmptyQueue)
+{
+    EXPECT_EQ(queue->size(), 0);
+
+    writer = std::make_unique<Writer>(*queue, storage);
+    writer->start();
+
+    // Give some time to verify it handles empty queue gracefully
+    std::this_thread::sleep_for(std::chrono::milliseconds(200));
+    EXPECT_EQ(queue->size(), 0);
+
+    writer->stop();
+}
\ No newline at end of file