Compression/Decompression

Canned Mode

/*******************************************************************************
 * Copyright (C) 2022 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 ******************************************************************************/

//* [QPL_LOW_LEVEL_CANNED_MODE_EXAMPLE] */

#include <iostream>
#include <memory>
#include <vector>

#include "qpl/qpl.h"

#include "examples_utils.hpp" // for argument parsing function

/**
 * @brief This example requires a command line argument to set the execution path. Valid values are `software_path`
 * and `hardware_path`.
 * In QPL, @ref qpl_path_software (`Software Path`) means that computations will be done on the CPU.
 * An accelerator can be used instead of the CPU. In this case, @ref qpl_path_hardware (`Hardware Path`) must be specified.
 * If it does not matter where the calculations are done, @ref qpl_path_auto (`Auto Path`) can be used to allow
 * the library to choose the execution path. Auto Path usage is not demonstrated by this example.
 *
 * @warning ---! Important !---
 * `Hardware Path` doesn't support all features declared for `Software Path`
 *
 */
constexpr const uint32_t source_size = 1000;

auto main(int argc, char** argv) -> int {
    std::cout << "Intel(R) Query Processing Library version is " << qpl_get_library_version() << ".\n";

    qpl_path_t execution_path = qpl_path_software;

    // Get path from input argument
    const int parse_ret = parse_execution_path(argc, argv, &execution_path);
    if (parse_ret != 0) { return 1; }

    // Get compression buffer size estimate
    const uint32_t compression_size = qpl_get_safe_deflate_compression_buffer_size(source_size);
    if (compression_size == 0) {
        std::cout << "Invalid source size. Source size exceeds the maximum supported size.\n";
        return 1;
    }

    // Source and output containers
    std::vector<uint8_t> source(source_size, 5);
    std::vector<uint8_t> destination(compression_size, 4);
    std::vector<uint8_t> reference(source_size, 7);

    std::unique_ptr<uint8_t[]> job_buffer;
    uint32_t                   size = 0;
    qpl_histogram              deflate_histogram {};

    // Job initialization
    qpl_status status = qpl_get_job_size(execution_path, &size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job size getting.\n";
        return 1;
    }

    job_buffer   = std::make_unique<uint8_t[]>(size);
    qpl_job* job = reinterpret_cast<qpl_job*>(job_buffer.get());

    status = qpl_init_job(execution_path, job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job initializing.\n";
        return 1;
    }

    // Huffman table initialization
    qpl_huffman_table_t huffman_table = nullptr;

    status = qpl_deflate_huffman_table_create(combined_table_type, execution_path, DEFAULT_ALLOCATOR_C, &huffman_table);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during Huffman table creation.\n";
        return 1;
    }

    // Filling deflate histogram first
    status = qpl_gather_deflate_statistics(source.data(), source_size, &deflate_histogram, qpl_default_level,
                                           execution_path);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during gathering statistics for Huffman table.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    // Building the Huffman table
    status = qpl_huffman_table_init_with_histogram(huffman_table, &deflate_histogram);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during Huffman table initialization.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    // Now perform canned mode compression
    job->op            = qpl_op_compress;
    job->level         = qpl_default_level;
    job->next_in_ptr   = source.data();
    job->next_out_ptr  = destination.data();
    job->available_in  = source_size;
    job->available_out = static_cast<uint32_t>(destination.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_CANNED_MODE | QPL_FLAG_OMIT_VERIFY;
    job->huffman_table = huffman_table;

    // Compression
    status = qpl_execute_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during compression.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    const uint32_t compressed_size = job->total_out;

    // Performing a decompression operation
    job->op            = qpl_op_decompress;
    job->next_in_ptr   = destination.data();
    job->next_out_ptr  = reference.data();
    job->available_in  = compressed_size;
    job->available_out = static_cast<uint32_t>(reference.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_CANNED_MODE;
    job->huffman_table = huffman_table;

    // Decompression
    status = qpl_execute_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during decompression.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    // Freeing resources
    status = qpl_huffman_table_destroy(huffman_table);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during destroying Huffman table.\n";
        return 1;
    }

    status = qpl_fini_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job finalization.\n";
        return 1;
    }

    // Compare source and reference to verify the round trip
    for (size_t i = 0; i < source.size(); i++) {
        if (source[i] != reference[i]) {
            std::cout << "Content wasn't successfully compressed and decompressed.\n";
            return 1;
        }
    }

    std::cout << "Content was successfully compressed and decompressed.\n";
    std::cout << "Input size: " << source_size << ", compressed size: " << compressed_size
              << ", compression ratio: " << (float)source_size / (float)compressed_size << ".\n";

    return 0;
}

//* [QPL_LOW_LEVEL_CANNED_MODE_EXAMPLE] */
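
The example above tears down the job and the Huffman table by hand on every error path. As a side note, the qpl_get_job_size / qpl_init_job / qpl_fini_job sequence can be wrapped in a small RAII helper so that cleanup happens automatically; the sketch below is illustrative only and is not part of the Intel QPL API.

#include <cstdint>
#include <memory>
#include <stdexcept>

#include "qpl/qpl.h"

// Illustrative helper (not part of Intel QPL): owns the job buffer and makes
// sure qpl_fini_job() runs even when an error forces an early return.
class scoped_qpl_job {
public:
    explicit scoped_qpl_job(qpl_path_t path) {
        uint32_t size = 0U;
        if (qpl_get_job_size(path, &size) != QPL_STS_OK) { throw std::runtime_error("qpl_get_job_size failed"); }

        buffer_ = std::make_unique<uint8_t[]>(size);
        job_    = reinterpret_cast<qpl_job*>(buffer_.get());

        if (qpl_init_job(path, job_) != QPL_STS_OK) {
            qpl_fini_job(job_); // finalize even after a failed init to free internal resources
            throw std::runtime_error("qpl_init_job failed");
        }
    }

    ~scoped_qpl_job() { qpl_fini_job(job_); }

    qpl_job* get() const noexcept { return job_; }

private:
    std::unique_ptr<uint8_t[]> buffer_;
    qpl_job*                   job_ = nullptr;
};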

Canned Mode with Data

/*******************************************************************************
 * Copyright (C) 2022 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 ******************************************************************************/

//* [QPL_LOW_LEVEL_CANNED_MODE_EXAMPLE] */

#include <filesystem>
#include <fstream>
#include <iostream>
#include <memory>
#include <vector>

#include "qpl/qpl.h"

#include "examples_utils.hpp" // for argument parsing function

/**
 * @brief This example requires a command line argument to set the execution path. Valid values are `software_path`
 * and `hardware_path`. This example also requires a second command line argument which specifies the dataset path.
 * In QPL, @ref qpl_path_software (`Software Path`) means that computations will be done on the CPU.
 * An accelerator can be used instead of the CPU. In this case, @ref qpl_path_hardware (`Hardware Path`) must be specified.
 * If it does not matter where the calculations are done, @ref qpl_path_auto (`Auto Path`) can be used to allow
 * the library to choose the execution path. Auto Path usage is not demonstrated by this example.
 *
 * @warning ---! Important !---
 * `Hardware Path` doesn't support all features declared for `Software Path`
 *
 */

auto main(int argc, char** argv) -> int {
    std::cout << "Intel(R) Query Processing Library version is " << qpl_get_library_version() << ".\n";

    qpl_path_t execution_path = qpl_path_software;

    // Get path from input argument
    const int extra_arg = 1;
    const int parse_ret = parse_execution_path(argc, argv, &execution_path, extra_arg);
    if (parse_ret) { return 1; }

    const std::string dataset_path = argv[2];

    // Source and output containers
    for (const auto& path : std::filesystem::directory_iterator(dataset_path)) {
        std::ifstream file(path.path().string(), std::ifstream::binary);

        if (!file.is_open()) {
            std::cout << "Unable to open file in " << dataset_path << '\n';
            return 1;
        }

        std::vector<uint8_t> source;
        std::vector<uint8_t> destination;
        std::vector<uint8_t> reference;

        source.reserve(path.file_size());
        source.assign(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>());

        // Get compression buffer size estimate
        const uint32_t compression_size =
                qpl_get_safe_deflate_compression_buffer_size(static_cast<uint32_t>(source.size()));
        if (compression_size == 0) {
            std::cout << "Invalid source size. Source size exceeds the maximum supported size.\n";
            return 1;
        }

        destination.resize(compression_size);
        reference.resize(source.size());

        std::unique_ptr<uint8_t[]> job_buffer;
        uint32_t                   size = 0;
        qpl_histogram              deflate_histogram {};

        // Job initialization
        qpl_status status = qpl_get_job_size(execution_path, &size);
        if (status != QPL_STS_OK) {
            std::cout << "An error " << status << " acquired during job size getting.\n";
            return 1;
        }

        job_buffer   = std::make_unique<uint8_t[]>(size);
        qpl_job* job = reinterpret_cast<qpl_job*>(job_buffer.get());

        status = qpl_init_job(execution_path, job);
        if (status != QPL_STS_OK) {
            std::cout << "An error " << status << " acquired during job initializing.\n";
            return 1;
        }

        // Huffman table initialization
        qpl_huffman_table_t huffman_table = nullptr;

        status = qpl_deflate_huffman_table_create(combined_table_type, execution_path, DEFAULT_ALLOCATOR_C,
                                                  &huffman_table);
        if (status != QPL_STS_OK) {
            std::cout << "An error " << status << " acquired during Huffman table creation.\n";
            return 1;
        }

        // Filling deflate histogram first
        status = qpl_gather_deflate_statistics(source.data(), static_cast<uint32_t>(source.size()), &deflate_histogram,
                                               qpl_default_level, execution_path);
        if (status != QPL_STS_OK) {
            std::cout << "An error " << status << " acquired during gathering statistics for Huffman table.\n";
            qpl_huffman_table_destroy(huffman_table);
            return 1;
        }

        status = qpl_huffman_table_init_with_histogram(huffman_table, &deflate_histogram);
        if (status != QPL_STS_OK) {
            std::cout << "An error " << status << " acquired during Huffman table initialization.\n";
            qpl_huffman_table_destroy(huffman_table);
            return 1;
        }

        // Now perform canned mode compression
        job->op            = qpl_op_compress;
        job->level         = qpl_default_level;
        job->next_in_ptr   = source.data();
        job->next_out_ptr  = destination.data();
        job->available_in  = static_cast<uint32_t>(source.size());
        job->available_out = static_cast<uint32_t>(destination.size());
        job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_CANNED_MODE | QPL_FLAG_OMIT_VERIFY;
        job->huffman_table = huffman_table;

        // Compression
        status = qpl_execute_job(job);
        if (status != QPL_STS_OK) {
            std::cout << "An error " << status << " acquired during compression.\n";
            qpl_huffman_table_destroy(huffman_table);
            return 1;
        }

        const uint32_t compressed_size = job->total_out;

        // Performing a decompression operation
        job->op            = qpl_op_decompress;
        job->next_in_ptr   = destination.data();
        job->next_out_ptr  = reference.data();
        job->available_in  = compressed_size;
        job->available_out = static_cast<uint32_t>(reference.size());
        job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_CANNED_MODE;
        job->huffman_table = huffman_table;

        // Decompression
        status = qpl_execute_job(job);
        if (status != QPL_STS_OK) {
            std::cout << "An error " << status << " acquired during decompression.\n";
            qpl_huffman_table_destroy(huffman_table);
            return 1;
        }

        // Freeing resources
        status = qpl_huffman_table_destroy(huffman_table);
        if (status != QPL_STS_OK) {
            std::cout << "An error " << status << " acquired during destroying Huffman table.\n";
            return 1;
        }

        status = qpl_fini_job(job);
        if (status != QPL_STS_OK) {
            std::cout << "An error " << status << " acquired during job finalization.\n";
            return 1;
        }

        // Compare source and reference to verify the round trip
        for (size_t i = 0; i < source.size(); i++) {
            if (source[i] != reference[i]) {
                std::cout << "Content wasn't successfully compressed and decompressed.\n";
                return 1;
            }
        }

        std::cout << "Content of " << path.path().filename() << " was successfully compressed and decompressed.\n";
        std::cout << ""
                     "Input size: "
                  << source.size() << ", compressed size: " << compressed_size
                  << ", compression ratio: " << (float)source.size() / (float)compressed_size << ".\n";
    }

    return 0;
}

//* [QPL_LOW_LEVEL_CANNED_MODE_EXAMPLE] */
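
The loop above reports a compression ratio per file. If an aggregate number over the whole dataset is also of interest, a small accumulator such as the hypothetical one below could be fed with source.size() and compressed_size after each successful round trip; it is only a sketch and not part of the example.

#include <cstdint>
#include <iostream>

// Hypothetical helper: sums the per-file sizes so one overall ratio can be
// reported once the dataset loop has finished.
struct ratio_accumulator {
    uint64_t input_bytes      = 0U;
    uint64_t compressed_bytes = 0U;

    void add(uint64_t input, uint64_t compressed) {
        input_bytes += input;
        compressed_bytes += compressed;
    }

    double overall_ratio() const {
        return compressed_bytes == 0U ? 0.0
                                      : static_cast<double>(input_bytes) / static_cast<double>(compressed_bytes);
    }
};

int main() {
    ratio_accumulator acc;
    acc.add(1000U, 250U); // e.g. acc.add(source.size(), compressed_size) inside the dataset loop
    std::cout << "Overall compression ratio: " << acc.overall_ratio() << ".\n";
    return 0;
}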

Canned Mode with Dictionary

/*******************************************************************************
 * Copyright (C) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 ******************************************************************************/

//* [QPL_LOW_LEVEL_CANNED_COMPRESSION_WITH_DICTIONARY_EXAMPLE] */

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

#include "qpl/qpl.h"

#include "examples_utils.hpp" // for argument parsing function

/**
 * @brief This example requires a command line argument to set the execution path. Valid values are `software_path`
 * and `hardware_path`.
 * In QPL, @ref qpl_path_software (`Software Path`) means that computations will be done on the CPU.
 * An accelerator can be used instead of the CPU. In this case, @ref qpl_path_hardware (`Hardware Path`) must be specified.
 * If it does not matter where the calculations are done, @ref qpl_path_auto (`Auto Path`) can be used to allow
 * the library to choose the execution path. Auto Path usage is not demonstrated by this example.
 *
 * @warning ---! Important !---
 * `Hardware Path` doesn't support all features declared for `Software Path`
 *
 */
constexpr const uint32_t source_size = 2048U;

auto main(int argc, char** argv) -> int {
    std::cout << "Intel(R) Query Processing Library version is " << qpl_get_library_version() << ".\n";

    // Default to Software Path
    qpl_path_t execution_path = qpl_path_software;

    // Get path from input argument
    const int parse_ret = parse_execution_path(argc, argv, &execution_path);
    if (parse_ret != 0) { return 1; }

    // Get compression buffer size estimate
    const uint32_t compression_size = qpl_get_safe_deflate_compression_buffer_size(source_size);
    if (compression_size == 0) {
        std::cout << "Invalid source size. Source size exceeds the maximum supported size.\n";
        return 1;
    }

    // Source and output containers
    std::vector<uint8_t> source(source_size, 5);
    std::vector<uint8_t> destination(compression_size, 4);
    std::vector<uint8_t> reference(source_size, 7);

    std::unique_ptr<uint8_t[]> job_buffer;
    uint32_t                   job_size = 0U;
    qpl_histogram              deflate_histogram {};

    // Job initialization
    qpl_status status = qpl_get_job_size(execution_path, &job_size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job size getting.\n";
        return 1;
    }

    job_buffer   = std::make_unique<uint8_t[]>(job_size);
    qpl_job* job = reinterpret_cast<qpl_job*>(job_buffer.get());
    status       = qpl_init_job(execution_path, job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during compression job initializing.\n";
        return 1;
    }

    // Dictionary initialization
    std::unique_ptr<uint8_t[]> dictionary_buffer;
    qpl_dictionary*            dictionary_ptr         = nullptr;
    std::size_t                dictionary_buffer_size = 0;
    sw_compression_level       sw_compr_level         = sw_compression_level::SW_NONE;
    hw_compression_level       hw_compr_level         = hw_compression_level::HW_NONE;
    std::size_t                raw_dict_size          = 0;

    // Select dictionary levels
    if (execution_path == qpl_path_software) {
        sw_compr_level = sw_compression_level::LEVEL_1;
    } else {
        hw_compr_level = hw_compression_level::HW_LEVEL_1;
    }

    // Huffman table initialization
    qpl_huffman_table_t huffman_table = nullptr;

    status = qpl_deflate_huffman_table_create(combined_table_type, execution_path, DEFAULT_ALLOCATOR_C, &huffman_table);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during Huffman table creation.\n";
        return 1;
    }

    // Filling deflate histogram first
    status = qpl_gather_deflate_statistics(source.data(), source_size, &deflate_histogram, qpl_default_level,
                                           execution_path);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during gathering statistics for Huffman table.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    // Building the Huffman table
    status = qpl_huffman_table_init_with_histogram(huffman_table, &deflate_histogram);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during Huffman table initialization.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    // To build the dictionary, users must provide a raw dictionary.
    // To improve the compression ratio when using a dictionary, users should
    // set raw_dict_size to the maximum supported raw dictionary size;
    // refer to the Intel® Query Processing Library (Intel® QPL) documentation.
    // The raw dictionary should contain pieces of data that are most likely to occur in the real
    // datasets to be compressed.
    // In this example, to make things simple, we just use the source data as the raw dictionary.
    raw_dict_size               = source.size();
    const uint8_t* raw_dict_ptr = source.data();
    dictionary_buffer_size      = qpl_get_dictionary_size(sw_compr_level, hw_compr_level, raw_dict_size);

    dictionary_buffer = std::make_unique<uint8_t[]>(dictionary_buffer_size);
    dictionary_ptr    = reinterpret_cast<qpl_dictionary*>(dictionary_buffer.get());

    status = qpl_build_dictionary(dictionary_ptr, sw_compr_level, hw_compr_level, raw_dict_ptr, raw_dict_size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during dictionary building.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    // Performing canned compression with dictionary
    job->op            = qpl_op_compress;
    job->level         = qpl_default_level;
    job->next_in_ptr   = source.data();
    job->next_out_ptr  = destination.data();
    job->available_in  = source_size;
    job->available_out = static_cast<uint32_t>(destination.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_CANNED_MODE | QPL_FLAG_OMIT_VERIFY;
    job->dictionary    = dictionary_ptr;
    job->huffman_table = huffman_table;

    // Compression
    status = qpl_execute_job(job);

    // On qpl_path_hardware, if the available Intel® In-Memory Analytics Accelerator (Intel® IAA) hardware
    // does not support compression with dictionary, the job exits early and returns the appropriate error code.
    if (execution_path == qpl_path_hardware && status == QPL_STS_NOT_SUPPORTED_MODE_ERR) {
        std::cout
                << "Compression with dictionary is not supported on qpl_path_hardware. Note that only certain generations of Intel IAA support compression with dictionary.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 0;
    }

    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during compression.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    const uint32_t compressed_size = job->total_out;

    // Performing a decompression operation with the same dictionary used for compression
    job->op            = qpl_op_decompress;
    job->next_in_ptr   = destination.data();
    job->next_out_ptr  = reference.data();
    job->available_in  = compressed_size;
    job->available_out = static_cast<uint32_t>(reference.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_CANNED_MODE;
    job->dictionary    = dictionary_ptr;
    job->huffman_table = huffman_table;

    // Decompression
    status = qpl_execute_job(job);

    // qpl_path_hardware does not support canned decompression with dictionary
    if (execution_path == qpl_path_hardware && status == QPL_STS_NOT_SUPPORTED_MODE_ERR) {
        std::cout << "Canned decompression with dictionary is not supported on qpl_path_hardware.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 0;
    }

    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during decompression.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    // Freeing resources
    status = qpl_huffman_table_destroy(huffman_table);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during destroying Huffman table.\n";
        return 1;
    }

    status = qpl_fini_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job finalization.\n";
        return 1;
    }

    // Compare source and reference to verify the round trip
    for (size_t i = 0; i < source.size(); i++) {
        if (source[i] != reference[i]) {
            std::cout << "Content wasn't successfully compressed and decompressed.\n";
            return 1;
        }
    }

    std::cout << "Content was successfully compressed and decompressed.\n";
    std::cout << "Input size: " << source.size() << ", compressed size: " << compressed_size
              << ", compression ratio: " << (float)source.size() / (float)compressed_size << ".\n";

    return 0;
}

//* [QPL_LOW_LEVEL_CANNED_COMPRESSION_WITH_DICTIONARY_EXAMPLE] */
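
Both dictionary-enabled examples on this page repeat the same sizing, allocation, and build steps. One possible way to factor that out is sketched below; the helper name is made up for illustration and it only reuses the qpl_get_dictionary_size and qpl_build_dictionary calls shown above.

#include <cstddef>
#include <cstdint>
#include <memory>
#include <stdexcept>

#include "qpl/qpl.h"

// Illustrative helper (not part of Intel QPL): sizes, allocates, and builds a
// qpl_dictionary from a raw dictionary buffer. The returned buffer owns the
// dictionary storage and must outlive every job that references *dictionary_out.
inline std::unique_ptr<uint8_t[]> build_dictionary_buffer(sw_compression_level sw_level, hw_compression_level hw_level,
                                                          const uint8_t* raw_dict, std::size_t raw_dict_size,
                                                          qpl_dictionary** dictionary_out) {
    const std::size_t buffer_size = qpl_get_dictionary_size(sw_level, hw_level, raw_dict_size);

    auto buffer     = std::make_unique<uint8_t[]>(buffer_size);
    *dictionary_out = reinterpret_cast<qpl_dictionary*>(buffer.get());

    const qpl_status status = qpl_build_dictionary(*dictionary_out, sw_level, hw_level, raw_dict, raw_dict_size);
    if (status != QPL_STS_OK) { throw std::runtime_error("qpl_build_dictionary failed"); }

    return buffer;
}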

Compression

/*******************************************************************************
 * Copyright (C) 2022 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 ******************************************************************************/

//* [QPL_LOW_LEVEL_COMPRESSION_EXAMPLE] */

#include <iostream>
#include <memory>
#include <vector>

#include "qpl/qpl.h"

#include "examples_utils.hpp" // for argument parsing function

/**
 * @brief This example requires a command line argument to set the execution path. Valid values are `software_path`
 * and `hardware_path`.
 * In QPL, @ref qpl_path_software (`Software Path`) means that computations will be done on the CPU.
 * An accelerator can be used instead of the CPU. In this case, @ref qpl_path_hardware (`Hardware Path`) must be specified.
 * If it does not matter where the calculations are done, @ref qpl_path_auto (`Auto Path`) can be used to allow
 * the library to choose the execution path. Auto Path usage is not demonstrated by this example.
 *
 * @warning ---! Important !---
 * `Hardware Path` doesn't support all features declared for `Software Path`
 *
 */
constexpr const uint32_t source_size = 1000;

auto main(int argc, char** argv) -> int {
    std::cout << "Intel(R) Query Processing Library version is " << qpl_get_library_version() << ".\n";

    // Default to Software Path
    qpl_path_t execution_path = qpl_path_software;

    // Get path from input argument
    const int parse_ret = parse_execution_path(argc, argv, &execution_path);
    if (parse_ret != 0) { return 1; }

    // Get compression buffer size estimate
    const uint32_t compression_size = qpl_get_safe_deflate_compression_buffer_size(source_size);
    if (compression_size == 0) {
        std::cout << "Invalid source size. Source size exceeds the maximum supported size.\n";
        return 1;
    }

    // Source and output containers
    std::vector<uint8_t> source(source_size, 5);
    std::vector<uint8_t> destination(compression_size, 4);
    std::vector<uint8_t> reference(source_size, 7);

    std::unique_ptr<uint8_t[]> job_buffer;
    uint32_t                   size = 0;

    // Job initialization
    qpl_status status = qpl_get_job_size(execution_path, &size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job size getting.\n";
        return 1;
    }

    job_buffer   = std::make_unique<uint8_t[]>(size);
    qpl_job* job = reinterpret_cast<qpl_job*>(job_buffer.get());

    status = qpl_init_job(execution_path, job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job initializing.\n";
        return 1;
    }

    // Performing a compression operation
    job->op            = qpl_op_compress;
    job->level         = qpl_default_level;
    job->next_in_ptr   = source.data();
    job->next_out_ptr  = destination.data();
    job->available_in  = source_size;
    job->available_out = static_cast<uint32_t>(destination.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_DYNAMIC_HUFFMAN | QPL_FLAG_OMIT_VERIFY;

    // Compression
    status = qpl_execute_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during compression.\n";
        return 1;
    }

    const uint32_t compressed_size = job->total_out;

    // Performing a decompression operation
    job->op            = qpl_op_decompress;
    job->next_in_ptr   = destination.data();
    job->next_out_ptr  = reference.data();
    job->available_in  = compressed_size;
    job->available_out = static_cast<uint32_t>(reference.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST;

    // Decompression
    status = qpl_execute_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during decompression.\n";
        return 1;
    }

    // Freeing resources
    status = qpl_fini_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job finalization.\n";
        return 1;
    }

    // Compare source and reference to verify the round trip
    for (size_t i = 0; i < source.size(); i++) {
        if (source[i] != reference[i]) {
            std::cout << "Content wasn't successfully compressed and decompressed.\n";
            return 1;
        }
    }

    std::cout << "Content was successfully compressed and decompressed.\n";
    std::cout << "Input size: " << source.size() << ", compressed size: " << compressed_size
              << ", compression ratio: " << (float)source.size() / (float)compressed_size << ".\n";

    return 0;
}

//* [QPL_LOW_LEVEL_COMPRESSION_EXAMPLE] */
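
Like the other examples on this page, this one sets QPL_FLAG_OMIT_VERIFY so the library skips verifying the compressed stream it produced. The sketch below configures the same dynamic-Huffman compression job without that flag, so the built-in verification stays enabled; everything else matches the setup above, and the helper name is illustrative.

#include <cstdint>
#include <vector>

#include "qpl/qpl.h"

// Sketch only: same compression setup as above, but without QPL_FLAG_OMIT_VERIFY,
// so the library verifies the stream it just produced (slower, but catches
// corrupt output at compression time). `job` is assumed to be a qpl_job that
// has already been initialized with qpl_init_job().
inline void setup_verified_compression(qpl_job* job, std::vector<uint8_t>& source, std::vector<uint8_t>& destination) {
    job->op            = qpl_op_compress;
    job->level         = qpl_default_level;
    job->next_in_ptr   = source.data();
    job->next_out_ptr  = destination.data();
    job->available_in  = static_cast<uint32_t>(source.size());
    job->available_out = static_cast<uint32_t>(destination.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_DYNAMIC_HUFFMAN; // verification not omitted
}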

Compression with Huffman-only

/*******************************************************************************
 * Copyright (C) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 ******************************************************************************/

//* [QPL_LOW_LEVEL_HUFFMAN_ONLY_EXAMPLE] */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "qpl/qpl.h"

/**
 * @brief This example requires a command line argument to set the execution path. Valid values are `software_path`
 * and `hardware_path`.
 * In QPL, @ref qpl_path_software (`Software Path`) means that computations will be done on the CPU.
 * An accelerator can be used instead of the CPU. In this case, @ref qpl_path_hardware (`Hardware Path`) must be specified.
 * If it does not matter where the calculations are done, @ref qpl_path_auto (`Auto Path`) can be used to allow
 * the library to choose the execution path. Auto Path usage is not demonstrated by this example.
 *
 * @warning ---! Important !---
 * `Hardware Path` doesn't support all features declared for `Software Path`
 *
 * The C-based example demonstrates data compression/decompression using 2 separate jobs with Huffman-only encoding.
 */
#define source_size 1000U

int main(int argc, char** argv) {
    printf("Intel(R) Query Processing Library version is %s.\n", qpl_get_library_version());

    // Default to Software Path
    qpl_path_t execution_path = qpl_path_software;

    if (argc < 2) {
        printf("Parameter for execution path was not provided. Use hardware_path or software_path.\n");
        return 1;
    }

    // Get path from input argument
    if (strcmp(argv[1], "hardware_path") == 0) {
        execution_path = qpl_path_hardware;
        printf("The example will be run on the hardware path.\n");
    } else if (strcmp(argv[1], "software_path") == 0) {
        execution_path = qpl_path_software;
        printf("The example will be run on the software path.\n");
    } else if (strcmp(argv[1], "auto_path") == 0) {
        execution_path = qpl_path_auto;
        printf("The example will be run on the auto path.\n");
    } else {
        printf("argv[1] = %s", argv[1]);
        printf("Unrecognized value for execution path parameter. Use hardware_path, software_path or auto_path.\n");
        return 1;
    }

    // Source and output containers
    uint8_t source[source_size];
    uint8_t destination[source_size * 2];
    uint8_t reference[source_size];

    uint32_t size = 0;

    // Getting job size
    qpl_status status = qpl_get_job_size(execution_path, &size);
    if (status != QPL_STS_OK) {
        printf("An error acquired during job size getting. Error status = %d\n", status);
        return status;
    }

    qpl_job* compress_job = NULL;
    compress_job          = (qpl_job*)malloc(size);
    if (compress_job == NULL) {
        printf("An error acquired during allocation of compression job. Error status = %d\n", status);
        return status;
    }

    status = qpl_init_job(execution_path, compress_job);
    if (status != QPL_STS_OK) {
        printf("An error acquired during job initializing. Error status = %d\n", status);

        // Since qpl_init_job allocates and initializes internal structures,
        // it is required to call qpl_fini_job in case of an error to free all internal resources.
        qpl_fini_job(compress_job);
        free(compress_job);

        return status;
    }

    // Allocating the compression Huffman Table object for Huffman-only
    qpl_huffman_table_t c_huffman_table = NULL;

    // The next line is a workaround for DEFAULT_ALLOCATOR_C macros.
    // This macros works only with C++ code.
    allocator_t default_allocator_c = {malloc, free};
    status = qpl_huffman_only_table_create(compression_table_type, execution_path, default_allocator_c,
                                           &c_huffman_table);

    if (status != QPL_STS_OK) {
        printf("An error acquired during huffman table creation. Error status = %d\n", status);

        qpl_huffman_table_destroy(c_huffman_table);
        qpl_fini_job(compress_job);
        free(compress_job);

        return status;
    }

    // Initializing qpl_job structure before performing a compression operation.
    compress_job->op            = qpl_op_compress;
    compress_job->next_in_ptr   = source;
    compress_job->next_out_ptr  = destination;
    compress_job->available_in  = source_size;
    compress_job->available_out = (uint32_t)(source_size * 2);
    compress_job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_NO_HDRS | QPL_FLAG_GEN_LITERALS |
                          QPL_FLAG_DYNAMIC_HUFFMAN | QPL_FLAG_OMIT_VERIFY;
    compress_job->huffman_table = c_huffman_table;

    // Executing compression operation
    status = qpl_execute_job(compress_job);
    if (status != QPL_STS_OK) {
        printf("Error during compression occurred. Error status = %d\n", status);

        qpl_huffman_table_destroy(c_huffman_table);
        qpl_fini_job(compress_job);
        free(compress_job);

        return status;
    }

    const uint32_t last_bit_offset = compress_job->last_bit_offset;
    const uint32_t compressed_size = compress_job->total_out;

    // Freeing compression job resources
    status = qpl_fini_job(compress_job);
    if (status != QPL_STS_OK) {
        printf("An error acquired during compression job finalization. Error status = %d\n", status);

        free(compress_job);
        qpl_huffman_table_destroy(c_huffman_table);

        return status;
    }
    free(compress_job);

    // The code below checks if a compression operation works correctly

    // Allocating the decompression Huffman Table object for Huffman-only
    qpl_huffman_table_t d_huffman_table = NULL;
    status = qpl_huffman_only_table_create(decompression_table_type, execution_path, default_allocator_c,
                                           &d_huffman_table);

    if (status != QPL_STS_OK) {
        printf("An error acquired during decompression Huffman table creation. Error status = %d\n", status);

        qpl_huffman_table_destroy(c_huffman_table);
        qpl_huffman_table_destroy(d_huffman_table);

        return status;
    }

    // Initializing decompression table with the values from compression table
    status = qpl_huffman_table_init_with_other(d_huffman_table, c_huffman_table);
    if (status != QPL_STS_OK) {
        printf("An error acquired during decompression Huffman table initialization failed. Error status = %d\n",
               status);

        qpl_huffman_table_destroy(c_huffman_table);
        qpl_huffman_table_destroy(d_huffman_table);

        return status;
    }

    // Destroying compression huffman_table
    status = qpl_huffman_table_destroy(c_huffman_table);
    if (status != QPL_STS_OK) {
        printf("An error acquired during Huffman table destroying. Error status = %d\n", status);

        qpl_huffman_table_destroy(d_huffman_table);

        return status;
    }

    qpl_job* decompress_job = NULL;
    decompress_job          = (qpl_job*)malloc(size);
    if (decompress_job == NULL) {
        printf("An error acquired during malloc function for decompress job. Error status = %d\n", status);
        return status;
    }

    status = qpl_init_job(execution_path, decompress_job);
    if (status != QPL_STS_OK) {
        printf("An error acquired during compression job initializing. Error status = %d\n", status);

        qpl_huffman_table_destroy(d_huffman_table);
        qpl_fini_job(decompress_job);
        free(decompress_job);

        return status;
    }

    // Initializing decompression qpl_job structure before performing a decompression operation
    decompress_job->op              = qpl_op_decompress;
    decompress_job->next_in_ptr     = destination;
    decompress_job->next_out_ptr    = reference;
    decompress_job->available_in    = compressed_size;
    decompress_job->available_out   = (uint32_t)(source_size);
    decompress_job->ignore_end_bits = (8 - last_bit_offset) & 7;
    decompress_job->flags           = QPL_FLAG_NO_HDRS | QPL_FLAG_FIRST | QPL_FLAG_LAST;
    decompress_job->huffman_table   = d_huffman_table;

    // Executing decompression operation
    status = qpl_execute_job(decompress_job);
    if (status != QPL_STS_OK) {
        printf("Error during decompression occurred. Error status = %d\n", status);

        qpl_huffman_table_destroy(d_huffman_table);
        qpl_fini_job(decompress_job);
        free(decompress_job);

        return status;
    }

    // Freeing decompression job resources
    status = qpl_fini_job(decompress_job);
    if (status != QPL_STS_OK) {
        printf("An error acquired during decompression job finalization. Error status = %d\n", status);

        qpl_huffman_table_destroy(d_huffman_table);
        free(decompress_job);

        return status;
    }
    free(decompress_job);

    // Destroying decompression huffman_table
    status = qpl_huffman_table_destroy(d_huffman_table);
    if (status != QPL_STS_OK) {
        printf("An error acquired during decompression Huffman table destroying. Error status = %d\n", status);
        return status;
    }

    // Compare source and reference to verify the round trip
    for (size_t i = 0; i < source_size; i++) {
        if (source[i] != reference[i]) {
            printf("Content wasn't successfully compressed and decompressed.");
            return -1;
        }
    }

    printf("Content was successfully compressed and decompressed.\n");
    printf("Compressed size: %d\n", compressed_size);

    return 0;
}

//* [QPL_LOW_LEVEL_HUFFMAN_ONLY_EXAMPLE] */
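
One detail worth calling out from the example above: Huffman-only output is not byte-aligned, so the compression job reports last_bit_offset (the number of used bits in the final output byte) and the decompression job must set ignore_end_bits to the number of unused trailing bits. The C++ sketch below, with a hypothetical helper name, simply restates the computation the example performs inline.

#include <cstdint>
#include <iostream>

// Sketch only: mirrors how the Huffman-only example derives ignore_end_bits
// from last_bit_offset. With N used bits in the final byte, (8 - N) & 7
// trailing padding bits must be ignored by the decompressor.
inline uint32_t trailing_bits_to_ignore(uint32_t last_bit_offset) {
    return (8U - last_bit_offset) & 7U;
}

int main() {
    for (uint32_t used_bits = 0U; used_bits <= 8U; ++used_bits) {
        std::cout << "last_bit_offset = " << used_bits
                  << " -> ignore_end_bits = " << trailing_bits_to_ignore(used_bits) << '\n';
    }
    return 0;
}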

Compression with Dictionary

/*******************************************************************************
 * Copyright (C) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 ******************************************************************************/

//* [QPL_LOW_LEVEL_COMPRESSION_WITH_DICTIONARY_EXAMPLE] */

#include <iostream>
#include <memory>
#include <vector>

#include "qpl/qpl.h"

#include "examples_utils.hpp" // for argument parsing function

/**
 * @brief This example requires a command line argument to set the execution path. Valid values are `software_path`
 * and `hardware_path`.
 * In QPL, @ref qpl_path_software (`Software Path`) means that computations will be done on the CPU.
 * An accelerator can be used instead of the CPU. In this case, @ref qpl_path_hardware (`Hardware Path`) must be specified.
 * If it does not matter where the calculations are done, @ref qpl_path_auto (`Auto Path`) can be used to allow
 * the library to choose the execution path. Auto Path usage is not demonstrated by this example.
 *
 * @warning ---! Important !---
 * `Hardware Path` doesn't support all features declared for `Software Path`
 *
 */
constexpr const uint32_t source_size = 2048;

auto main(int argc, char** argv) -> int {
    std::cout << "Intel(R) Query Processing Library version is " << qpl_get_library_version() << ".\n";

    // Default to Software Path
    qpl_path_t execution_path = qpl_path_software;

    // Get path from input argument
    const int parse_ret = parse_execution_path(argc, argv, &execution_path);
    if (parse_ret != 0) { return 1; }

    // Get compression buffer size estimate
    const uint32_t compression_size = qpl_get_safe_deflate_compression_buffer_size(source_size);
    if (compression_size == 0) {
        std::cout << "Invalid source size. Source size exceeds the maximum supported size.\n";
        return 1;
    }

    // Source and output containers
    std::vector<uint8_t> source(source_size, 5);
    std::vector<uint8_t> destination(compression_size, 4);
    std::vector<uint8_t> reference(source_size, 7);

    std::unique_ptr<uint8_t[]> job_buffer;
    uint32_t                   job_size = 0U;

    // Job initialization
    qpl_status status = qpl_get_job_size(execution_path, &job_size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job size getting.\n";
        return 1;
    }

    job_buffer   = std::make_unique<uint8_t[]>(job_size);
    qpl_job* job = reinterpret_cast<qpl_job*>(job_buffer.get());
    status       = qpl_init_job(execution_path, job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during compression job initializing.\n";
        return 1;
    }

    // Dictionary initialization
    std::unique_ptr<uint8_t[]> dictionary_buffer;
    qpl_dictionary*            dictionary_ptr         = nullptr;
    std::size_t                dictionary_buffer_size = 0;
    sw_compression_level       sw_compr_level         = sw_compression_level::SW_NONE;
    hw_compression_level       hw_compr_level         = hw_compression_level::HW_NONE;
    std::size_t                raw_dict_size          = 0;

    // Select dictionary levels
    if (execution_path == qpl_path_software) {
        sw_compr_level = sw_compression_level::LEVEL_1;
    } else {
        hw_compr_level = hw_compression_level::HW_LEVEL_1;
    }

    // To build the dictionary, users must provide a raw dictionary.
    // To improve the compression ratio when using a dictionary, users should
    // set raw_dict_size to the maximum supported raw dictionary size;
    // refer to the Intel® Query Processing Library (Intel® QPL) documentation.
    // The raw dictionary should contain pieces of data that are most likely to occur in the real
    // datasets to be compressed.
    // In this example, to make things simple, we just use the source data as the raw dictionary.
    raw_dict_size               = source.size();
    const uint8_t* raw_dict_ptr = source.data();
    dictionary_buffer_size      = qpl_get_dictionary_size(sw_compr_level, hw_compr_level, raw_dict_size);

    dictionary_buffer = std::make_unique<uint8_t[]>(dictionary_buffer_size);
    dictionary_ptr    = reinterpret_cast<qpl_dictionary*>(dictionary_buffer.get());

    status = qpl_build_dictionary(dictionary_ptr, sw_compr_level, hw_compr_level, raw_dict_ptr, raw_dict_size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during dictionary building.\n";
        return 1;
    }

    // Performing a compression operation with dictionary
    job->op            = qpl_op_compress;
    job->level         = qpl_default_level;
    job->next_in_ptr   = source.data();
    job->next_out_ptr  = destination.data();
    job->available_in  = source_size;
    job->available_out = static_cast<uint32_t>(destination.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_DYNAMIC_HUFFMAN | QPL_FLAG_OMIT_VERIFY;
    job->dictionary    = dictionary_ptr;

    // Compression
    status = qpl_execute_job(job);

    // On qpl_path_hardware, if the available Intel® In-Memory Analytics Accelerator (Intel® IAA) hardware
    // does not support compression with dictionary, the job will fail with the appropriate error code.
    if (execution_path == qpl_path_hardware && status == QPL_STS_NOT_SUPPORTED_MODE_ERR) {
        std::cout
                << "Compression with dictionary is not supported on qpl_path_hardware. Note that only certain generations of Intel IAA support compression with dictionary.\n";
        return 0;
    }

    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during compression.\n";
        return 1;
    }

    const uint32_t compressed_size = job->total_out;

    // Performing a decompression operation with the same dictionary used for compression
    job->op            = qpl_op_decompress;
    job->next_in_ptr   = destination.data();
    job->next_out_ptr  = reference.data();
    job->available_in  = compressed_size;
    job->available_out = static_cast<uint32_t>(reference.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST;
    job->dictionary    = dictionary_ptr;

    // Decompression
    status = qpl_execute_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during decompression.\n";
        return 1;
    }

    // Freeing resources
    status = qpl_fini_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job finalization.\n";
        return 1;
    }

    // Compare source and reference to verify the round trip
    for (size_t i = 0; i < source.size(); i++) {
        if (source[i] != reference[i]) {
            std::cout << "Content wasn't successfully compressed and decompressed with dictionary.\n";
            return 1;
        }
    }

    std::cout << "Content was successfully compressed and decompressed with dictionary.\n";
    std::cout << "Input size: " << source.size() << ", compressed size: " << compressed_size
              << ", compression ratio: " << (float)source.size() / (float)compressed_size << ".\n";

    return 0;
}

//* [QPL_LOW_LEVEL_COMPRESSION_WITH_DICTIONARY_EXAMPLE] */
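
When the hardware path reports QPL_STS_NOT_SUPPORTED_MODE_ERR, this example simply stops. Another option, sketched below with a hypothetical helper, is to re-initialize the job on qpl_path_software and retry the same operation there; only calls already used in the example appear. Depending on how the dictionary was built, it may also need to be rebuilt with a software compression level before the retry.

#include <cstdint>
#include <memory>

#include "qpl/qpl.h"

// Hypothetical fallback (not part of the example): rebuild the job on the
// software path so that compression with dictionary can be retried after the
// hardware path returns QPL_STS_NOT_SUPPORTED_MODE_ERR. The caller re-fills
// the job fields and calls qpl_execute_job() again afterwards.
inline qpl_status reinit_job_on_software_path(std::unique_ptr<uint8_t[]>& job_buffer, qpl_job** job) {
    uint32_t job_size = 0U;

    qpl_status status = qpl_get_job_size(qpl_path_software, &job_size);
    if (status != QPL_STS_OK) { return status; }

    job_buffer = std::make_unique<uint8_t[]>(job_size);
    *job       = reinterpret_cast<qpl_job*>(job_buffer.get());

    return qpl_init_job(qpl_path_software, *job);
}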

Mixing Paths For Compression and Decompression with Dictionary

/*******************************************************************************
 * Copyright (C) 2024 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 ******************************************************************************/

//* [QPL_LOW_LEVEL_MIX_PATHS_COMP_DECOMP_W_DICT_EXAMPLE] */

#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

#include "qpl/qpl.h"

#include "examples_utils.hpp" // for argument parsing function

/**
 * @brief This example requires a command line argument to set the execution path. Valid values are `software_path`
 * and `hardware_path`.
 * In QPL, @ref qpl_path_software (`Software Path`) means that computations will be done on the CPU.
 * An accelerator can be used instead of the CPU. In this case, @ref qpl_path_hardware (`Hardware Path`) must be specified.
 * If it does not matter where the calculations are done, @ref qpl_path_auto (`Auto Path`) can be used to allow
 * the library to choose the execution path. Auto Path usage is not demonstrated by this example.
 *
 * @warning ---! Important !---
 * `Hardware Path` doesn't support all features declared for `Software Path`
 *
 */
constexpr const uint32_t source_size = 2048U;

// Deallocate dictionary
uint8_t destroy_dictionary(qpl_dictionary** dictionary_ptr) {
    if (*dictionary_ptr != nullptr) {
        free(*dictionary_ptr); //NOLINT(cppcoreguidelines-no-malloc)
        *dictionary_ptr = nullptr;
    }
    return 0;
}

// Create dictionary for the specified execution path
uint8_t create_dictionary(qpl_path_t execution_path, std::vector<uint8_t>& source, qpl_dictionary** dictionary_ptr) {
    std::size_t          dictionary_buffer_size = 0;
    sw_compression_level sw_compr_level         = sw_compression_level::SW_NONE;
    hw_compression_level hw_compr_level         = hw_compression_level::HW_NONE;

    // Select dictionary levels
    if (execution_path == qpl_path_software) {
        sw_compr_level = sw_compression_level::LEVEL_1;
    } else {
        hw_compr_level = hw_compression_level::HW_LEVEL_1;
    }

    // To build the dictionary, users must provide a raw dictionary.
    // To improve the compression ratio when using a dictionary, users should
    // set raw_dict_size to the maximum supported raw dictionary size;
    // refer to the Intel® Query Processing Library (Intel® QPL) documentation.
    // The raw dictionary should contain pieces of data that are most likely to occur in the real
    // datasets to be compressed.
    // In this example, to make things simple, we just use the source data as the raw dictionary.
    const std::size_t raw_dict_size = source.size();
    const uint8_t*    raw_dict_ptr  = source.data();

    // Determine the size needed for the dictionary
    dictionary_buffer_size = qpl_get_dictionary_size(sw_compr_level, hw_compr_level, raw_dict_size);

    // Allocate memory for the dictionary
    *dictionary_ptr = (qpl_dictionary*)malloc(dictionary_buffer_size); //NOLINT(cppcoreguidelines-no-malloc)

    if (*dictionary_ptr == nullptr) {
        std::cout << "Failed to allocate memory for the dictionary.\n";
        return 1; // Memory allocation failed
    }

    // Build the dictionary
    const qpl_status status =
            qpl_build_dictionary(*dictionary_ptr, sw_compr_level, hw_compr_level, raw_dict_ptr, raw_dict_size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " occurred during dictionary building.\n";
        destroy_dictionary(dictionary_ptr); // Clean up allocated memory
        return 1;
    }

    std::cout << "Dictionary was successfully built.\n";
    return 0;
}

// Compression with dictionary (dynamic Huffman) on the specified path
uint32_t compression(qpl_path_t execution_path, std::vector<uint8_t>& source, std::vector<uint8_t>& destination,
                     qpl_dictionary* dictionary_ptr) {
    std::unique_ptr<uint8_t[]> job_buffer;
    uint32_t                   job_size = 0U;

    // Job initialization
    qpl_status status = qpl_get_job_size(execution_path, &job_size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job size getting.\n";
        return 1;
    }

    job_buffer   = std::make_unique<uint8_t[]>(job_size);
    qpl_job* job = reinterpret_cast<qpl_job*>(job_buffer.get());
    status       = qpl_init_job(execution_path, job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during compression job initializing.\n";
        return 1;
    }

    std::cout << "Job was successfully initialized.\n";

    // Performing a compression operation with dictionary
    job->op            = qpl_op_compress;
    job->level         = qpl_default_level;
    job->next_in_ptr   = source.data();
    job->next_out_ptr  = destination.data();
    job->available_in  = source_size;
    job->available_out = static_cast<uint32_t>(destination.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_DYNAMIC_HUFFMAN | QPL_FLAG_OMIT_VERIFY;
    job->dictionary    = dictionary_ptr;

    // Compression
    status = qpl_execute_job(job);

    // On qpl_path_hardware, if the available Intel® In-Memory Analytics Accelerator (Intel® IAA) hardware
    // does not support compression with dictionary, the job will fail with the appropriate error code.
    if (execution_path == qpl_path_hardware && status == QPL_STS_NOT_SUPPORTED_MODE_ERR) {
        std::cout
                << "Compression with dictionary is not supported on qpl_path_hardware. Note that only certain generations of Intel IAA support compression with dictionary.\n";
        return status;
    }

    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during compression.\n";
        return 1;
    }

    const uint32_t compressed_size = job->total_out;

    // Freeing resources
    status = qpl_fini_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job finalization.\n";
        return 1;
    }

    // Update destination size
    destination.resize(compressed_size);

    std::cout << "Content was successfully compressed with dictionary on "
              << (execution_path == qpl_path_software ? "software"
                                                      : (execution_path == qpl_path_hardware ? "hardware" : "auto"))
              << " path.\n";

    return 0;
}

// Decompression with software_path
auto sw_decompression(std::vector<uint8_t>& destination, std::vector<uint8_t>& reference,
                      qpl_dictionary* dictionary_ptr) {
    std::unique_ptr<uint8_t[]> job_buffer;
    uint32_t                   job_size = 0U;

    // Get the compressed size from the resized destination
    const uint32_t compressed_size = static_cast<uint32_t>(destination.size());

    // Job initialization
    qpl_status status = qpl_get_job_size(qpl_path_software, &job_size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job size getting.\n";
        return 1;
    }

    job_buffer   = std::make_unique<uint8_t[]>(job_size);
    qpl_job* job = reinterpret_cast<qpl_job*>(job_buffer.get());
    status       = qpl_init_job(qpl_path_software, job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during compression job initializing.\n";
        return 1;
    }

    // Performing a decompression operation with the same dictionary used for compression
    job->op            = qpl_op_decompress;
    job->next_in_ptr   = destination.data();
    job->next_out_ptr  = reference.data();
    job->available_in  = compressed_size;
    job->available_out = static_cast<uint32_t>(reference.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST;
    job->dictionary    = dictionary_ptr;

    // Decompression
    status = qpl_execute_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during decompression.\n";
        return 1;
    }

    // Freeing resources
    status = qpl_fini_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job finalization.\n";
        return 1;
    }

    std::cout << "Content was successfully decompressed with dictionary.\n";

    return 0;
}

auto main(int argc, char** argv) -> int {
    std::cout << "Intel(R) Query Processing Library version is " << qpl_get_library_version() << ".\n";

    // Default to Hardware Path
    qpl_path_t execution_path = qpl_path_hardware;

    // Get path from input argument
    const int parse_ret = parse_execution_path(argc, argv, &execution_path);
    if (parse_ret != 0) { return 1; }

    // Get compression buffer size estimate
    const uint32_t compression_size = qpl_get_safe_deflate_compression_buffer_size(source_size);
    if (compression_size == 0) {
        std::cout << "Invalid source size. Source size exceeds the maximum supported size.\n";
        return 1;
    }

    // Source and output containers
    std::vector<uint8_t> source(source_size, 5);
    std::vector<uint8_t> destination(compression_size, 4);
    std::vector<uint8_t> reference(source_size, 7);

    // Dictionary initialization
    qpl_dictionary* dictionary_ptr = nullptr;

    // Build dictionary and check if building failed
    if (create_dictionary(execution_path, source, &dictionary_ptr) != 0) { return 1; }

    // Compression and check if compression failed
    const uint8_t comp_status = compression(execution_path, source, destination, dictionary_ptr);
    if (comp_status == QPL_STS_NOT_SUPPORTED_MODE_ERR) {
        // Free dictionary
        destroy_dictionary(&dictionary_ptr);
        return 0;
    } else if (comp_status != 0) {
        // Free dictionary
        destroy_dictionary(&dictionary_ptr);
        return comp_status;
    }

    // Decompression with software_path and check if decompression failed
    const uint8_t decomp_status = sw_decompression(destination, reference, dictionary_ptr);
    if (decomp_status != 0) {
        // Free dictionary
        destroy_dictionary(&dictionary_ptr);
        return decomp_status;
    }

    // Free dictionary
    destroy_dictionary(&dictionary_ptr);

    // Compare source and reference
    for (size_t i = 0; i < source.size(); i++) {
        if (source[i] != reference[i]) {
            std::cout << "Content wasn't successfully compressed and decompressed with dictionary.\n";
            return 1;
        }
    }

    std::cout << "Content was successfully compressed and decompressed with dictionary.\n";
    std::cout << "Input size: " << source.size() << ", compressed size: " << destination.size()
              << ", compression ratio: " << (float)source.size() / (float)destination.size() << ".\n";

    return 0;
}

//* [QPL_LOW_LEVEL_MIX_PATHS_COMP_DECOMP_W_DICT_EXAMPLE] */
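
create_dictionary() and destroy_dictionary() above manage the dictionary buffer with raw malloc()/free(). Purely as an alternative sketch, the same storage could be owned by a std::unique_ptr with a free() deleter, which removes the need to call destroy_dictionary() on every return path; the helper below is illustrative only, and qpl_build_dictionary() would still be called on *dictionary_out exactly as in create_dictionary().

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <memory>

#include "qpl/qpl.h"

// Illustrative alternative to the malloc()/free() pair used above: the
// unique_ptr owns the dictionary storage and releases it automatically.
using dictionary_storage = std::unique_ptr<uint8_t, decltype(&std::free)>;

inline dictionary_storage allocate_dictionary_storage(std::size_t buffer_size, qpl_dictionary** dictionary_out) {
    dictionary_storage buffer(static_cast<uint8_t*>(std::malloc(buffer_size)), &std::free);

    // Caller should check for nullptr before building the dictionary.
    *dictionary_out = reinterpret_cast<qpl_dictionary*>(buffer.get());
    return buffer; // keep alive for as long as *dictionary_out is used by any job
}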

Multi-chunk Compression with Fixed Block

/*******************************************************************************
 * Copyright (C) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 ******************************************************************************/

//* [QPL_LOW_LEVEL_COMPRESSION_MULTI_CHUNK_EXAMPLE] */

#include <algorithm>
#include <iostream>
#include <memory>
#include <vector>

#include "qpl/qpl.h"

#include "examples_utils.hpp" // for argument parsing function

#ifdef QPL_EXAMPLES_USE_LIBACCEL_CONFIG
#include <x86intrin.h>

extern "C" {

struct accfg_ctx;
struct accfg_device;
struct accfg_wq;

/* Instantiate a new library context */
int accfg_new(struct accfg_ctx** ctx);

/* Get first available device */
struct accfg_device* accfg_device_get_first(struct accfg_ctx* ctx);
/* Get next available device */
struct accfg_device* accfg_device_get_next(struct accfg_device* device);
/* Get numa id for device */
int accfg_device_get_numa_node(struct accfg_device* device);

/* macro to loop through all available devices */
#define accfg_device_foreach(ctx, device) \
    for (device = accfg_device_get_first(ctx); device != NULL; device = accfg_device_get_next(device))

/* Get first available workqueue on device */
struct accfg_wq* accfg_wq_get_first(struct accfg_device* device);
/* Get next available workqueue */
struct accfg_wq* accfg_wq_get_next(struct accfg_wq* wq);
/* Get max transfer size of workqueue */
uint64_t accfg_wq_get_max_transfer_size(struct accfg_wq* wq);

/* macro to loop through all available workqueues on device */
#define accfg_wq_foreach(device, wq) for (wq = accfg_wq_get_first(device); wq != NULL; wq = accfg_wq_get_next(wq))
}

/**
 * @brief This function gets the current NUMA node id.
 */
int32_t get_numa_id() noexcept {
#if defined(__linux__)
    uint32_t tsc_aux = 0;

    __rdtscp(&tsc_aux);

    // Linux encodes the NUMA node into bits [31:12] of TSC_AUX
    return static_cast<int32_t>(tsc_aux >> 12);
#else
    return -1;
#endif // if defined(__linux__)
}
#endif // #ifdef QPL_EXAMPLES_USE_LIBACCEL_CONFIG

/**
 * @brief This function queries the max transfer size of every available workqueue on the
 * given NUMA node (pass -1 to use the caller's current node) and sets max_transfer_size
 * to the minimum of those values. Returns 0 on success, or -1 on an accel-config loading error.
 */
int32_t get_min_max_transfer_size(uint64_t& max_transfer_size, int32_t numa_id = -1) {
#ifdef QPL_EXAMPLES_USE_LIBACCEL_CONFIG
    accfg_ctx*    ctx_ptr    = nullptr;
    accfg_device* device_ptr = nullptr;
    accfg_wq*     wq_ptr     = nullptr;

    if (numa_id == -1) { numa_id = get_numa_id(); }

    uint64_t current_min = UINT64_MAX;
    uint64_t current_value;

    int32_t context_creation_status = accfg_new(&ctx_ptr);
    if (0u != context_creation_status) { return -1; }
    accfg_device_foreach(ctx_ptr, device_ptr) {
        if (numa_id != accfg_device_get_numa_node(device_ptr)) { continue; }
        accfg_wq_foreach(device_ptr, wq_ptr) {
            current_value = accfg_wq_get_max_transfer_size(wq_ptr);
            if (current_value < current_min) { current_min = current_value; }
        }
    }
    max_transfer_size = current_min;
    return 0;
#else
    max_transfer_size = UINT64_MAX;
    return -1;
#endif // #ifdef QPL_EXAMPLES_USE_LIBACCEL_CONFIG
}

/**
 * @brief This example requires a command line argument to set the execution path. Valid values are `software_path`
 * and `hardware_path`.
 * In QPL, @ref qpl_path_software (`Software Path`) means that computations will be done with CPU.
 * Accelerator can be used instead of CPU. In this case, @ref qpl_path_hardware (`Hardware Path`) must be specified.
 * If there is no difference where calculations should be done, @ref qpl_path_auto (`Auto Path`) can be used to allow
 * the library to choose the path to execute. The Auto Path usage is not demonstrated by this example.
 *
 * @warning ---! Important !---
 * `Hardware Path` doesn't support all features declared for `Software Path`
 *
 * The example compresses the data in multiple chunks and decompresses it with a single job, using Deflate fixed Huffman encoding.
 * If the accel-config library is available, the example also checks that the chunk size does not exceed the
 * accelerator's configured maximum transfer size.
 */

constexpr uint32_t source_size = 21 * 1024 * 1024;

// In this example source data is split into `chunk_count` pieces.
// Compression is then performed via multiple job submissions.
constexpr uint32_t chunk_count = 7;

auto main(int argc, char** argv) -> int {
    std::cout << "Intel(R) Query Processing Library version is " << qpl_get_library_version() << ".\n";

    // Default to Software Path.
    qpl_path_t execution_path = qpl_path_software;

    // Get path from input argument.
    const int parse_ret = parse_execution_path(argc, argv, &execution_path);
    if (parse_ret != 0) { return 1; }

    // Calculate chunk size for the compression.
    uint32_t chunk_size = source_size / chunk_count;

    if (execution_path == qpl_path_hardware) {
        uint64_t max_transfer_size = 0U;
        if (get_min_max_transfer_size(max_transfer_size) == 0) {
            if (chunk_size > max_transfer_size) {
                std::cout << "Chunk size(" << chunk_size << ") exceeds configured max transfer size ("
                          << max_transfer_size << "), reducing chunk size.\n";
                chunk_size = max_transfer_size;
            }
        }
    }
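
    // Note: with the defaults above, the chunk size is 21 MiB / 7 = 3 MiB. If accel-config
    // reports a smaller maximum transfer size for the available workqueues (the exact limit
    // depends on the accelerator configuration), the clamp above shrinks the chunk size,
    // which simply increases the number of compression submissions in the loop below.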

    // Get compression buffer size estimate
    const uint32_t compression_size = qpl_get_safe_deflate_compression_buffer_size(source_size);
    if (compression_size == 0) {
        std::cout << "Invalid source size. Source size exceeds the maximum supported size.\n";
        return 1;
    }

    // Source and output containers.
    std::vector<uint8_t> source(source_size, 5);
    std::vector<uint8_t> destination(compression_size, 4);
    std::vector<uint8_t> reference(source_size, 7);

    std::unique_ptr<uint8_t[]> job_buffer;
    uint32_t                   size = 0;

    // Allocate and initialize job.
    qpl_status status = qpl_get_job_size(execution_path, &size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job size getting.\n";
        return 1;
    }

    job_buffer   = std::make_unique<uint8_t[]>(size);
    qpl_job* job = reinterpret_cast<qpl_job*>(job_buffer.get());

    status = qpl_init_job(execution_path, job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job initializing.\n";
        return 1;
    }

    // Initialize qpl_job structure before performing a compression operation.
    job->op            = qpl_op_compress;
    job->level         = qpl_default_level;
    job->next_in_ptr   = source.data();
    job->next_out_ptr  = destination.data();
    job->available_in  = source_size;
    job->available_out = static_cast<uint32_t>(destination.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_OMIT_VERIFY;
    job->huffman_table = NULL;

    uint32_t iteration_count   = 0U;
    uint32_t source_bytes_left = static_cast<uint32_t>(source.size());

    while (source_bytes_left > 0) {
        // Advance `next_in_ptr` to the start of the next chunk.
        // When reading from one contiguous buffer, as here, this step is not strictly necessary,
        // because the library advances `next_in_ptr` by the number of bytes consumed
        // at the end of the previous execution.
        job->next_in_ptr = source.data() + iteration_count * chunk_size;

        // In this example, all chunks are equal in size except possibly the last one,
        // so adjust the size and mark the job as LAST on the final chunk.
        if (chunk_size >= source_bytes_left) {
            job->flags |= QPL_FLAG_LAST;
            chunk_size = source_bytes_left;
        }

        source_bytes_left -= chunk_size;
        job->available_in = chunk_size;

        // Hardware requires that job->available_out does not exceed max_transfer_size
        job->available_out = std::min(chunk_size, job->available_out);

        // Execute compression operation.
        status = qpl_execute_job(job);
        if (status != QPL_STS_OK) {
            std::cout << "An error " << status << " acquired during compression.\n";
            return 1;
        }

        job->flags &= ~QPL_FLAG_FIRST;
        iteration_count++;
    }

    destination.resize(job->total_out);
    const uint32_t compressed_size = job->total_out;

    // The code below checks if a compression operation works correctly.

    // Initialize qpl_job structure before performing a decompression operation.
    job->op            = qpl_op_decompress;
    job->next_in_ptr   = destination.data();
    job->next_out_ptr  = reference.data();
    job->available_in  = compressed_size;
    job->available_out = static_cast<uint32_t>(reference.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST;

    // Execute decompression operation.
    status = qpl_execute_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during decompression.\n";
        return 1;
    }

    // Free resources.
    status = qpl_fini_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job finalization.\n";
        return 1;
    }

    // Compare compressed then decompressed buffer to original source.
    for (size_t i = 0; i < source.size(); i++) {
        if (source[i] != reference[i]) {
            std::cout << "Content wasn't successfully compressed and decompressed.\n";
            return 1;
        }
    }

    std::cout << "Content was successfully compressed and decompressed.\n";
    std::cout << "Input size: " << source.size() << ", compressed size: " << compressed_size
              << ", compression ratio: " << (float)source.size() / (float)compressed_size << ".\n";

    return 0;
}

//* [QPL_LOW_LEVEL_COMPRESSION_MULTI_CHUNK_EXAMPLE] */
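
The chunking protocol above reduces to how the FIRST and LAST flags are managed: FIRST is set only on the first submission, LAST only on the submission that consumes the final bytes, and FIRST is cleared after the first execution. A minimal sketch of that logic in isolation follows; the helper name is illustrative, the job is assumed to be initialized with its operation, level, and a sufficiently large output buffer as in the example above, and the hardware-path clamping of `available_out` is omitted for brevity.

#include <algorithm>

#include "qpl/qpl.h"

// Sketch: submit `input` to an already-configured compression job in fixed-size chunks,
// managing QPL_FLAG_FIRST / QPL_FLAG_LAST across the submissions.
static qpl_status compress_in_chunks(qpl_job* job, uint8_t* input, uint32_t input_size, uint32_t chunk_size) {
    job->flags |= QPL_FLAG_FIRST;

    uint32_t offset = 0U;
    while (offset < input_size) {
        const uint32_t this_chunk = std::min(chunk_size, input_size - offset);
        if (offset + this_chunk == input_size) { job->flags |= QPL_FLAG_LAST; }

        job->next_in_ptr  = input + offset;
        job->available_in = this_chunk;

        const qpl_status status = qpl_execute_job(job);
        if (status != QPL_STS_OK) { return status; }

        job->flags &= ~QPL_FLAG_FIRST; // only the first submission carries FIRST
        offset += this_chunk;
    }
    return QPL_STS_OK;
}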

Multi-chunk Compression with Static Block#

/*******************************************************************************
 * Copyright (C) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 ******************************************************************************/

//* [QPL_LOW_LEVEL_COMPRESSION_STATIC_MULTI_CHUNK_EXAMPLE] */

#include <iostream>
#include <memory>
#include <vector>

#include "qpl/qpl.h"

#include "examples_utils.hpp" // for argument parsing function

/**
 * @brief This example requires a command line argument to set the execution path. Valid values are `software_path`
 * and `hardware_path`.
 * In QPL, @ref qpl_path_software (`Software Path`) means that computations will be done with CPU.
 * Accelerator can be used instead of CPU. In this case, @ref qpl_path_hardware (`Hardware Path`) must be specified.
 * If there is no difference where calculations should be done, @ref qpl_path_auto (`Auto Path`) can be used to allow
 * the library to choose the path to execute. The Auto Path usage is not demonstrated by this example.
 *
 * @warning ---! Important !---
 * `Hardware Path` doesn't support all features declared for `Software Path`
 *
 * The example compresses the data in multiple chunks and decompresses it with a single job, using Deflate static Huffman encoding.
 *
 */

uint32_t sum(const std::vector<uint32_t>& vector) {
    uint32_t result = 0;
    for (const uint32_t value : vector) {
        result += value;
    }
    return result;
}

constexpr const uint32_t source_size = 1000;

auto main(int argc, char** argv) -> int {
    std::cout << "Intel(R) Query Processing Library version is " << qpl_get_library_version() << ".\n";

    // Default to Software Path.
    qpl_path_t execution_path = qpl_path_software;

    // Get path from input argument.
    const int parse_ret = parse_execution_path(argc, argv, &execution_path);
    if (parse_ret != 0) { return 1; }

    // Get compression buffer size estimate
    const uint32_t compression_size = qpl_get_safe_deflate_compression_buffer_size(source_size);
    if (compression_size == 0) {
        std::cout << "Invalid source size. Source size exceeds the maximum supported size.\n";
        return 1;
    }

    // Source and output containers.
    std::vector<uint8_t> source(source_size, 5);
    std::vector<uint8_t> destination(compression_size, 4);
    std::vector<uint8_t> reference(source_size, 7);

    std::unique_ptr<uint8_t[]> job_buffer;
    uint32_t                   size = 0;

    // Allocate and initialize job structure.
    qpl_status status = qpl_get_job_size(execution_path, &size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job size getting.\n";
        return 1;
    }

    job_buffer   = std::make_unique<uint8_t[]>(size);
    qpl_job* job = reinterpret_cast<qpl_job*>(job_buffer.get());

    status = qpl_init_job(execution_path, job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job initializing.\n";
        return 1;
    }

    // Allocate Huffman table object (c_huffman_table).
    qpl_huffman_table_t c_huffman_table = nullptr;
    status = qpl_deflate_huffman_table_create(compression_table_type, execution_path, DEFAULT_ALLOCATOR_C,
                                              &c_huffman_table);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during Huffman table creation.\n";
        return 1;
    }

    // Initialize Huffman table using deflate tokens histogram.
    qpl_histogram histogram {};
    status = qpl_gather_deflate_statistics(source.data(), source_size, &histogram, qpl_default_level, execution_path);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during gathering statistics for Huffman table.\n";
        qpl_huffman_table_destroy(c_huffman_table);
        return 1;
    }

    status = qpl_huffman_table_init_with_histogram(c_huffman_table, &histogram);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during Huffman table initialization.\n";
        qpl_huffman_table_destroy(c_huffman_table);
        return 1;
    }

    // Initialize qpl_job structure before performing a compression operation.
    job->op            = qpl_op_compress;
    job->level         = qpl_default_level;
    job->next_in_ptr   = source.data();
    job->next_out_ptr  = destination.data();
    job->available_in  = source_size;
    job->available_out = static_cast<uint32_t>(destination.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_OMIT_VERIFY;
    job->huffman_table = c_huffman_table;

    // In this example, the source data is split into 5 chunks of unequal sizes.
    // The sum of all chunk sizes MUST equal source_size.
    std::vector<uint32_t> chunk_sizes {50, 250, 150, 350, 200};
    if (sum(chunk_sizes) != source_size) {
        std::cout << "Sum of all chunk sizes isn't equal to source_size.\n";
        qpl_huffman_table_destroy(c_huffman_table);
        return 1;
    }

    uint32_t source_bytes_processed_previously = 0U;
    for (size_t iteration_count = 0; iteration_count < chunk_sizes.size(); iteration_count++) {
        // Set the job to LAST on the last iteration.
        if (iteration_count == chunk_sizes.size() - 1) { job->flags |= QPL_FLAG_LAST; }

        // Advance `next_in_ptr` by the number of bytes processed in previous iterations.
        // When reading from one contiguous buffer, as here, this step is not strictly necessary,
        // because the library advances `next_in_ptr` by the number of bytes consumed
        // at the end of the previous execution.
        job->next_in_ptr  = source.data() + source_bytes_processed_previously;
        job->available_in = chunk_sizes[iteration_count];

        // Execute compression operation.
        status = qpl_execute_job(job);
        if (status != QPL_STS_OK) {
            std::cout << "An error " << status << " acquired during compression.\n";
            qpl_huffman_table_destroy(c_huffman_table);
            return 1;
        }

        job->flags &= ~QPL_FLAG_FIRST;

        // Update offset for `next_in_ptr` by the total size of previous chunks.
        source_bytes_processed_previously += chunk_sizes[iteration_count];
    }
    const uint32_t compressed_size = job->total_out;

    // The code below checks if a compression operation works correctly.

    // Initialize qpl_job structure before performing a decompression operation.
    job->op            = qpl_op_decompress;
    job->next_in_ptr   = destination.data();
    job->next_out_ptr  = reference.data();
    job->available_in  = compressed_size;
    job->available_out = static_cast<uint32_t>(reference.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST;

    // Execute decompression operation.
    status = qpl_execute_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during decompression.\n";
        qpl_huffman_table_destroy(c_huffman_table);
        return 1;
    }

    // Destroy c_huffman_table.
    status = qpl_huffman_table_destroy(c_huffman_table);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during destroying Huffman table.\n";
        return 1;
    }

    // Free resources.
    status = qpl_fini_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job finalization.\n";
        return 1;
    }

    // Compare compressed then decompressed buffer with original source.
    for (size_t i = 0; i < source_size; i++) {
        if (source[i] != reference[i]) {
            std::cout << "Content wasn't successfully compressed and decompressed.\n";
            return 1;
        }
    }

    std::cout << "Content was successfully compressed and decompressed.\n";
    std::cout << "Input size: " << source_size << ", compressed size: " << compressed_size
              << ", compression ratio: " << (float)source_size / (float)compressed_size << ".\n";

    return 0;
}

//* [QPL_LOW_LEVEL_COMPRESSION_STATIC_MULTI_CHUNK_EXAMPLE] */
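
The multi-chunk examples above and the dynamic-Huffman example that follows differ mainly in how the compression job selects its Huffman codes: fixed blocks use neither a table nor a flag, static blocks attach a prebuilt compression table, and dynamic blocks set QPL_FLAG_DYNAMIC_HUFFMAN so the library derives a table from the data itself. The schematic helper below summarizes the three setups; the helper and enum names are illustrative, not part of the library.

#include "qpl/qpl.h"

// Illustrative selector for the three Deflate block styles used in these examples.
enum class deflate_block_mode { fixed, static_block, dynamic };

// Sketch: configure a one-shot compression job for the chosen block style.
// `table` is only consulted for static blocks and may be nullptr otherwise.
static void configure_compression(qpl_job* job, deflate_block_mode mode, qpl_huffman_table_t table) {
    job->op            = qpl_op_compress;
    job->level         = qpl_default_level;
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_OMIT_VERIFY;
    job->huffman_table = nullptr;

    switch (mode) {
        case deflate_block_mode::fixed:
            // Fixed Huffman codes defined by the Deflate standard; nothing else to set.
            break;
        case deflate_block_mode::static_block:
            // Reuse a prebuilt compression table, as in the example above.
            job->huffman_table = table;
            break;
        case deflate_block_mode::dynamic:
            // Let the library derive a Huffman table from the data for this job.
            job->flags |= QPL_FLAG_DYNAMIC_HUFFMAN;
            break;
    }
}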

Decompression “Output Overflow” Error#

/*******************************************************************************
 * Copyright (C) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 ******************************************************************************/

//* [QPL_LOW_LEVEL_DECOMPRESSION_OUTPUT_OVERFLOW_EXAMPLE] */

#include <iostream>
#include <memory>
#include <vector>

#include "qpl/qpl.h"

#include "examples_utils.hpp" // for argument parsing function

/**
 * @brief This example requires a command line argument to set the execution path. Valid values are `software_path`
 * and `hardware_path`.
 * In QPL, @ref qpl_path_software (`Software Path`) means that computations will be done with CPU.
 * Accelerator can be used instead of CPU. In this case, @ref qpl_path_hardware (`Hardware Path`) must be specified.
 * If there is no difference where calculations should be done, @ref qpl_path_auto (`Auto Path`) can be used to allow
 * the library to choose the path to execute. The Auto Path usage is not demonstrated by this example.
 *
 * @warning ---! Important !---
 * `Hardware Path` doesn't support all features declared for `Software Path`
 *
 */
constexpr const uint32_t source_size = 1000;

auto main(int argc, char** argv) -> int {
    std::cout << "Intel(R) Query Processing Library version is " << qpl_get_library_version() << ".\n";

    // Default to Software Path
    qpl_path_t execution_path = qpl_path_software;

    // Get path from input argument
    const int parse_ret = parse_execution_path(argc, argv, &execution_path);
    if (parse_ret != 0) { return 1; }

    // Get compression buffer size estimate
    const uint32_t compression_size = qpl_get_safe_deflate_compression_buffer_size(source_size);
    if (compression_size == 0) {
        std::cout << "Invalid source size. Source size exceeds the maximum supported size.\n";
        return 1;
    }

    // Source and output containers
    std::vector<uint8_t> source(source_size, 5);
    std::vector<uint8_t> destination(compression_size, 4);
    std::vector<uint8_t> reference(source_size, 7);

    std::unique_ptr<uint8_t[]> job_buffer;
    uint32_t                   size = 0;

    // Job initialization
    qpl_status status = qpl_get_job_size(execution_path, &size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job size getting.\n";
        return 1;
    }

    job_buffer   = std::make_unique<uint8_t[]>(size);
    qpl_job* job = reinterpret_cast<qpl_job*>(job_buffer.get());
    status       = qpl_init_job(execution_path, job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during compression job initializing.\n";
        return 1;
    }

    // Performing a compression operation
    job->op            = qpl_op_compress;
    job->level         = qpl_default_level;
    job->next_in_ptr   = source.data();
    job->next_out_ptr  = destination.data();
    job->available_in  = source_size;
    job->available_out = static_cast<uint32_t>(destination.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_DYNAMIC_HUFFMAN | QPL_FLAG_OMIT_VERIFY;

    // Compression
    status = qpl_execute_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during compression.\n";
        return 1;
    }

    const uint32_t compressed_size = job->total_out;

    // First, submit the decompression operation with an insufficient output size
    // in order to trigger a QPL_STS_MORE_OUTPUT_NEEDED error.
    const uint32_t insufficient_decompression_size = source_size / 2;

    job->op            = qpl_op_decompress;
    job->next_in_ptr   = destination.data();
    job->next_out_ptr  = reference.data();
    job->available_in  = compressed_size;
    job->available_out = insufficient_decompression_size;
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST;

    // Decompression
    status = qpl_execute_job(job);
    if (status != QPL_STS_OK) {
        if (status == QPL_STS_MORE_OUTPUT_NEEDED) {
            // Need to unset QPL_FLAG_FIRST, since it is a re-submission.
            job->flags &= ~QPL_FLAG_FIRST;

            // The library returns job->next_in_ptr and job->available_in already
            // updated for re-submission; the application is required to reset/update
            // job->next_out_ptr and job->available_out itself.
            //
            // In this example the reference buffer is in fact large enough,
            // so decompression continues into the same contiguous buffer at the correct offset.
            job->next_out_ptr  = reference.data() + job->total_out;
            job->available_out = static_cast<uint32_t>(reference.size()) - job->total_out;

            status = qpl_execute_job(job);

            if (status != QPL_STS_OK) {
                std::cout << "An error " << status << " acquired during decompression re-submission.\n";
                return 1;
            }
        } else {
            std::cout << "An unexpected error " << status << " acquired during decompression submission.\n";
            return 1;
        }
    }

    // Freeing resources
    status = qpl_fini_job(job);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during job finalization.\n";
        return 1;
    }

    // Compare the decompressed output with the original source
    for (size_t i = 0; i < source.size(); i++) {
        if (source[i] != reference[i]) {
            std::cout << "Content wasn't successfully compressed and decompressed.\n";
            return 1;
        }
    }

    std::cout << "Content was successfully compressed and decompressed.\n";
    std::cout << "Compressed size: " << compressed_size << '\n';

    return 0;
}

//* [QPL_LOW_LEVEL_DECOMPRESSION_OUTPUT_OVERFLOW_EXAMPLE] */
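
The re-submission above handles a single QPL_STS_MORE_OUTPUT_NEEDED occurrence because the reference buffer is known to be large enough. When the decompressed size is not known in advance, the same pattern generalizes to a loop that grows the output buffer between submissions. A hedged sketch of that loop follows; the initial size and growth policy are illustrative assumptions, not library requirements.

#include <vector>

#include "qpl/qpl.h"

// Sketch: decompress `compressed` into `output`, growing `output` and re-submitting
// whenever the library reports that more output space is needed. The job is assumed
// to be already allocated and initialized with qpl_init_job().
static qpl_status decompress_with_growth(qpl_job* job, std::vector<uint8_t>& compressed,
                                         std::vector<uint8_t>& output) {
    output.resize(compressed.size() * 4); // initial guess, illustrative only

    job->op            = qpl_op_decompress;
    job->next_in_ptr   = compressed.data();
    job->available_in  = static_cast<uint32_t>(compressed.size());
    job->next_out_ptr  = output.data();
    job->available_out = static_cast<uint32_t>(output.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST;

    qpl_status status = qpl_execute_job(job);
    while (status == QPL_STS_MORE_OUTPUT_NEEDED) {
        job->flags &= ~QPL_FLAG_FIRST; // continuation of the same stream

        // The input fields were already advanced by the library; only the output
        // buffer has to be refreshed by the application.
        output.resize(output.size() * 2);
        job->next_out_ptr  = output.data() + job->total_out;
        job->available_out = static_cast<uint32_t>(output.size()) - job->total_out;

        status = qpl_execute_job(job);
    }

    if (status == QPL_STS_OK) { output.resize(job->total_out); }
    return status;
}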

Serialization#

/*******************************************************************************
 * Copyright (C) 2022 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 ******************************************************************************/

//* [QPL_LOW_LEVEL_SERIALIZATION_EXAMPLE] */

#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

#include "qpl/qpl.h"

#include "examples_utils.hpp" // for argument parsing function

/**
 * @brief This example requires a command line argument to set the execution path. Valid values are `software_path`
 * and `hardware_path`.
 * In QPL, @ref qpl_path_software (`Software Path`) means that computations will be done with CPU.
 * Accelerator can be used instead of CPU. In this case, @ref qpl_path_hardware (`Hardware Path`) must be specified.
 * If there is no difference where calculations should be done, @ref qpl_path_auto (`Auto Path`) can be used to allow
 * the library to choose the path to execute. The Auto Path usage is not demonstrated by this example.
 *
 * @warning ---! Important !---
 * `Hardware Path` doesn't support all features declared for `Software Path`
 *
 */
constexpr const uint32_t source_size = 1000;

auto main(int argc, char** argv) -> int {
    std::cout << "Intel(R) Query Processing Library version is " << qpl_get_library_version() << ".\n";

    // Default to Software Path
    qpl_path_t execution_path = qpl_path_software;

    // Get path from input argument
    const int parse_ret = parse_execution_path(argc, argv, &execution_path);
    if (parse_ret != 0) { return 1; }

    std::vector<uint8_t> source(source_size, 5);

    qpl_status status = QPL_STS_OK;

    // Memory allocation for Huffman table
    qpl_huffman_table_t huffman_table = nullptr;

    status = qpl_deflate_huffman_table_create(combined_table_type, execution_path, DEFAULT_ALLOCATOR_C, &huffman_table);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during Huffman table creation.\n";
        return 1;
    }

    // Creation of deflate histogram
    qpl_histogram deflate_histogram {};

    status = qpl_gather_deflate_statistics(source.data(), source_size, &deflate_histogram, qpl_default_level,
                                           execution_path);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during gathering statistics for Huffman table.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    // Initialization of Huffman table with deflate histogram
    status = qpl_huffman_table_init_with_histogram(huffman_table, &deflate_histogram);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during Huffman table initialization.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    size_t serialized_size = 0U;

    // Getting size of a buffer to store serialized table and allocating memory for it
    status = qpl_huffman_table_get_serialized_size(huffman_table, DEFAULT_SERIALIZATION_OPTIONS, &serialized_size);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during getting serialized size of Huffman table.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    const std::unique_ptr<uint8_t[]> unique_buffer = std::make_unique<uint8_t[]>(serialized_size);
    uint8_t*                         buffer        = unique_buffer.get();

    // Serialization of a table
    status = qpl_huffman_table_serialize(huffman_table, buffer, serialized_size, DEFAULT_SERIALIZATION_OPTIONS);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during serializing Huffman table.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    // Deserialization of a table
    qpl_huffman_table_t other_huffman_table = nullptr;
    status = qpl_huffman_table_deserialize(buffer, serialized_size, DEFAULT_ALLOCATOR_C, &other_huffman_table);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during deserializing Huffman table.\n";
        qpl_huffman_table_destroy(huffman_table);
        return 1;
    }

    // Freeing resources
    status = qpl_huffman_table_destroy(huffman_table);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during destroying Huffman table.\n";
        return 1;
    }

    status = qpl_huffman_table_destroy(other_huffman_table);
    if (status != QPL_STS_OK) {
        std::cout << "An error " << status << " acquired during destroying Huffman table.\n";
        return 1;
    }

    std::cout << "Huffman table was successfully serialized and deserialized.\n";
    std::cout << "Serialized size: " << serialized_size << '\n';

    return 0;
}

//* [QPL_LOW_LEVEL_SERIALIZATION_EXAMPLE] */
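
A deserialized table behaves like the object it was serialized from, which is what makes serialization useful across processes: one process gathers statistics, builds, and serializes the table, and another deserializes it and attaches it to its own jobs. The schematic fragment below shows the deserialized table attached to a canned-mode decompression job; the job and buffers are assumed to be set up as in the earlier examples, and the compressed stream must have been produced with the matching table in canned mode.

    // Use the table recreated by qpl_huffman_table_deserialize() like the original one.
    job->op            = qpl_op_decompress;
    job->next_in_ptr   = compressed.data();
    job->available_in  = static_cast<uint32_t>(compressed.size());
    job->next_out_ptr  = output.data();
    job->available_out = static_cast<uint32_t>(output.size());
    job->flags         = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_CANNED_MODE;
    job->huffman_table = other_huffman_table;

    status = qpl_execute_job(job);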