
ONNX Runtime coredumps in Qt6 with versions from 1.21.0 to 1.23.2 #26861

@mathnovel

Description


Describe the issue

Environment

Platform OS: Windows 11, version 22631.6060
Qt version: Qt 6, C++17

Details

I use the ONNX Runtime library to load BRIA RMBG-2.0 for image matting, and the program crashes.
Using the same code, I tested ONNX Runtime libraries from 1.19 to 1.23.2; versions 1.19 and 1.20 work, while the other versions crash.
I searched the existing issues but did not find the cause.
At the same time, ONNX Runtime 1.23.2 works fine in Python (onnxruntime installed via pip; I confirmed the version by printing it out).
The attached image shows the stack trace from the crash when running in debug mode with version 1.23.2 of the library; I hope it provides useful information.

[Screenshot: stack trace from the crash captured in the debugger]
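
To narrow things down, here is a stripped-down check that only prints the runtime version and constructs the session. The model path is a placeholder for my local RMBG-2.0 file, and whether the crash already happens at Ort::Session construction is only my guess, not something I have confirmed.

#include <onnxruntime_cxx_api.h>
#include <iostream>

int main() {
    std::cout << "ONNX Runtime Version: " << Ort::GetVersionString() << std::endl;
    try {
        Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "rmbg-check");
        Ort::SessionOptions options;
        // Path is a placeholder for the local RMBG-2.0 ONNX file.
        Ort::Session session(env, L"RMBG-2.0.onnx", options);
        std::cout << "session created, inputs = " << session.GetInputCount() << std::endl;
    } catch (const Ort::Exception& e) {
        std::cerr << "ORT exception: " << e.what() << std::endl;
        return 1;
    }
    return 0;
}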

To reproduce

The implementation code:

#include "BriaRMBGInfer.h"
#include <onnxruntime_cxx_api.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <memory>
#include <chrono>
#define NOMINMAX  // must be defined before <windows.h> so the min/max macros do not clash

#include <windows.h>

//  std::string (UTF-8) → std::wstring
std::wstring utf8_to_wstring(const std::string& utf8_str) {
    if (utf8_str.empty()) return std::wstring();
    int wlen = MultiByteToWideChar(CP_UTF8, 0, utf8_str.data(), (int)utf8_str.size(), nullptr, 0);
    std::wstring wstr(wlen, 0);
    MultiByteToWideChar(CP_UTF8, 0, utf8_str.data(), (int)utf8_str.size(), &wstr[0], wlen);
    return wstr;
}


class BriaRMBGInferImpl {
public:
    Ort::Env env;
    Ort::Session session;

    BriaRMBGInferImpl(const std::string& model_path)
        //: env(ORT_LOGGING_LEVEL_WARNING, "BRIA-RMBG"),
        : env(ORT_LOGGING_LEVEL_FATAL, "BRIA-RMBG"),
        session(nullptr) {
        Ort::SessionOptions session_options;
        session_options.SetIntraOpNumThreads(8);
        session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);

        std::wstring str = utf8_to_wstring(model_path);
        session = Ort::Session(env, str.c_str(), session_options);

 
        auto input_count = session.GetInputCount();
        auto output_count = session.GetOutputCount();
        if (input_count != 1 || output_count != 1) {
            throw std::runtime_error("input/output count mismatch");
        }
    }
};

 
static std::unique_ptr<BriaRMBGInferImpl> g_impl = nullptr;

BriaRMBGInfer::BriaRMBGInfer(const std::string& model_path) {
    std::string version = Ort::GetVersionString();
    std::cout << "ONNX Runtime Version: " << version << std::endl;
    g_impl = std::make_unique<BriaRMBGInferImpl>(model_path);
}

BriaRMBGInfer::~BriaRMBGInfer() {
    g_impl.reset();
}

bool BriaRMBGInfer::processImage(const std::string& inputImagePath, const std::string& outputImagePath) {
    try {
        auto model_start = std::chrono::system_clock::now();
     
        cv::Mat src = cv::imread(inputImagePath);
        if (src.empty()) {
            std::cerr << "read fail: " << inputImagePath << std::endl;
            return false;
        }

        cv::Mat orig = src.clone();
        int orig_h = src.rows;
        int orig_w = src.cols;

 
        cv::cvtColor(src, src, cv::COLOR_BGR2RGB);
        cv::resize(src, src, cv::Size(inputWidth_, inputHeight_));
        src.convertTo(src, CV_32F, 1.0f / 255.0f); // [0,1]

  
        // Repack HWC (OpenCV layout) into NCHW, which the model expects.
        std::vector<float> input_tensor_values(1 * 3 * inputHeight_ * inputWidth_);
        float* data = reinterpret_cast<float*>(src.data);
        for (int c = 0; c < 3; ++c) {
            for (int i = 0; i < inputHeight_ * inputWidth_; ++i) {
                input_tensor_values[c * inputHeight_ * inputWidth_ + i] = data[i * 3 + c];
            }
        }

     
        std::vector<int64_t> input_shape = {1, 3, inputHeight_, inputWidth_};
        Ort::MemoryInfo memory_info{"Cpu", OrtDeviceAllocator, 0, OrtMemTypeDefault};
        Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
            memory_info, input_tensor_values.data(), input_tensor_values.size(),
            input_shape.data(), input_shape.size()
            );

 
        const char* input_names[] = {"pixel_values"};
        const char* output_names[] = {"alphas"};

        auto start = std::chrono::high_resolution_clock::now();
        auto output_tensors = g_impl->session.Run(
            Ort::RunOptions{nullptr},
            input_names, &input_tensor, 1,
            output_names, 1
            );
        auto end = std::chrono::high_resolution_clock::now();
        auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
        std::cout << "Inference time: " << ms << " ms\n";
 
 
        // The output buffer is owned by ONNX Runtime; wrap it, then clone so
        // alpha_mat keeps its own copy once output_tensors goes out of scope.
        auto output_tensor = output_tensors[0].GetTensorMutableData<float>();
        cv::Mat alpha_mat(inputHeight_, inputWidth_, CV_32F, output_tensor);
        alpha_mat = alpha_mat.clone();

 
        cv::Mat alpha_orig;
        cv::resize(alpha_mat, alpha_orig, cv::Size(orig_w, orig_h), 0, 0, cv::INTER_LINEAR);



 
        cv::Mat alpha_uint8;
        alpha_orig.convertTo(alpha_uint8, CV_8U, 255.0);

        auto model_finish = std::chrono::system_clock::now();
        auto elapsed_time = std::chrono::duration_cast<std::chrono::milliseconds>(model_finish - model_start);
        std::cerr << "model use time = " << elapsed_time.count() << " ms" << std::endl;

 

        int save_type = 1;  // 0: save with transparent background, 1: composite onto a solid background
        if (0 == save_type)
        {
            std::cerr << "save transparent image" << std::endl;
 
            cv::cvtColor(orig, orig, cv::COLOR_BGR2BGRA); 
            std::vector<cv::Mat> channels;
            cv::split(orig, channels);
            channels[3] = alpha_uint8;  
            cv::merge(channels, orig); 

       
            bool saved = cv::imwrite(outputImagePath, orig);
            if (!saved) {
                std::cerr << "save fail: " << outputImagePath << std::endl;
                return false;
            }
            std::cout << "save ok: " << outputImagePath << std::endl;
            return true;
        } else
        {
         
            cv::Mat orig_bgra;
            cv::cvtColor(orig, orig_bgra, cv::COLOR_BGR2BGRA);  

        
            std::vector<cv::Mat> channels;
            cv::split(orig_bgra, channels);  

   
            channels[3] = alpha_uint8;

 
            cv::Scalar bgColor = cv::Scalar(200, 200, 0, 255);  // opaque background color
            cv::Mat bg_mat(orig_bgra.size(), CV_8UC4, bgColor);

            // Alpha-blend the original image over the solid background, per pixel.
            for(int y = 0; y < orig_bgra.rows; ++y) {
                for(int x = 0; x < orig_bgra.cols; ++x) {
                    float alpha = channels[3].at<uchar>(y, x) / 255.0f;
                    for(int c = 0; c < 3; ++c) {
                        bg_mat.at<cv::Vec4b>(y, x)[c] = cv::saturate_cast<uchar>(
                            channels[c].at<uchar>(y, x) * alpha +
                            bg_mat.at<cv::Vec4b>(y, x)[c] * (1.0f - alpha));
                    }
                    // Force full opacity for clearly foreground or clearly background pixels.
                    if(alpha > 0.9f || alpha < 0.2f)
                    {
                        bg_mat.at<cv::Vec4b>(y, x)[3] = 255;
                    }
                    //bg_mat.at<cv::Vec4b>(y, x)[3] = 255;
                }
            }

     
            cv::merge(channels, orig_bgra);

          
            bool saved = cv::imwrite(outputImagePath, bg_mat); 
            if (!saved) {
                std::cerr << "save failed " << std::endl;
                return false;
            }
            std::cout << "save ok: "<<outputImagePath << std::endl;
        }
        auto save_finish = std::chrono::system_clock::now();
        elapsed_time = std::chrono::duration_cast<std::chrono::milliseconds>(save_finish - model_finish);
        std::cerr << "save file use time = " << elapsed_time.count() << " ms" << std::endl;

        return true;


    } catch (const std::exception& e) {
        std::cerr << "fatal error: " << e.what() << std::endl;
        return false;
    }
}
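
For reference, the header is roughly as follows; this is only a sketch, since the real BriaRMBGInfer.h is not attached, and the fixed 1024x1024 input size is simply what I use for RMBG-2.0. The minimal driver below uses placeholder file paths.

// BriaRMBGInfer.h (sketch of the declaration used above)
#pragma once
#include <string>

class BriaRMBGInfer {
public:
    explicit BriaRMBGInfer(const std::string& model_path);
    ~BriaRMBGInfer();
    bool processImage(const std::string& inputImagePath, const std::string& outputImagePath);

private:
    int inputWidth_  = 1024;   // resolution fed to RMBG-2.0
    int inputHeight_ = 1024;
};

// Minimal driver; the paths are placeholders for my local files.
int main() {
    BriaRMBGInfer infer("RMBG-2.0.onnx");
    return infer.processImage("input.jpg", "output.png") ? 0 : 1;
}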

Urgency

No response

Platform

Windows

OS Version

Windows 11 23H2

ONNX Runtime Installation

Released Package

ONNX Runtime Version or Commit ID

1.23.2

ONNX Runtime API

C++

Architecture

X64

Execution Provider

Default CPU

Execution Provider Library Version

No response
