Encountering an NVBuffer error when sending frames to the AppSrc element

Hello,

In a system I’m trying to build with Jetson Nano and an IMX219 camera, I’m encountering an issue where I process the frames captured from the camera using GStreamer and OpenCV. Initially, I direct the captured frame to OpenCV for processing in the on_new_sample function. Then, I convert the processed frame back to a GStreamer buffer within another function (displayFrame) and send it to appsrc to display it on the screen. However, I’m encountering the error “nvbuf_utils: nvbuffer Payload Type not supported gst_nvvconv_transform: NvBufferGetParams Failed” when sending the frame to appsrc. How can I resolve this issue?

Thank you.

It is my code;

/*
    capsfilter1 = gst_element_factory_make("capsfilter", "cfilter1");
    tee0 = gst_element_factory_make("tee", "tee0");
    tee1 = gst_element_factory_make("tee", "tee1");
    q0 = gst_element_factory_make("queue", "q0");
    q1 = gst_element_factory_make("queue", "q1");
    q2 = gst_element_factory_make("queue", "q2");
    q3 = gst_element_factory_make("queue", "q3");
    compositor = gst_element_factory_make("compositor", "compositor0");
    GstElement *testsource = gst_element_factory_make("videotestsrc", "src_test");
    GstElement *videorate = gst_element_factory_make("videorate", "vrt");
*/

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <gst/gst.h>
#include <gst/video/videooverlay.h>
#include <gst/app/gstappsink.h>
#include <gst/app/gstappsrc.h>
#include <gdk/gdkx.h>
#include <gtk/gtk.h>
#include <thread>
#include <mutex>
#include <X11/Xlib.h>
#include <iostream>
// Add the other required headers here
// Running count of frames pulled from the appsink (capture side).
static int frame_counter = 0;
// Running count of frames pushed to the appsrc (display side).
static int frameCounter = 0;
// Latest frame shared between the capture callback and displayFrame().
// NOTE(review): cv::Mat::zeros takes (rows, cols), so (1280, 720) creates a
// 720-wide, 1280-tall image — the transpose of the intended 1280x720. The
// contents are overwritten by copyTo() before first use, but confirm the
// intended shape.
cv::Mat next_frame = cv::Mat::zeros(1280, 720, CV_8UC3);

// Guards all reads/writes of next_frame.
std::mutex frameMutex;
GMainLoop *loop = nullptr;
// Result of the most recent appsrc "push-buffer" emission.
GstFlowReturn ret;

// Capture-pipeline elements (camera -> appsink) and display-pipeline
// elements (appsrc -> screen); created and wired in main().
GstElement *pipeline, *source, *converter0, *sink0, *capsfilter0, *sink1, *pipelineFromOpenCV, *sourceFromOpenCV, *converter1, *capsfilter1;

// Pushes the latest shared frame into the display pipeline's appsrc.
// NOTE(review): declared with no parameters here, but defined below as
// displayFrame(cv::Mat) — the two do not match.
void displayFrame();

// Bus watch for both pipelines: reports EOS and errors, and stops the
// GLib main loop in either case so main() can tear the pipelines down.
static gboolean bus_call(GstBus *bus, GstMessage *msg, gpointer data)
{
    const GstMessageType msgType = GST_MESSAGE_TYPE(msg);

    if (msgType == GST_MESSAGE_EOS)
    {
        g_print("End of stream\n");
        g_main_loop_quit(loop);
    }
    else if (msgType == GST_MESSAGE_ERROR)
    {
        GError *error = nullptr;
        gchar *debug = nullptr;

        // Takes ownership of both out-params; the debug string is unused.
        gst_message_parse_error(msg, &error, &debug);
        g_free(debug);

        g_printerr("Error: %s\n", error->message);
        g_error_free(error);

        g_main_loop_quit(loop);
    }

    // Keep the watch installed for subsequent messages.
    return TRUE;
}

// appsink "new-sample" callback: pulls the newest sample, copies it into the
// shared next_frame buffer, draws an overlay rectangle, and hands the frame
// to displayFrame() for the display pipeline.
// Returns GST_FLOW_OK even on a missing/unmappable sample so capture keeps
// running.
static GstFlowReturn on_new_sample(GstElement *sink, gpointer user_data)
{
    frame_counter++; // Increment the counter on every invocation
    g_print("frame_counter : %d\n", frame_counter);
    GstSample *sample = gst_app_sink_pull_sample(GST_APP_SINK(sink));
    if (sample)
    {
        GstCaps *caps = gst_sample_get_caps(sample);
        GstStructure *structure = gst_caps_get_structure(caps, 0);
        gint width = 0, height = 0;
        gst_structure_get_int(structure, "width", &width);
        gst_structure_get_int(structure, "height", &height);
        GstBuffer *buffer = gst_sample_get_buffer(sample);
        GstMapInfo map;
        // BUG FIX: check the map result. For NVMM (device) memory the CPU map
        // can fail, and dereferencing map.data would then crash.
        if (!gst_buffer_map(buffer, &map, GST_MAP_READ))
        {
            g_printerr("Failed to map buffer for reading.\n");
            gst_sample_unref(sample);
            return GST_FLOW_OK;
        }

        // NOTE(review): this assumes the negotiated format is 3-byte BGR. If
        // the appsink actually receives NV12 (see the commented conversion
        // below), this wraps the wrong layout — confirm the caps on sink1.
        cv::Mat current_frame = cv::Mat(height, width, CV_8UC3, map.data);
        // cv::cvtColor(cv::Mat(height + height / 2, width, CV_8UC1, map.data), current_frame, cv::COLOR_YUV2BGR_NV12);

        g_print("Frame1 rows: %d, cols: %d\n", current_frame.rows, current_frame.cols);
        std::cout << "map.data: " << static_cast<void *>(map.data) << std::endl;

        if (!current_frame.empty())
        {
            {
                // RAII lock instead of manual lock()/unlock(): the mutex is
                // released even if copyTo throws.
                std::lock_guard<std::mutex> guard(frameMutex);
                current_frame.copyTo(next_frame);
            }
            g_print("Frame2 rows %d, cols: %d\n", next_frame.rows, next_frame.cols);
            if (next_frame.data == nullptr)
            {
                g_printerr("Error: next_frame.data is nullptr after cloning.");
            }

            cv::Point pt1(100, 100);
            cv::Point pt2(300, 300);

            cv::Scalar color(0, 255, 0); // Green

            cv::rectangle(next_frame, pt1, pt2, color, 2);
            g_print("Frame Received - Total Frames: %d\n", frame_counter);

            // BUG FIX: push from this callback instead of spawning a detached
            // std::thread per frame. Unbounded detached threads race on
            // next_frame and can outlive the pipelines during shutdown.
            displayFrame();
        }

        gst_buffer_unmap(buffer, &map);
        gst_sample_unref(sample);
    }

    return GST_FLOW_OK;
}

void displayFrame(cv::Mat frame)
{

    g_print("Display İnsideee!!! \n");
    cv::Mat readyFrame = cv::Mat::zeros(1280, 720, CV_8UC3);
    cv::Mat nv12_frame;
    frameCounter++;

    if (!next_frame.empty())
    {
        g_print("Here: \n");
        frameMutex.lock();
        next_frame.copyTo(readyFrame);
        frameMutex.unlock();
        g_print("Coming Frame: %d\n", frameCounter);
       

        GstBuffer *buffer = gst_buffer_new_wrapped_full(
            GST_MEMORY_FLAG_READONLY,
            readyFrame.data,
            readyFrame.total() * readyFrame.elemSize(),
            0,
            readyFrame.total() * readyFrame.elemSize(),
            nullptr,
            nullptr);

        g_signal_emit_by_name(sourceFromOpenCV, "push-buffer", buffer, &ret);

        gst_buffer_unref(buffer);
    }

}

int main(int argc, char *argv[])
{

    gst_init(&argc, &argv);

    pipeline = gst_element_factory_make("pipeline", "pipeline0");
    pipelineFromOpenCV = gst_element_factory_make("pipeline", "pipeline1");
    sourceFromOpenCV = gst_element_factory_make("appsrc", "src1");

    source = gst_element_factory_make("nvarguscamerasrc", "src");
    sink0 = gst_element_factory_make("autovideosink", "sink0");//autovideosink ximagesink nvoverlaysink
    sink1 = gst_element_factory_make("appsink", "sink1");
    converter0 = gst_element_factory_make("nvvideoconvert", "conv0"); //nvvidconv videoconvert  nvvideoconvert
    converter1 = gst_element_factory_make("nvvidconv", "conv1");
    capsfilter0 = gst_element_factory_make("capsfilter", "cfilter0");
    capsfilter1 = gst_element_factory_make("capsfilter", "cfilter1");

    gst_app_src_set_emit_signals(GST_APP_SRC(sourceFromOpenCV), true);

    g_object_set(G_OBJECT(capsfilter0),
                 "caps",
                 gst_caps_from_string("video/x-raw(memory:NVMM), width=1280, height=720, format=NV12, framerate=30/1"),
                 nullptr);

//    g_object_set(G_OBJECT(capsfilter1),
//                 "caps",
//                 gst_caps_from_string("video/x-raw(memory:NVMM), width=1280, height=720, format=NV12, framerate=30/1"),
//                 nullptr);

    g_object_set(G_OBJECT(source), "sensor-id", 0, "do-timestamp", TRUE, NULL);
    g_object_set(G_OBJECT(sink1), "async", FALSE);

    g_object_set(sink1, "emit-signals", TRUE, "sync", false, "max-buffers", 30, "drop", true, nullptr);
    g_signal_connect(sink1, "new-sample", G_CALLBACK(on_new_sample), nullptr);


    g_signal_connect(sourceFromOpenCV, "need-data", G_CALLBACK(displayFrame), NULL);



    if (!pipeline || !source || !capsfilter0 || !converter0 || !sink1)
    {
        g_printerr("Not all elements could be created. Exiting.\n");
        return -1;
    }

    if (!pipelineFromOpenCV || !sourceFromOpenCV || !converter1 || !sink0)
    {
        g_printerr("Not all elements could be created. Exiting1.\n");
        return -1;
    }


    gst_bin_add_many(GST_BIN(pipeline), source, capsfilter0, sink1, converter0, NULL);
    gst_bin_add_many(GST_BIN(pipelineFromOpenCV), sourceFromOpenCV, converter1, sink0, NULL);


    if (!gst_element_link_many(source, capsfilter0, converter0, sink1, NULL))
    {
        g_printerr("Link problem0.\n");
        return -1;
    }

    if (!gst_element_link_many(sourceFromOpenCV, converter1, sink0, NULL))
    {
        g_printerr("Link problem1.\n");
        return -1;
    }

    GstBus *bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
    gst_bus_add_watch(bus, (GstBusFunc)bus_call, NULL);
    gst_object_unref(bus);

    bus = gst_pipeline_get_bus(GST_PIPELINE(pipelineFromOpenCV));
    gst_bus_add_watch(bus, (GstBusFunc)bus_call, NULL);
    gst_object_unref(bus);

    int cnt = 0;
    GstStateChangeReturn ret;

    do
    {
        ret = gst_element_set_state(pipeline, GST_STATE_PLAYING);
        if (ret == GST_STATE_CHANGE_FAILURE)
        {
            g_printerr("Unable to set the pipeline to the playing state. Exiting.\n");
            gst_object_unref(pipeline);
            return -1;
        }

    } while (ret != GST_STATE_CHANGE_SUCCESS);

    do
    {
        ret = gst_element_set_state(pipelineFromOpenCV, GST_STATE_PLAYING);
        if (ret == GST_STATE_CHANGE_FAILURE)
        {
            g_printerr("Unable to set the pipeline to the playing state. Exiting1.\n");
            gst_object_unref(pipelineFromOpenCV);
            return -1;
        }

    } while (ret != GST_STATE_CHANGE_SUCCESS);

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);             

    gst_element_set_state(pipeline, GST_STATE_NULL);
    gst_object_unref(pipeline);

    gst_element_set_state(sourceFromOpenCV, GST_STATE_NULL);
    gst_object_unref(sourceFromOpenCV);

    g_main_loop_unref(loop);

    return 0;
}

And It is gst_debug log;
debug_output.txt (5.6 MB)

Hi,
Please use only the nvvidconv plugin; the nvvideoconvert plugin is meant for the DeepStream SDK. Also, please use the latest JetPack release, 4.6.4.

There are some samples and please give it a try:
Gstreamer (python) with appsrc need-data is called more times than FPS is set - #6 by DaneLLL
Displaying to the screen with OpenCV and GStreamer - #9 by DaneLLL

Thank you, I will examine and try the examples you provided. I’m not sure if you’ve looked into it, but if you’ve had a chance to glance at my code, is there any noticeable deficiency or structural flaw that caught your eye? If you’ve looked into it, could you please let me know?

As my second question, how reasonable would it be to progress with GPU-based operations in OpenCV? Would it benefit me in any way? I’ve been using gstreamer to avoid FPS issues. If I directly try GPU usage in OpenCV, would I encounter FPS problems or how efficiently would my system operate?

And my final question is, how can I update the JetPack version on my Jetson Nano Developer Kit from 4.6 to 4.6.4? Can I handle this process without experiencing any data loss?

Thank you for sharing your insights with me.

Hi,
We would suggest use VPI and NvBufSurface APIs to get optimal performance. You may check
VPI - Vision Programming Interface: Main Page
Jetson Linux API Reference: Main Page | NVIDIA Docs

Samples are in

/opt/nvidia/vpi2/
/usr/src/jetson_multimedia_api

The gstreamer plugins like nvarguscamerasrc, nvvidconv are open source. These are implemented based on NvBufSurface APIs. If you would like to use gstreamer, can also customize the plugins to run your use-case.

Hello,

Thank you. I will try to use them. While waiting for your response, I didn’t stand idle either. I tried to add CUDA usage to my code, but I’m getting the following error when compiling my code.

main.cpp: In function ‘void cudaProcessFrame(const cv::Mat&, cv::Mat&)’:
main.cpp:135:19: error: expected primary-expression before ‘<’ token
     invertColors<<<grid, block>>>(d_input, d_output, input.cols, input.rows, input.channels());
                   ^
main.cpp:135:33: error: expected primary-expression before ‘>’ token
     invertColors<<<grid, block>>>(d_input, d_output, input.cols, input.rows, input.channels());

It is my main code;


#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <gst/gst.h>
#include <gst/video/videooverlay.h>
#include <gst/app/gstappsink.h>
#include <gst/app/gstappsrc.h>

#include <thread>
#include <mutex>
#include <X11/Xlib.h>
#include <iostream>

#include <cuda.h>
#include <cuda_runtime.h>
#include <cudaEGL.h>
#include <cudnn.h>


//#include "cuda_file.cu"

// Latest frame shared between the appsink callback and displayFrame().
// NOTE(review): cv::Mat::zeros takes (rows, cols), so (1280, 720) is a
// 720-wide, 1280-tall image — the transpose of the intended 1280x720.
// It is overwritten by copyTo() before use, but confirm the intended shape.
cv::Mat next_frame = cv::Mat::zeros(1280, 720, CV_8UC3);
// Guards all access to next_frame.
std::mutex frameMutex;
GMainLoop *loop = nullptr;

// Pipeline elements created in main().
GstElement *pipeline, *source, *appsrc, *sink;
// Result of the most recent appsrc "push-buffer" emission.
GstFlowReturn ret;

// Pushes the latest shared frame into the appsrc.
void displayFrame();
// Runs the invertColors CUDA kernel over `input`, writing into `output`.
void cudaProcessFrame(const cv::Mat &input, cv::Mat &output);

// Kernel defined in the .cu file.
// NOTE(review): declared extern "C" here, but the .cu definition below has
// no extern "C" (C++ linkage) — the names will not match at link time.
// Also, a __global__ kernel can only be launched with <<<...>>> from
// CUDA-compiled code, not called like a host function.
extern "C" void invertColors(unsigned char *input, unsigned char *output, int width, int height, int channels);

// Watches the pipeline bus; quits the GLib main loop on EOS or on error
// (printing the error message first).
static gboolean bus_call(GstBus *bus, GstMessage *msg, gpointer data)
{
    switch (GST_MESSAGE_TYPE(msg))
    {
    case GST_MESSAGE_ERROR:
    {
        GError *error = nullptr;
        gchar *debug = nullptr;

        // Both out-params are owned by us after parsing; debug is unused.
        gst_message_parse_error(msg, &error, &debug);
        g_free(debug);
        g_printerr("Error: %s\n", error->message);
        g_error_free(error);
        g_main_loop_quit(loop);
        break;
    }
    case GST_MESSAGE_EOS:
        g_print("End of stream\n");
        g_main_loop_quit(loop);
        break;
    default:
        break;
    }

    // Keep receiving bus messages.
    return TRUE;
}

// appsink "new-sample" callback: pulls the newest frame, runs the CUDA
// processing step, publishes the PROCESSED frame to next_frame, and pushes
// it to the display appsrc.
// Returns GST_FLOW_OK even on a missing/unmappable sample so capture keeps
// running.
static GstFlowReturn on_new_sample(GstElement *sink, gpointer user_data)
{
    GstSample *sample = gst_app_sink_pull_sample(GST_APP_SINK(sink));
    if (sample)
    {
        GstCaps *caps = gst_sample_get_caps(sample);
        GstStructure *structure = gst_caps_get_structure(caps, 0);
        gint width = 0, height = 0;
        gst_structure_get_int(structure, "width", &width);
        gst_structure_get_int(structure, "height", &height);
        GstBuffer *buffer = gst_sample_get_buffer(sample);
        GstMapInfo map;
        // BUG FIX: check the map result before touching map.data.
        if (!gst_buffer_map(buffer, &map, GST_MAP_READ))
        {
            gst_sample_unref(sample);
            return GST_FLOW_OK;
        }

        // NOTE(review): assumes the negotiated format is 3-byte BGR — confirm
        // the appsink caps.
        cv::Mat current_frame = cv::Mat(height, width, CV_8UC3, map.data);
        if (!current_frame.empty())
        {
            // BUG FIX: process FIRST, then publish the processed result. The
            // original copied the raw frame into next_frame and discarded the
            // CUDA output, so the processed data never reached the display.
            cv::Mat processed_frame(height, width, CV_8UC3);
            cudaProcessFrame(current_frame, processed_frame);

            {
                // RAII lock: released even if copyTo throws.
                std::lock_guard<std::mutex> guard(frameMutex);
                processed_frame.copyTo(next_frame);
            }

            // BUG FIX: push synchronously instead of one detached thread per
            // frame — detached threads race on next_frame and can outlive the
            // pipeline during shutdown.
            displayFrame();
        }

        gst_buffer_unmap(buffer, &map);
        gst_sample_unref(sample);
    }

    return GST_FLOW_OK;
}

void displayFrame()
{
    cv::Mat readyFrame;
    frameMutex.lock();
    next_frame.copyTo(readyFrame);
    frameMutex.unlock();

    if (!readyFrame.empty())
    {
        GstBuffer *buffer = gst_buffer_new_allocate(NULL, readyFrame.total() * readyFrame.elemSize(), NULL);
        gst_buffer_fill(buffer, 0, readyFrame.data, readyFrame.total() * readyFrame.elemSize());

        GstFlowReturn ret;
        g_signal_emit_by_name(appsrc, "push-buffer", buffer, &ret);
        gst_buffer_unref(buffer);
    }
}

// Runs the invertColors kernel over `input`, writing the result into
// `output`. Both Mats must be pre-allocated with the same size and type;
// the copy sizes assume output.step == input.step — TODO confirm callers.
//
// NOTE(review): the launch syntax `invertColors<<<grid, block>>>` is only
// valid in a CUDA-compiled translation unit. nvcc compiles a .cpp file as
// plain C++, which is exactly the reported "expected primary-expression
// before '<' token" error. Fix the build, not this line: rename the file to
// .cu, compile with `nvcc -x cu main.cpp`, or move the launch into the .cu
// file behind a host-callable wrapper function.
// NOTE(review): none of the CUDA calls below are checked for errors, so a
// failed cudaMalloc or kernel launch is silent.
void cudaProcessFrame(const cv::Mat &input, cv::Mat &output)
{
    // Allocate memory on GPU
    unsigned char *d_input, *d_output;
    size_t bytes = input.step * input.rows;
    cudaMalloc(&d_input, bytes);
    cudaMalloc(&d_output, bytes);

    // Copy input data from CPU to GPU
    cudaMemcpy(d_input, input.data, bytes, cudaMemcpyHostToDevice);

    // Calculate block and grid dimensions
    dim3 block(32, 32);
    dim3 grid((input.cols + block.x - 1) / block.x, (input.rows + block.y - 1) / block.y);

    // Run kernel
    invertColors<<<grid, block>>>(d_input, d_output, input.cols, input.rows, input.channels());

    // Copy output data from GPU to CPU
    cudaMemcpy(output.data, d_output, bytes, cudaMemcpyDeviceToHost);

    // Free GPU memory
    cudaFree(d_input);
    cudaFree(d_output);
}

// Builds a single pipeline and runs the GLib main loop.
// NOTE(review): structural problems visible in this version:
//  - gst_element_link_many(source, appsrc, ...) cannot work: appsrc is a
//    source element with no sink pad, so nvarguscamerasrc can never link to
//    it. appsrc is meant to be fed from application code (displayFrame),
//    which implies two pipelines as in the earlier version of this program.
//  - g_object_set() is called on `source` before the NULL checks below.
int main(int argc, char *argv[])
{
    gst_init(&argc, &argv);

    pipeline = gst_pipeline_new("pipeline");
    source = gst_element_factory_make("nvarguscamerasrc", "src");
    appsrc = gst_element_factory_make("appsrc", "appsrc");
    sink = gst_element_factory_make("autovideosink", "sink");

    // Advertise the format of the buffers the application will push.
    GstCaps *caps = gst_caps_from_string("video/x-raw, format=BGR");
    gst_app_src_set_caps(GST_APP_SRC(appsrc), caps);
    gst_caps_unref(caps);

    GstElement *conv = gst_element_factory_make("videoconvert", "conv");

    // NOTE(review): "max-resolution" and "max-fps" do not look like
    // nvarguscamerasrc properties — g_object_set will warn at runtime;
    // verify with `gst-inspect-1.0 nvarguscamerasrc`.
    g_object_set(G_OBJECT(source), "sensor-id", 0, "do-timestamp", TRUE, "max-resolution", "1920x1080", "max-fps", 60, NULL);

    if (!pipeline || !source || !appsrc || !sink || !conv)
    {
        g_printerr("Not all elements could be created. Exiting.\n");
        return -1;
    }

    gst_bin_add_many(GST_BIN(pipeline), source, appsrc, conv, sink, NULL);

    // See the NOTE above: this link is expected to fail at the appsrc.
    if (!gst_element_link_many(source, appsrc, conv, sink, NULL))
    {
        g_printerr("Elements could not be linked.\n");
        gst_object_unref(pipeline);
        return -1;
    }

    GstBus *bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
    gst_bus_add_watch(bus, (GstBusFunc)bus_call, NULL);
    gst_object_unref(bus);

    int cnt = 0; // unused
    GstStateChangeReturn ret;

    // Spin until the state change completes (busy-waits while ASYNC).
    do
    {
        ret = gst_element_set_state(pipeline, GST_STATE_PLAYING);
        if (ret == GST_STATE_CHANGE_FAILURE)
        {
            g_printerr("Unable to set the pipeline to the playing state. Exiting.\n");
            gst_object_unref(pipeline);
            return -1;
        }
    } while (ret != GST_STATE_CHANGE_SUCCESS);

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);

    gst_element_set_state(pipeline, GST_STATE_NULL);
    gst_object_unref(pipeline);
    g_main_loop_unref(loop);

    return 0;
}

And it is my .cu file:

// invertColors.cu

#include <cuda_runtime.h>

// CUDA kernel: writes the per-channel complement (255 - v) of `input` into
// `output`. One thread handles one pixel; threads that fall outside the
// image bounds return without touching memory.
__global__ void invertColors(unsigned char *input, unsigned char *output, int width, int height, int channels)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    if (col >= width || row >= height)
        return;

    // Interleaved layout: pixel (row, col) starts at this byte offset.
    const int base = (row * width + col) * channels;
    for (int c = 0; c < channels; ++c)
        output[base + c] = 255 - input[base + c];
}

When I researched, I applied the solutions I found, but I couldn’t resolve the compilation error. I used both CMake and the nvcc command below. I received the same error I provided above in both cases. Do you have any recommendations for this?

nvcc main.cpp cuda_file.cu -o my_program -I/usr/local/include/opencv4 -I/usr/include/gstreamer-1.0 -I/usr/include/glib-2.0 -I/usr/lib/aarch64-linux-gnu/glib-2.0/include -L/usr/local/lib -lopencv_core -lopencv_imgproc -lopencv_highgui -L/usr/lib/aarch64-linux-gnu -lgstapp-1.0 -lgstvideo-1.0 -L/usr/local/cuda/lib64 -lcudart -lcuda -lcudnn

Thank you for everything :)

There has been no update from you for a while, so we assume this is no longer an issue.
Hence we are closing this topic. If need further support, please open a new one.
Thanks

Sorry for the late response.
Is this still an issue to support? Any result can be shared?