
How to use a Caffe model with TensorRT (C++)

Posted: 2019-05-27 17:33:04

 

 

 

    // Option 1 (commented out): parse the Caffe prototxt/caffemodel, build the
    // engine, and keep (or serialize) the resulting gieModelStream.
    //IHostMemory* gieModelStream{nullptr};
    //const char* prototxt = "./googlenet/test_20181010.prototxt";                              // argv[1]
    //const char* caffemodel = "./googlenet/lane_area_lx1890_iter_320000_20181010.caffemodel";  // argv[2]
    //std::vector<std::string> output_blobnames;
    //output_blobnames.push_back(OUTPUT_BLOB_NAME_1);
    //output_blobnames.push_back(OUTPUT_BLOB_NAME_2);
    //caffeToGIEModel(prototxt, caffemodel, output_blobnames, 1, &plugin_factory, gieModelStream, true);
    //caffeToGIEModel_serialize(prototxt, caffemodel, output_blobnames, 1, &plugin_factory, gieModelStream, true, s);
    plugin_factory.destroyPlugin();  // release any plugins created during the (optional) build step
    //std::vector<std::string>().swap(output_blobnames);

    // Option 2: deserialize a previously serialized engine (here held in ss,
    // presumably a std::string read back from disk) and create an execution
    // context for inference.
    IRuntime* runtime = createInferRuntime(gLogger);
    //ICudaEngine* engine = runtime->deserializeCudaEngine(gieModelStream->data(), gieModelStream->size(), &plugin_factory);
    ICudaEngine* engine = runtime->deserializeCudaEngine(ss.data(), ss.size(), &plugin_factory);
    //if (gieModelStream)
    //    gieModelStream->destroy();
    IExecutionContext* context = engine->createExecutionContext();
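
The commented-out calls above reference a caffeToGIEModel helper that is not shown in the snippet. Below is a minimal sketch of what such a helper typically looks like with the legacy TensorRT Caffe-parser API (ICaffeParser), in the style of NVIDIA's samples: the signature is inferred from the call site, the Logger class stands in for the gLogger used above, and the FP16 switch is an assumption, so treat it as a starting point rather than the original author's implementation.

// A minimal sketch of a caffeToGIEModel helper (legacy TensorRT Caffe-parser API).
// The signature is inferred from the call site above; error checking is omitted.
#include "NvInfer.h"
#include "NvCaffeParser.h"
#include <iostream>
#include <string>
#include <vector>

// Minimal logger; the original code uses a global gLogger like this one.
class Logger : public nvinfer1::ILogger
{
    void log(Severity severity, const char* msg) override
    {
        if (severity != Severity::kINFO)
            std::cout << msg << std::endl;
    }
} gLogger;

void caffeToGIEModel(const char* deployFile,                   // Caffe .prototxt
                     const char* modelFile,                    // Caffe .caffemodel
                     const std::vector<std::string>& outputs,  // names of the output blobs
                     unsigned int maxBatchSize,
                     nvcaffeparser1::IPluginFactory* pluginFactory,
                     nvinfer1::IHostMemory*& gieModelStream,
                     bool useFp16)
{
    using namespace nvinfer1;
    using namespace nvcaffeparser1;

    // Create the builder and an empty network definition.
    IBuilder* builder = createInferBuilder(gLogger);
    INetworkDefinition* network = builder->createNetwork();

    // Parse the Caffe deploy/model files into the network, routing any
    // custom layers through the plugin factory.
    ICaffeParser* parser = createCaffeParser();
    parser->setPluginFactory(pluginFactory);
    const IBlobNameToTensor* blobNameToTensor =
        parser->parse(deployFile, modelFile, *network, DataType::kFLOAT);

    // Mark the requested blobs as network outputs.
    for (const auto& name : outputs)
        network->markOutput(*blobNameToTensor->find(name.c_str()));

    // Build the engine and serialize it into gieModelStream.
    builder->setMaxBatchSize(maxBatchSize);
    builder->setMaxWorkspaceSize(16 << 20);
    if (useFp16)
        builder->setFp16Mode(true);  // FP16 handling is an assumption here
    ICudaEngine* engine = builder->buildCudaEngine(*network);
    gieModelStream = engine->serialize();

    // Build-time objects are no longer needed once the engine is serialized.
    network->destroy();
    parser->destroy();
    engine->destroy();
    builder->destroy();
    shutdownProtobufLibrary();
}

With a helper like this, the commented-out block builds the engine once; writing gieModelStream->data() to a file and reading it back on later runs is presumably how the string ss consumed by the active deserializeCudaEngine call is produced.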

 

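Once the execution context exists, inference follows the usual TensorRT pattern: allocate one device buffer per binding, copy the input to the GPU, enqueue the batch, and copy the results back. The sketch below assumes a single input blob plus the two output blobs marked above; INPUT_BLOB_NAME, the placeholder output-blob values, and the element counts are assumptions to adapt to the actual prototxt.

// A minimal sketch of running inference with the execution context created above.
// Blob names and element counts are placeholders; adapt them to the network.
#include "NvInfer.h"
#include <cuda_runtime_api.h>

// OUTPUT_BLOB_NAME_1/2 must match the output blob names in the prototxt; they
// are defined elsewhere in the original code, so placeholder values are used here.
static const char* INPUT_BLOB_NAME    = "data";      // assumed Caffe input blob name
static const char* OUTPUT_BLOB_NAME_1 = "output_1";  // placeholder
static const char* OUTPUT_BLOB_NAME_2 = "output_2";  // placeholder

void doInference(nvinfer1::IExecutionContext& context,
                 const float* input, float* output1, float* output2,
                 int inputSize, int output1Size, int output2Size, int batchSize)
{
    const nvinfer1::ICudaEngine& engine = context.getEngine();

    // One device buffer per binding: the input blob and the two output blobs.
    void* buffers[3];
    const int inputIndex   = engine.getBindingIndex(INPUT_BLOB_NAME);
    const int output1Index = engine.getBindingIndex(OUTPUT_BLOB_NAME_1);
    const int output2Index = engine.getBindingIndex(OUTPUT_BLOB_NAME_2);

    cudaMalloc(&buffers[inputIndex],   batchSize * inputSize   * sizeof(float));
    cudaMalloc(&buffers[output1Index], batchSize * output1Size * sizeof(float));
    cudaMalloc(&buffers[output2Index], batchSize * output2Size * sizeof(float));

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // Host -> device copy, asynchronous execution, then device -> host copies.
    cudaMemcpyAsync(buffers[inputIndex], input,
                    batchSize * inputSize * sizeof(float),
                    cudaMemcpyHostToDevice, stream);
    context.enqueue(batchSize, buffers, stream, nullptr);
    cudaMemcpyAsync(output1, buffers[output1Index],
                    batchSize * output1Size * sizeof(float),
                    cudaMemcpyDeviceToHost, stream);
    cudaMemcpyAsync(output2, buffers[output2Index],
                    batchSize * output2Size * sizeof(float),
                    cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);

    cudaStreamDestroy(stream);
    for (void* buf : buffers)
        cudaFree(buf);
}

When inference is finished, the legacy (pre-TensorRT 8) idiom is to release the objects explicitly with context->destroy(), engine->destroy(), and runtime->destroy().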

Original: https://www.cnblogs.com/happyamyhope/p/10931449.html
