PointNet C++ Inference Deployment with the TensorRT Framework

This article walks through deploying PointNet inference with the TensorRT framework: classification, part segmentation, and semantic segmentation, each with a Python and a C++ implementation. If anything here is wrong or incomplete, feedback is welcome.

Classification

Python inference:

import numpy as np
import tensorrt as trt
import pycuda.autoinit 
import pycuda.driver as cuda  


point_num = 1024

 
def pc_normalize(pc):
    centroid = np.mean(pc, axis=0)
    pc = pc - centroid
    m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
    pc = pc / m
    return pc


if __name__ == '__main__':
    logger = trt.Logger(trt.Logger.WARNING)
    with open("cls.trt", "rb") as f, trt.Runtime(logger) as runtime:
        engine = runtime.deserialize_cuda_engine(f.read())
    # query binding shapes from the engine itself; the execution context is created once, later
    h_input = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)), dtype=np.float32)
    h_output0 = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(1)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(2)), dtype=np.float32)
    d_input = cuda.mem_alloc(h_input.nbytes)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    stream = cuda.Stream()
    
    file = './bed_0610.txt'
    data = np.loadtxt(file, delimiter=',').astype(np.float32)
    point_set = data[:, 0:3]
    point_set = point_set[0:point_num, :]     
    point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])
    points = np.reshape(point_set, ((1, point_num, 3)))
    points = points.swapaxes(2, 1)
    
    np.copyto(h_input, points.ravel())

    with engine.create_execution_context() as context:
        cuda.memcpy_htod_async(d_input, h_input, stream)
        context.execute_async_v2(bindings=[int(d_input), int(d_output0), int(d_output1)], stream_handle=stream.handle)
        cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
        cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
        stream.synchronize()
        outputs = np.argmax(h_output1)
        print(outputs)

C++ inference:

#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <cmath>
#include <cuda_runtime.h>
#include <NvInfer.h>
#include <NvInferRuntime.h>
#include <NvOnnxParser.h>


const int point_num = 1024;


void pc_normalize(std::vector<float>& points)
{
	float mean_x = 0, mean_y = 0, mean_z = 0;
	for (size_t i = 0; i < point_num; ++i)
	{
		mean_x += points[3 * i];
		mean_y += points[3 * i + 1];
		mean_z += points[3 * i + 2];
	}
	mean_x /= point_num;
	mean_y /= point_num;
	mean_z /= point_num;

	for (size_t i = 0; i < point_num; ++i)
	{
		points[3 * i] -= mean_x;
		points[3 * i + 1] -= mean_y;
		points[3 * i + 2] -= mean_z;
	}

	float m = 0;
	for (size_t i = 0; i < point_num; ++i)
	{
		float d = sqrt(pow(points[3 * i], 2) + pow(points[3 * i + 1], 2) + pow(points[3 * i + 2], 2));
		if (d > m)
			m = d;
	}

	for (size_t i = 0; i < point_num; ++i)
	{
		points[3 * i] /= m;
		points[3 * i + 1] /= m;
		points[3 * i + 2] /= m;
	}
}

class TRTLogger : public nvinfer1::ILogger 
{
public:
	virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override
	{
		if (severity <= Severity::kINFO)
			printf("%s\n", msg);
	}
} logger;

std::vector<unsigned char> load_file(const std::string& file) 
{
	std::ifstream in(file, std::ios::in | std::ios::binary);
	if (!in.is_open())
		return {};

	in.seekg(0, std::ios::end);
	size_t length = in.tellg();

	std::vector<uint8_t> data;
	if (length > 0) 
	{
		in.seekg(0, std::ios::beg);
		data.resize(length);
		in.read((char*)& data[0], length);
	}
	in.close();
	return data;
}

void classfier(std::vector<float> & points)
{
	TRTLogger logger;
	nvinfer1::ICudaEngine* engine;

	auto engine_data = load_file("cls.engine");
	nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(logger);
	engine = runtime->deserializeCudaEngine(engine_data.data(), engine_data.size());
	if (engine == nullptr)
	{
		printf("Deserialize cuda engine failed.\n");
		runtime->destroy();
		return;
	}

	nvinfer1::IExecutionContext* execution_context = engine->createExecutionContext();
	cudaStream_t stream = nullptr;
	cudaStreamCreate(&stream);

	float* input_data_host = nullptr;
	const size_t input_numel = 1 * 3 * point_num;
	cudaMallocHost(&input_data_host, input_numel * sizeof(float));
	for (size_t i = 0; i < 3; i++)
	{
		for (size_t j = 0; j < point_num; j++)
		{
			input_data_host[point_num * i + j] = points[3 * j + i];
		}
	}

	float* input_data_device = nullptr;
	float output_data_host0[4096];		// auxiliary output: the 64x64 feature-transform matrix
	float* output_data_device0 = nullptr;
	float output_data_host1[10];		// classification logits, one per class
	float* output_data_device1 = nullptr;
	cudaMalloc(&input_data_device, input_numel * sizeof(float));
	cudaMalloc(&output_data_device0, sizeof(output_data_host0));
	cudaMalloc(&output_data_device1, sizeof(output_data_host1));
	cudaMemcpyAsync(input_data_device, input_data_host, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream);
	float* bindings[] = { input_data_device, output_data_device0, output_data_device1 };

	bool success = execution_context->enqueueV2((void**)bindings, stream, nullptr);
	cudaMemcpyAsync(output_data_host0, output_data_device0, sizeof(output_data_host0), cudaMemcpyDeviceToHost, stream);
	cudaMemcpyAsync(output_data_host1, output_data_device1, sizeof(output_data_host1), cudaMemcpyDeviceToHost, stream);
	cudaStreamSynchronize(stream);

	int predict_label = std::max_element(output_data_host1, output_data_host1 + 10) - output_data_host1;
	std::cout << predict_label << std::endl;

	cudaStreamDestroy(stream);
	execution_context->destroy();
	engine->destroy();
	runtime->destroy();
}


int main()
{
	std::vector<float> points;
	std::ifstream infile;
	float x, y, z, nx, ny, nz;
	char ch;
	infile.open("bed_0610.txt");
	for (size_t i = 0; i < point_num; i++)
	{
		infile >> x >> ch >> y >> ch >> z >> ch >> nx >> ch >> ny >> ch >> nz;
		points.push_back(x);
		points.push_back(y);
		points.push_back(z);
	}
	infile.close();

	pc_normalize(points);

	classfier(points);

	return 0;
}
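
One caveat about the listing above: none of the CUDA calls check their return codes, so a failed allocation or copy fails silently. A minimal checking macro (my own addition, not part of the original code):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Wrap any call that returns cudaError_t; print the error and abort on failure.
#define CUDA_CHECK(call)                                                   \
	do {                                                                   \
		cudaError_t err_ = (call);                                         \
		if (err_ != cudaSuccess) {                                         \
			fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
				cudaGetErrorString(err_), __FILE__, __LINE__);             \
			exit(EXIT_FAILURE);                                            \
		}                                                                  \
	} while (0)

// Usage: CUDA_CHECK(cudaMalloc(&input_data_device, input_numel * sizeof(float)));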

The inference engine can also be built directly with trtexec.exe from TensorRT's bin directory, e.g. trtexec --onnx=cls.onnx --saveEngine=cls.engine (the file names here are just examples).
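
Alternatively, the engine can be built programmatically with the builder and ONNX-parser APIs (the NvOnnxParser.h header is already included in the listing above, though never used there). The following is a minimal sketch against the TensorRT 8 API; cls.onnx and cls.engine are assumed file names:

#include <fstream>
#include <NvInfer.h>
#include <NvOnnxParser.h>

// Parse an ONNX model and serialize a TensorRT engine to disk (TensorRT 8 API).
bool build_engine(nvinfer1::ILogger& logger)
{
	nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(logger);
	auto flag = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
	nvinfer1::INetworkDefinition* network = builder->createNetworkV2(flag);
	nvonnxparser::IParser* parser = nvonnxparser::createParser(*network, logger);
	if (!parser->parseFromFile("cls.onnx", static_cast<int>(nvinfer1::ILogger::Severity::kWARNING)))
		return false;

	nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();
	config->setMaxWorkspaceSize(1 << 28);	// 256 MB of builder scratch space
	nvinfer1::IHostMemory* serialized = builder->buildSerializedNetwork(*network, *config);
	if (serialized == nullptr)
		return false;

	std::ofstream out("cls.engine", std::ios::binary);
	out.write(reinterpret_cast<const char*>(serialized->data()), serialized->size());
	return true;
}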
I also implemented a CUDA version of the preprocessing, but it actually seems slower than the CPU version. The point count is probably just too small (only on the order of 10^3), so kernel-launch and synchronization overhead likely dominates the tiny amount of arithmetic — and admittedly my CUDA skills are still at the beginner level…

#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <NvInfer.h>
#include <NvInferRuntime.h>
#include <NvOnnxParser.h>


const int point_num = 1024;
const int thread_num = 1024;
const int block_num = 1;

__global__ void array_sum(float* data, float* val, int N)
{
	__shared__ double share_dTemp[thread_num];
	const int nStep = gridDim.x * blockDim.x;
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	double dTempSum = 0.0;
	for (int i = tid; i < N; i += nStep)
	{
		dTempSum += data[i];
	}
	share_dTemp[threadIdx.x] = dTempSum;
	__syncthreads();

	for (int i = blockDim.x / 2; i != 0; i /= 2)
	{
		if (threadIdx.x < i)
		{
			share_dTemp[threadIdx.x] += share_dTemp[threadIdx.x + i];
		}
		__syncthreads();
	}

	if (0 == threadIdx.x)
	{
		atomicAdd(val, share_dTemp[0]);
	}
}

__global__ void array_sub(float* data, float val, int N)
{
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	const int nStep = blockDim.x * gridDim.x;

	for (int i = tid; i < N; i += nStep)
	{
		data[i] = data[i] - val;
	}
}

__global__ void array_L2(float* in, float* out, int N)
{
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	const int nStep = blockDim.x * gridDim.x;

	for (int i = tid; i < N; i += nStep)
	{
		out[i] = sqrt(pow(in[i], 2) + pow(in[i + N], 2) + pow(in[i + 2 * N], 2));
	}
}

__global__ void array_max(float* mem, int numbers) 
{
	int tid = threadIdx.x;
	int idof = blockIdx.x * blockDim.x;
	int idx = tid + idof;
	extern __shared__ float tep[];
	if (idx >= numbers) return;
	tep[tid] = mem[idx];
	unsigned int bi = 0;
	for (int s = 1; s < blockDim.x; s = (s << 1))
	{
		unsigned int kid = tid << (bi + 1);
		if ((kid + s) >= blockDim.x || (idof + kid + s) >= numbers) break;
		tep[kid] = tep[kid] > tep[kid + s] ? tep[kid] : tep[kid + s];
		++bi;
		__syncthreads();
	}
	if (tid == 0) 
	{
		mem[blockIdx.x] = tep[0];
	}
}

__global__ void array_div(float* data, float val, int N)
{
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	const int nStep = blockDim.x * gridDim.x;

	for (int i = tid; i < N; i += nStep)
	{
		data[i] = data[i] / val;
	}
}

void pc_normalize_gpu(float* points)
{
	float *mean_x = NULL, *mean_y = NULL, *mean_z = NULL;
	cudaMalloc((void**)&mean_x, sizeof(float));
	cudaMalloc((void**)&mean_y, sizeof(float));
	cudaMalloc((void**)&mean_z, sizeof(float));
	// cudaMalloc does not zero memory, and array_sum accumulates with atomicAdd
	cudaMemset(mean_x, 0, sizeof(float));
	cudaMemset(mean_y, 0, sizeof(float));
	cudaMemset(mean_z, 0, sizeof(float));
	// launch configuration is <<<blocks, threads>>>
	array_sum << <block_num, thread_num >> > (points + 0 * point_num, mean_x, point_num);
	array_sum << <block_num, thread_num >> > (points + 1 * point_num, mean_y, point_num);
	array_sum << <block_num, thread_num >> > (points + 2 * point_num, mean_z, point_num);

	float mx, my, mz;
	cudaMemcpy(&mx, mean_x, sizeof(float), cudaMemcpyDeviceToHost);
	cudaMemcpy(&my, mean_y, sizeof(float), cudaMemcpyDeviceToHost);
	cudaMemcpy(&mz, mean_z, sizeof(float), cudaMemcpyDeviceToHost);

	array_sub << <block_num, thread_num >> > (points + 0 * point_num, mx / point_num, point_num);
	array_sub << <block_num, thread_num >> > (points + 1 * point_num, my / point_num, point_num);
	array_sub << <block_num, thread_num >> > (points + 2 * point_num, mz / point_num, point_num);
	//float* pts = (float*)malloc(sizeof(float) * point_num);
	//cudaMemcpy(pts, points, sizeof(float) * point_num, cudaMemcpyDeviceToHost);
	//for (size_t i = 0; i < point_num; i++)
	//{
	//	std::cout << pts[i] << std::endl;
	//}

	float* L2 = NULL;
	cudaMalloc((void**)& L2, sizeof(float) * point_num);
	array_L2 << <block_num, thread_num >> > (points, L2, point_num);
	//float* l2 = (float*)malloc(sizeof(float) * point_num);
	//cudaMemcpy(l2, L2, sizeof(float) * point_num, cudaMemcpyDeviceToHost);
	//for (size_t i = 0; i < point_num; i++)
	//{
	//	std::cout << l2[i] << std::endl;
	//}

	int tmp_num = point_num;
	int share_size = sizeof(float) * thread_num;
	int block_num = (tmp_num + thread_num - 1) / thread_num;
	do {
		array_max << <block_num, thread_num, share_size >> > (L2, tmp_num);
		tmp_num = block_num;
		block_num = (tmp_num + thread_num - 1) / thread_num;
	} while (tmp_num > 1);

	float max;
	cudaMemcpy(&max, L2, sizeof(float), cudaMemcpyDeviceToHost);
	//std::cout << max << std::endl;

	array_div << <block_num, thread_num >> > (points + 0 * point_num, max, point_num);
	array_div << <block_num, thread_num >> > (points + 1 * point_num, max, point_num);
	array_div << <block_num, thread_num >> > (points + 2 * point_num, max, point_num);

	cudaFree(mean_x);
	cudaFree(mean_y);
	cudaFree(mean_z);
	cudaFree(L2);
}

class TRTLogger : public nvinfer1::ILogger 
{
public:
	virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override
	{
		if (severity <= Severity::kINFO)
			printf("%s\n", msg);
	}
} logger;

std::vector<unsigned char> load_file(const std::string& file) 
{
	std::ifstream in(file, std::ios::in | std::ios::binary);
	if (!in.is_open())
		return {};

	in.seekg(0, std::ios::end);
	size_t length = in.tellg();

	std::vector<uint8_t> data;
	if (length > 0) 
	{
		in.seekg(0, std::ios::beg);
		data.resize(length);
		in.read((char*)& data[0], length);
	}
	in.close();
	return data;
}

void classfier(std::vector<float> & points)
{
	TRTLogger logger;
	nvinfer1::ICudaEngine* engine;

	auto engine_data = load_file("cls.engine");
	nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(logger);
	engine = runtime->deserializeCudaEngine(engine_data.data(), engine_data.size());
	if (engine == nullptr)
	{
		printf("Deserialize cuda engine failed.\n");
		runtime->destroy();
		return;
	}

	nvinfer1::IExecutionContext* execution_context = engine->createExecutionContext();
	cudaStream_t stream = nullptr;
	cudaStreamCreate(&stream);

	float* input_data_host = nullptr;
	const size_t input_numel = 1 * 3 * point_num;
	cudaMallocHost(&input_data_host, input_numel * sizeof(float));
	for (size_t i = 0; i < 3; i++)
	{
		for (size_t j = 0; j < point_num; j++)
		{
			input_data_host[point_num * i + j] = points[3 * j + i];
		}
	}

	float* input_data_device = nullptr;
	float output_data_host0[4096];
	float* output_data_device0 = nullptr;
	float output_data_host1[10];
	float* output_data_device1 = nullptr;
	cudaMalloc(&input_data_device, input_numel * sizeof(float));
	cudaMalloc(&output_data_device0, sizeof(output_data_host0));
	cudaMalloc(&output_data_device1, sizeof(output_data_host1));
	cudaMemcpyAsync(input_data_device, input_data_host, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream);
	cudaStreamSynchronize(stream);
	pc_normalize_gpu(input_data_device);	// normalize on the GPU; main() intentionally skips the CPU pc_normalize
	cudaDeviceSynchronize();
	float* bindings[] = { input_data_device, output_data_device0, output_data_device1 };

	bool success = execution_context->enqueueV2((void**)bindings, stream, nullptr);
	cudaMemcpyAsync(output_data_host0, output_data_device0, sizeof(output_data_host0), cudaMemcpyDeviceToHost, stream);
	cudaMemcpyAsync(output_data_host1, output_data_device1, sizeof(output_data_host1), cudaMemcpyDeviceToHost, stream);
	cudaStreamSynchronize(stream);

	int predict_label = std::max_element(output_data_host1, output_data_host1 + 10) - output_data_host1;
	std::cout << "\npredict_label: " << predict_label << std::endl;

	cudaStreamDestroy(stream);
	execution_context->destroy();
	engine->destroy();
	runtime->destroy();
}


int main()
{
	std::vector<float> points;
	std::ifstream infile;
	float x, y, z, nx, ny, nz;
	char ch;
	infile.open("sofa_0020.txt");
	for (size_t i = 0; i < point_num; i++)
	{
		infile >> x >> ch >> y >> ch >> z >> ch >> nx >> ch >> ny >> ch >> nz;
		points.push_back(x);
		points.push_back(y);
		points.push_back(z);
	}
	infile.close();

	classfier(points);

	return 0;
}
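
To check whether the GPU preprocessing actually pays off, both paths can be timed. Below is a minimal benchmarking sketch of my own (benchmark_normalize is a hypothetical helper; it assumes both pc_normalize from the CPU listing and pc_normalize_gpu are visible, that d_points is a device buffer in the planar [x | y | z] layout, and that h_points is the interleaved host copy):

#include <chrono>
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

// Time the GPU path with CUDA events and the CPU path with std::chrono.
void benchmark_normalize(float* d_points, std::vector<float>& h_points)
{
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	cudaEventRecord(start);
	pc_normalize_gpu(d_points);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	float gpu_ms = 0.0f;
	cudaEventElapsedTime(&gpu_ms, start, stop);

	auto t0 = std::chrono::high_resolution_clock::now();
	pc_normalize(h_points);
	auto t1 = std::chrono::high_resolution_clock::now();
	double cpu_ms = std::chrono::duration<double, std::milli>(t1 - t0).count();

	printf("GPU normalize: %.3f ms, CPU normalize: %.3f ms\n", gpu_ms, cpu_ms);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
}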

Part Segmentation

Python inference:

import numpy as np
import tensorrt as trt
import pycuda.autoinit 
import pycuda.driver as cuda  


point_num = 2048
class_num = 16
parts_num = 50

 
def to_categorical(y, class_num):
    """ 1-hot encodes a tensor """
    new_y = np.eye(class_num)[y,]
    return new_y.astype(np.float32)


def pc_normalize(pc):
    centroid = np.mean(pc, axis=0)
    pc = pc - centroid
    m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
    pc = pc / m
    return pc


if __name__ == '__main__':
    logger = trt.Logger(trt.Logger.WARNING)
    with open("part_seg.trt", "rb") as f, trt.Runtime(logger) as runtime:
        engine = runtime.deserialize_cuda_engine(f.read())
    # query binding shapes from the engine itself; the execution context is created once, later
    h_input0 = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)), dtype=np.float32)
    h_input1 = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(1)), dtype=np.float32)
    h_output0 = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(2)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(3)), dtype=np.float32)
    d_input0 = cuda.mem_alloc(h_input0.nbytes)
    d_input1 = cuda.mem_alloc(h_input1.nbytes)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    stream = cuda.Stream()
    
    data = np.loadtxt('85a15c26a6e9921ae008cc4902bfe3cd.txt').astype(np.float32)
    point_set = data[:, 0:3]
    point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])

    choice = np.random.choice(point_set.shape[0], point_num, replace=True)
    point_set = point_set[choice, :][:, 0:3]
    pts = point_set

    points = np.reshape(point_set, ((1, point_num, 3)))
    points = points.swapaxes(2, 1)
    label = np.array([[0]], dtype=np.int32)

    np.copyto(h_input0, points.ravel())
    np.copyto(h_input1,  to_categorical(label, class_num).ravel())

    with engine.create_execution_context() as context:
        cuda.memcpy_htod_async(d_input0, h_input0, stream)
        cuda.memcpy_htod_async(d_input1, h_input1, stream)
        context.execute_async_v2(bindings=[int(d_input0), int(d_input1),int(d_output0), int(d_output1)], stream_handle=stream.handle)
        cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
        cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
        stream.synchronize()

        outputs = h_output1.reshape(1, point_num, parts_num)
        cur_pred_val = np.zeros((1, point_num)).astype(np.int32)
        
        logits = outputs[0, :, :]
        cur_pred_val[0, :] = np.argmax(logits, 1)

        pts = np.append(pts.reshape(point_num, 3), cur_pred_val[0, :].reshape(point_num, 1), 1)
        np.savetxt('pred.txt', pts, fmt='%.06f')       

C++ inference:

#include <iostream>
#include <fstream>
#include <vector>
#include <ctime>
#include <algorithm>
#include <cmath>
#include <cuda_runtime.h>
#include <NvInfer.h>
#include <NvInferRuntime.h>
#include <NvOnnxParser.h>


const int point_num = 2048;
const int class_num = 16;
const int parts_num = 50;


void pc_normalize(std::vector<float>& points)
{
	// Normalize over all points in the cloud: the input file may contain more
	// than point_num points, and resampling to point_num happens afterwards,
	// mirroring the Python version (normalize first, then sample).
	const size_t num = points.size() / 3;
	float mean_x = 0, mean_y = 0, mean_z = 0;
	for (size_t i = 0; i < num; ++i)
	{
		mean_x += points[3 * i];
		mean_y += points[3 * i + 1];
		mean_z += points[3 * i + 2];
	}
	mean_x /= num;
	mean_y /= num;
	mean_z /= num;

	for (size_t i = 0; i < num; ++i)
	{
		points[3 * i] -= mean_x;
		points[3 * i + 1] -= mean_y;
		points[3 * i + 2] -= mean_z;
	}

	float m = 0;
	for (size_t i = 0; i < num; ++i)
	{
		float d = sqrt(pow(points[3 * i], 2) + pow(points[3 * i + 1], 2) + pow(points[3 * i + 2], 2));
		if (d > m)
			m = d;
	}

	for (size_t i = 0; i < num; ++i)
	{
		points[3 * i] /= m;
		points[3 * i + 1] /= m;
		points[3 * i + 2] /= m;
	}
}


void resample(std::vector<float>& points)
{
	srand((int)time(0));
	std::vector<int> choice(point_num);
	for (size_t i = 0; i < point_num; i++)
	{
		choice[i] = rand() % (points.size() / 3);
	}

	std::vector<float> temp_points(3 * point_num);
	for (size_t i = 0; i < point_num; i++)
	{
		temp_points[3 * i] = points[3 * choice[i]];
		temp_points[3 * i + 1] = points[3 * choice[i] + 1];
		temp_points[3 * i + 2] = points[3 * choice[i] + 2];
	}
	points = temp_points;
}


class TRTLogger : public nvinfer1::ILogger
{
public:
	virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override
	{
		if (severity <= Severity::kINFO)
			printf("%s\n", msg);
	}
} logger;

std::vector<unsigned char> load_file(const std::string& file)
{
	std::ifstream in(file, std::ios::in | std::ios::binary);
	if (!in.is_open())
		return {};

	in.seekg(0, std::ios::end);
	size_t length = in.tellg();

	std::vector<uint8_t> data;
	if (length > 0)
	{
		in.seekg(0, std::ios::beg);
		data.resize(length);
		in.read((char*)& data[0], length);
	}
	in.close();
	return data;
}

std::vector<int> classfier(std::vector<float>& points, std::vector<float>& labels)
{
	std::vector<int> max_index(point_num, 0);

	TRTLogger logger;
	nvinfer1::ICudaEngine* engine;

	auto engine_data = load_file("part_seg.engine");
	nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(logger);
	engine = runtime->deserializeCudaEngine(engine_data.data(), engine_data.size());
	if (engine == nullptr)
	{
		printf("Deserialize cuda engine failed.\n");
		runtime->destroy();
		return max_index;
	}

	nvinfer1::IExecutionContext* execution_context = engine->createExecutionContext();
	cudaStream_t stream = nullptr;
	cudaStreamCreate(&stream);

	float* input_data_host0 = nullptr;
	const size_t input_numel = 1 * 3 * point_num;
	cudaMallocHost(&input_data_host0, input_numel * sizeof(float));
	for (size_t i = 0; i < 3; i++)
	{
		for (size_t j = 0; j < point_num; j++)
		{
			input_data_host0[point_num * i + j] = points[3 * j + i];
		}
	}

	float* input_data_host1 = nullptr;
	cudaMallocHost(&input_data_host1, 1 * 1 * class_num * sizeof(float));
	for (size_t i = 0; i < class_num; i++)
	{
		input_data_host1[i] = labels[i];
	}

	float* input_data_device0 = nullptr;
	float* input_data_device1 = nullptr;
	float output_data_host0[1 * 128 * 128];
	float* output_data_device0 = nullptr;
	float output_data_host1[1 * point_num * parts_num];
	float* output_data_device1 = nullptr;
	cudaMalloc(&input_data_device0, input_numel * sizeof(float));
	cudaMalloc(&input_data_device1, class_num * sizeof(float));
	cudaMalloc(&output_data_device0, sizeof(output_data_host0));
	cudaMalloc(&output_data_device1, sizeof(output_data_host1));
	cudaMemcpyAsync(input_data_device0, input_data_host0, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream);
	cudaMemcpyAsync(input_data_device1, input_data_host1, class_num * sizeof(float), cudaMemcpyHostToDevice, stream);
	float* bindings[] = { input_data_device0, input_data_device1, output_data_device0, output_data_device1 };

	bool success = execution_context->enqueueV2((void**)bindings, stream, nullptr);
	cudaMemcpyAsync(output_data_host0, output_data_device0, sizeof(output_data_host0), cudaMemcpyDeviceToHost, stream);
	cudaMemcpyAsync(output_data_host1, output_data_device1, sizeof(output_data_host1), cudaMemcpyDeviceToHost, stream);
	cudaStreamSynchronize(stream);

	std::vector<std::vector<float>> outputs(point_num, std::vector<float>(parts_num, 0));
	for (size_t i = 0; i < point_num; i++)
	{
		for (size_t j = 0; j < parts_num; j++)
		{
			outputs[i][j] = output_data_host1[i * parts_num + j];
		}
	}

	for (size_t i = 0; i < point_num; i++)
	{
		max_index[i] = std::max_element(outputs[i].begin(), outputs[i].end()) - outputs[i].begin();
	}

	cudaStreamDestroy(stream);
	execution_context->destroy();
	engine->destroy();
	runtime->destroy();

	return max_index;
}


int main()
{
	std::vector<float> points, labels;
	float x, y, z, nx, ny, nz, label;
	std::ifstream infile("85a15c26a6e9921ae008cc4902bfe3cd.txt");
	while (infile >> x >> y >> z >> nx >> ny >> nz >> label)
	{
		points.push_back(x);
		points.push_back(y);
		points.push_back(z);
	}
	for (size_t i = 0; i < class_num; i++)
	{
		labels.push_back(0.0);
	}
	labels[0] = 1.0;
	infile.close();

	pc_normalize(points);

	resample(points);

	std::vector<int> result = classfier(points, labels);

	std::ofstream outfile("pred.txt");
	for (size_t i = 0; i < point_num; i++)
	{
		outfile << points[3 * i] << " " << points[3 * i + 1] << " " << points[3 * i + 2] << " " << result[i] << std::endl;
	}
	outfile.close();

	return 0;
}
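
The second network input is a 1 x 16 one-hot vector encoding the object category (hard-coded to class 0 in main above, mirroring the Python to_categorical call). A small helper of my own for building it for any class id:

#include <vector>

// Build a one-hot label vector of length class_num with a 1 at class_id.
std::vector<float> make_one_hot(int class_id, int class_num)
{
	std::vector<float> labels(class_num, 0.0f);
	labels[class_id] = 1.0f;
	return labels;
}

// Usage: std::vector<float> labels = make_one_hot(0, class_num);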

Semantic Segmentation

Python inference:

import numpy as np
import tensorrt as trt
import pycuda.autoinit 
import pycuda.driver as cuda  


point_num = 4096
class_num = 13
stride = 0.5
block_size = 1.0


if __name__ == '__main__':
    logger = trt.Logger(trt.Logger.WARNING)
    with open("sem_seg.trt", "rb") as f, trt.Runtime(logger) as runtime:
        engine = runtime.deserialize_cuda_engine(f.read())
    # query binding shapes from the engine itself; the execution context is created once, later
    h_input = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)), dtype=np.float32)
    h_output0 = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(1)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(2)), dtype=np.float32)
    d_input = cuda.mem_alloc(h_input.nbytes)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    stream = cuda.Stream()
    
    data = np.load('Area_1_conferenceRoom_1.npy')
    points = data[:,:6]
    coord_min, coord_max = np.amin(points, axis=0)[:3], np.amax(points, axis=0)[:3]
    grid_x = int(np.ceil(float(coord_max[0] - coord_min[0] - block_size) / stride) + 1)
    grid_y = int(np.ceil(float(coord_max[1] - coord_min[1] - block_size) / stride) + 1)
    data_room, index_room = np.array([]), np.array([])
    for index_y in range(0, grid_y):
        for index_x in range(0, grid_x):
            s_x = coord_min[0] + index_x * stride
            e_x = min(s_x + block_size, coord_max[0])
            s_x = e_x - block_size
            s_y = coord_min[1] + index_y * stride
            e_y = min(s_y + block_size, coord_max[1])
            s_y = e_y - block_size
            point_idxs = np.where((points[:, 0] >= s_x) & (points[:, 0] <= e_x) & (points[:, 1] >= s_y) & (points[:, 1] <= e_y))[0]
            if point_idxs.size == 0:
                continue
            num_batch = int(np.ceil(point_idxs.size / point_num))
            point_size = int(num_batch * point_num)
            replace = False if (point_size - point_idxs.size <= point_idxs.size) else True
            point_idxs_repeat = np.random.choice(point_idxs, point_size - point_idxs.size, replace=replace)
            point_idxs = np.concatenate((point_idxs, point_idxs_repeat))
            np.random.shuffle(point_idxs)
            data_batch = points[point_idxs, :]
            normlized_xyz = np.zeros((point_size, 3))
            normlized_xyz[:, 0] = data_batch[:, 0] / coord_max[0]
            normlized_xyz[:, 1] = data_batch[:, 1] / coord_max[1]
            normlized_xyz[:, 2] = data_batch[:, 2] / coord_max[2]
            data_batch[:, 0] = data_batch[:, 0] - (s_x + block_size / 2.0)
            data_batch[:, 1] = data_batch[:, 1] - (s_y + block_size / 2.0)
            data_batch[:, 3:6] /= 255.0
            data_batch = np.concatenate((data_batch, normlized_xyz), axis=1)
            data_room = np.vstack([data_room, data_batch]) if data_room.size else data_batch
            index_room = np.hstack([index_room, point_idxs]) if index_room.size else point_idxs
    data_room = data_room.reshape((-1, point_num, data_room.shape[1]))
    index_room = index_room.reshape((-1, point_num))

    vote_label_pool = np.zeros((points.shape[0], class_num))
    num_blocks = data_room.shape[0]
    batch_data = np.zeros((1, point_num, 9))
    batch_point_index = np.zeros((1, point_num))

    with engine.create_execution_context() as context:
        for sbatch in range(num_blocks):
            start_idx = sbatch
            end_idx = min(sbatch + 1, num_blocks)
            real_batch_size = end_idx - start_idx
            batch_data[0:real_batch_size, ...] = data_room[start_idx:end_idx, ...]
            batch_point_index[0:real_batch_size, ...] = index_room[start_idx:end_idx, ...]
            
            np.copyto(h_input, batch_data.swapaxes(2, 1).astype(np.float32).ravel())

            cuda.memcpy_htod_async(d_input, h_input, stream)
            context.execute_async_v2(bindings=[int(d_input), int(d_output0), int(d_output1)], stream_handle=stream.handle)
            cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
            cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
            stream.synchronize()

            outputs = h_output1.reshape(1, point_num, class_num)
            batch_pred_label = np.argmax(outputs, 2)
            point_idx = batch_point_index[0:real_batch_size, ...]
            pred_label = batch_pred_label[0:real_batch_size, ...]
            for b in range(pred_label.shape[0]):
                for n in range(pred_label.shape[1]):
                    vote_label_pool[int(point_idx[b, n]), int(pred_label[b, n])] += 1

    pred = np.argmax(vote_label_pool, 1)
    fout = open('pred.txt', 'w')
    for i in range(points.shape[0]):
        fout.write('%f %f %f %d\n' % (points[i, 0], points[i, 1], points[i, 2], pred[i]))
    fout.close()

C++ inference:

#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <cmath>
#include <ctime>
#include <random>
#include <cuda_runtime.h>
#include <NvInfer.h>
#include <NvInferRuntime.h>
#include <NvOnnxParser.h>


const int point_num = 4096;
const int class_num = 13;


struct point
{
	float m_x, m_y, m_z, m_r, m_g, m_b, m_normal_x, m_normal_y, m_normal_z;
	point() :
		m_x(0), m_y(0), m_z(0), m_r(0), m_g(0), m_b(0), m_normal_x(0), m_normal_y(0), m_normal_z(0) {}
	point(float x, float y, float z, float r, float g, float b) :
		m_x(x), m_y(y), m_z(z), m_r(r), m_g(g), m_b(b), m_normal_x(0), m_normal_y(0), m_normal_z(0) {}
	point(float x, float y, float z, float r, float g, float b, float normal_x, float normal_y, float normal_z) :
		m_x(x), m_y(y), m_z(z), m_r(r), m_g(g), m_b(b), m_normal_x(normal_x), m_normal_y(normal_y), m_normal_z(normal_z) {}
};


class TRTLogger : public nvinfer1::ILogger
{
public:
	virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override
	{
		if (severity <= Severity::kINFO)
			printf("%s\n", msg);
	}
} logger;


std::vector<unsigned char> load_file(const std::string& file)
{
	std::ifstream in(file, std::ios::in | std::ios::binary);
	if (!in.is_open())
		return {};

	in.seekg(0, std::ios::end);
	size_t length = in.tellg();

	std::vector<uint8_t> data;
	if (length > 0)
	{
		in.seekg(0, std::ios::beg);
		data.resize(length);
		in.read((char*)& data[0], length);
	}
	in.close();
	return data;
}


int main()
{
	TRTLogger logger;
	nvinfer1::ICudaEngine* engine;

	float x, y, z, r, g, b, l;
	std::vector<point> pts;
	std::vector<float> points_x, points_y, points_z;
	int points_num = 0;
	std::ifstream infile("Area_1_conferenceRoom_1.txt");
	while (infile >> x >> y >> z >> r >> g >> b >> l)
	{
		point pt(x, y, z, r, g, b);
		pts.push_back(pt);
		points_x.push_back(x);
		points_y.push_back(y);
		points_z.push_back(z);
		points_num++;
	}

	float x_min = *std::min_element(points_x.begin(), points_x.end());
	float y_min = *std::min_element(points_y.begin(), points_y.end());
	float z_min = *std::min_element(points_z.begin(), points_z.end());
	float x_max = *std::max_element(points_x.begin(), points_x.end());
	float y_max = *std::max_element(points_y.begin(), points_y.end());
	float z_max = *std::max_element(points_z.begin(), points_z.end());

	float stride = 0.5;
	float block_size = 1.0;
	srand((int)time(0));

	int grid_x = ceil((x_max - x_min - block_size) / stride) + 1;
	int grid_y = ceil((y_max - y_min - block_size) / stride) + 1;

	std::vector<point> data_room;
	std::vector<int> index_room;
	for (size_t index_y = 0; index_y < grid_y; index_y++)
	{
		for (size_t index_x = 0; index_x < grid_x; index_x++)
		{
			float s_x = x_min + index_x * stride;
			float e_x = std::min(s_x + block_size, x_max);
			s_x = e_x - block_size;
			float s_y = y_min + index_y * stride;
			float e_y = std::min(s_y + block_size, y_max);
			s_y = e_y - block_size;

			std::vector<int> point_idxs;
			for (size_t i = 0; i < points_num; i++)
			{
				if (points_x[i] >= s_x && points_x[i] <= e_x && points_y[i] >= s_y && points_y[i] <= e_y)
					point_idxs.push_back(i);
			}
			if (point_idxs.size() == 0)
				continue;

			int num_batch = ceil(point_idxs.size() * 1.0 / point_num);
			int point_size = num_batch * point_num;
			bool replace = (point_size - point_idxs.size() <= point_idxs.size() ? false : true);

			std::vector<int> point_idxs_repeat;
			if (replace)
			{
				for (size_t i = 0; i < point_size - point_idxs.size(); i++)
				{
					int id = rand() % point_idxs.size();
					point_idxs_repeat.push_back(point_idxs[id]);
				}
			}
			else
			{
				std::vector<bool> flags(pts.size(), false);
				for (size_t i = 0; i < point_size - point_idxs.size(); i++)
				{
					int id = rand() % point_idxs.size();
					while (true)
					{
						if (flags[id] == false)
						{
							flags[id] = true;
							break;
						}
						id = rand() % point_idxs.size();
					}
					point_idxs_repeat.push_back(point_idxs[id]);
				}
			}
			point_idxs.insert(point_idxs.end(), point_idxs_repeat.begin(), point_idxs_repeat.end());

			std::random_device rd;
			std::mt19937 g(rd());	// Mersenne Twister random-number engine
			std::shuffle(point_idxs.begin(), point_idxs.end(), g);	// shuffle the indices into a random order

			std::vector<point> data_batch;
			for (size_t i = 0; i < point_idxs.size(); i++)
			{
				data_batch.push_back(pts[point_idxs[i]]);
			}

			for (size_t i = 0; i < point_size; i++)
			{
				data_batch[i].m_normal_x = data_batch[i].m_x / x_max;
				data_batch[i].m_normal_y = data_batch[i].m_y / y_max;
				data_batch[i].m_normal_z = data_batch[i].m_z / z_max;
				data_batch[i].m_x -= (s_x + block_size / 2.0);
				data_batch[i].m_y -= (s_y + block_size / 2.0);
				data_batch[i].m_r /= 255.0;
				data_batch[i].m_g /= 255.0;
				data_batch[i].m_b /= 255.0;
				data_room.push_back(data_batch[i]);
				index_room.push_back(point_idxs[i]);
			}
		}
	}

	int n = point_num, m = index_room.size() / n;
	std::vector<std::vector<point>> data_rooms(m, std::vector<point>(n, point()));
	std::vector<std::vector<int>> index_rooms(m, std::vector<int>(n, 0));
	for (size_t i = 0; i < m; i++)
	{
		for (size_t j = 0; j < n; j++)
		{
			data_rooms[i][j] = data_room[i * n + j];
			index_rooms[i][j] = index_room[i * n + j];
		}
	}

	std::vector<std::vector<int>> vote_label_pool(points_num, std::vector<int>(class_num, 0));
	int num_blocks = data_rooms.size();

	auto engine_data = load_file("sem_seg.engine");
	nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(logger);
	engine = runtime->deserializeCudaEngine(engine_data.data(), engine_data.size());
	if (engine == nullptr)
	{
		printf("Deserialize cuda engine failed.\n");
		runtime->destroy();
		return -1;
	}

	nvinfer1::IExecutionContext* execution_context = engine->createExecutionContext();
	cudaStream_t stream = nullptr;
	cudaStreamCreate(&stream);

	float* input_data_device = nullptr;
	float output_data_host0[1 * 64 * 64];
	float* output_data_device0 = nullptr;
	float output_data_host1[1 * point_num * class_num];
	float* output_data_device1 = nullptr;
	const size_t input_numel = 1 * 9 * point_num;
	cudaMalloc(&input_data_device, input_numel * sizeof(float));
	cudaMalloc(&output_data_device0, sizeof(output_data_host0));
	cudaMalloc(&output_data_device1, sizeof(output_data_host1));

	std::vector<float> input_tensor_values(input_numel);
	for (int sbatch = 0; sbatch < num_blocks; sbatch++)
	{
		int start_idx = sbatch;
		int end_idx = std::min(sbatch + 1, num_blocks);
		int real_batch_size = end_idx - start_idx;
		std::vector<point> batch_data = data_rooms[start_idx];
		std::vector<int> point_idx = index_rooms[start_idx];
		std::vector<float> batch(point_num * 9);
		for (size_t i = 0; i < point_num; i++)
		{
			batch[9 * i + 0] = batch_data[i].m_x;
			batch[9 * i + 1] = batch_data[i].m_y;
			batch[9 * i + 2] = batch_data[i].m_z;
			batch[9 * i + 3] = batch_data[i].m_r;
			batch[9 * i + 4] = batch_data[i].m_g;
			batch[9 * i + 5] = batch_data[i].m_b;
			batch[9 * i + 6] = batch_data[i].m_normal_x;
			batch[9 * i + 7] = batch_data[i].m_normal_y;
			batch[9 * i + 8] = batch_data[i].m_normal_z;
		}

		float* input_data_host = nullptr;
		cudaMallocHost(&input_data_host, input_numel * sizeof(float));
		for (size_t i = 0; i < 9; i++)
		{
			for (size_t j = 0; j < point_num; j++)
			{
				input_data_host[i * point_num + j] = batch[9 * j + i];
			}
		}

		cudaMemcpyAsync(input_data_device, input_data_host, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream);
		float* bindings[] = { input_data_device, output_data_device0, output_data_device1 };

		bool success = execution_context->enqueueV2((void**)bindings, stream, nullptr);
		cudaMemcpyAsync(output_data_host0, output_data_device0, sizeof(output_data_host0), cudaMemcpyDeviceToHost, stream);
		cudaMemcpyAsync(output_data_host1, output_data_device1, sizeof(output_data_host1), cudaMemcpyDeviceToHost, stream);
		cudaStreamSynchronize(stream);

		std::vector<std::vector<float>> outputs(point_num, std::vector<float>(class_num, 0));
		for (size_t i = 0; i < point_num; i++)
		{
			for (size_t j = 0; j < class_num; j++)
			{
				outputs[i][j] = output_data_host1[i * class_num + j];
			}
		}

		std::vector<int> pred_label(point_num, 0);
		for (size_t i = 0; i < point_num; i++)
		{
			pred_label[i] = std::max_element(outputs[i].begin(), outputs[i].end()) - outputs[i].begin();
			vote_label_pool[point_idx[i]][pred_label[i]] += 1;
		}
		cudaFreeHost(input_data_host);	// allocated each iteration, so release it before the next one
	}

	std::ofstream outfile("pred.txt");
	for (size_t i = 0; i < points_num; i++)
	{
		int max_index = std::max_element(vote_label_pool[i].begin(), vote_label_pool[i].end()) - vote_label_pool[i].begin();
		outfile << pts[i].m_x << " " << pts[i].m_y << " " << pts[i].m_z << " " << max_index << std::endl;
	}

	outfile.close();

	cudaFree(input_data_device);
	cudaFree(output_data_device0);
	cudaFree(output_data_device1);
	cudaStreamDestroy(stream);
	execution_context->destroy();
	engine->destroy();
	runtime->destroy();

	return 0;
}
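
One last note: all three C++ listings hard-code the output buffer sizes (4096, 10, 128 x 128, and so on), which must match the exported model exactly. With the TensorRT 8 binding API (deprecated in later releases) the sizes can instead be queried from the engine; a minimal sketch:

#include <cstdio>
#include <NvInfer.h>

// Print each binding's direction, name and element count so buffer sizes
// need not be hard-coded.
void print_bindings(const nvinfer1::ICudaEngine* engine)
{
	for (int i = 0; i < engine->getNbBindings(); ++i)
	{
		nvinfer1::Dims dims = engine->getBindingDimensions(i);
		size_t volume = 1;
		for (int d = 0; d < dims.nbDims; ++d)
			volume *= dims.d[d];
		printf("%s %d (%s): %zu elements\n",
			engine->bindingIsInput(i) ? "input" : "output",
			i, engine->getBindingName(i), volume);
	}
}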

Model download: PointNet model weights.
