
Commit

Add files via upload
RichardoMrMu authored Sep 11, 2021
0 parents commit cb30017
Showing 30 changed files with 16,970 additions and 0 deletions.
42 changes: 42 additions & 0 deletions CMakeLists.txt
@@ -0,0 +1,42 @@
cmake_minimum_required(VERSION 2.6)
project(deepsort)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
set(CMAKE_BUILD_TYPE Release)

find_package(CUDA REQUIRED)
find_package(OpenCV REQUIRED)


include_directories(
    ${CUDA_INCLUDE_DIRS}
    ${OpenCV_INCLUDE_DIRS}
    ${PROJECT_SOURCE_DIR}/include
)
link_directories(
    /usr/local/cuda/lib64
)
aux_source_directory(${PROJECT_SOURCE_DIR}/src SRC_DIR)

# ===== deepsort =====
add_library(deepsort SHARED ${SRC_DIR})
target_link_libraries(deepsort
    ${CUDA_LIBRARIES} ${OpenCV_LIBS}   # FindCUDA defines CUDA_LIBRARIES (CUDA_LIBS is never set)
    cudart nvinfer nvonnxparser
)

# ===== onnx2engine =====
add_executable(onnx2engine ${PROJECT_SOURCE_DIR}/onnx2engine.cpp)
target_link_libraries(onnx2engine
    ${CUDA_LIBRARIES}
    cudart nvinfer nvonnxparser deepsort
)

# ===== demo =====
add_executable(demo ${PROJECT_SOURCE_DIR}/demo.cpp)
target_link_libraries(demo
    ${CUDA_LIBRARIES} ${OpenCV_LIBS}
    cudart nvinfer nvonnxparser deepsort
)


29 changes: 29 additions & 0 deletions README.md
@@ -0,0 +1,29 @@
# DeepSort_TensorRT
Use TensorRT to accelerate DeepSORT tracking (specifically, its ReID feature extraction).
## Install
[REQUIRE] TensorRT 7
[REQUIRE] OpenCV

Clone this repository together with the DeepSORT PyTorch repository:
```
git clone git@github.com:ZQPei/deep_sort_pytorch.git
git clone https://github.com/RichardoMrMu/deepsort-tensorrt.git
```
After setting up the deep_sort_pytorch project, copy exportOnnx.py into that project's root directory and run it; this will generate deepsort.onnx.
Move deepsort.onnx into this project's resources directory, then go back to this project's root and build:
```
mkdir build
cd build
cmake ..
make
./onnx2engine ../resources/deepsort.onnx ../resources/deepsort.engine
```
Then run the demo to test:
```
./demo ../resources/deepsort.engine ../resources/track.txt
```
After that you can hook the tracker up to your own TensorRT object detection project (see the integration sketch below). For reference, a TensorRT object detection project:
```
git clone https://github.com/wang-xinyu/tensorrtx.git
```
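To show how the pieces fit together, here is a minimal integration sketch (not part of this repository): per-frame boxes from a hypothetical detector are pushed into a `std::vector<DetectBox>` and handed to `DeepSort::sort`, mirroring the constructor arguments used in demo.cpp. The video path and the detection-filling step are placeholders for your own TensorRT detector.
```
// Hedged integration sketch: the detector call is a placeholder; DeepSort,
// DetectBox and Logger come from this repository's headers.
#include <opencv2/opencv.hpp>
#include <vector>
#include "deepsort.h"
#include "logging.h"

static Logger gLogger;

int main() {
    // Same batch size (128), feature dimension (256) and GPU id (0) as demo.cpp.
    DeepSort tracker("../resources/deepsort.engine", 128, 256, 0, &gLogger);

    cv::VideoCapture cap("your_video.mp4");   // placeholder input
    cv::Mat frame;
    while (cap.read(frame)) {
        std::vector<DetectBox> dets;
        // Fill dets from your own TensorRT detector, one box per detection:
        // dets.emplace_back(x1, y1, x2, y2, confidence, classID);

        tracker.sort(frame, dets);            // assigns a trackID to every box
        for (const auto& d : dets) {
            cv::rectangle(frame, cv::Point(d.x1, d.y1), cv::Point(d.x2, d.y2),
                          cv::Scalar(255, 0, 0), 2);
            cv::putText(frame, cv::format("ID:%d", (int)d.trackID),
                        cv::Point(d.x1, d.y1), cv::FONT_HERSHEY_SIMPLEX, 0.6,
                        cv::Scalar(0, 255, 0));
        }
        cv::imshow("tracking", frame);
        if (cv::waitKey(1) == 27) break;      // Esc to quit
    }
    return 0;
}
```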
Note: the test video is too large to include, so only the detection results are saved to a txt file for testing. Because the frames are all black, the demo is just a demo; rely on your actual integration for real use.
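The layout of track.txt is not documented here, but loadDetections in demo.cpp (below) reads each line as space-separated values, with the box given by its center point and size. The numeric line below is purely illustrative:
```
<frame> <class> <center_x> <center_y> <width> <height> <confidence>
1 0 960 540 120 240 0.87
```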
111 changes: 111 additions & 0 deletions demo.cpp
@@ -0,0 +1,111 @@
#include <iostream>
#include <fstream>
#include <string>
#include <cstdlib>
#include <ctime>
#include <vector>
#include <opencv2/opencv.hpp>
#include "deepsort.h"
#include "logging.h"

using std::vector;
using std::string;
using std::ifstream;
using std::ios;

static Logger gLogger;

// Draw every box with its track ID, class ID and confidence on a copy of the frame.
void showDetection(cv::Mat& img, std::vector<DetectBox>& boxes) {
    cv::Mat temp = img.clone();
    for (auto box : boxes) {
        cv::Point lt(box.x1, box.y1);
        cv::Point br(box.x2, box.y2);
        cv::rectangle(temp, lt, br, cv::Scalar(255, 0, 0), 1);
        std::string lbl = cv::format("ID:%d_C:%d_CONF:%.2f", (int)box.trackID, (int)box.classID, box.confidence);
        cv::putText(temp, lbl, lt, cv::FONT_HERSHEY_COMPLEX, 0.8, cv::Scalar(0, 255, 0));
    }
    cv::imshow("img", temp);
    cv::waitKey(1);
}

class Tester {
public:
    Tester(string modelPath) {
        allDetections.clear();
        out.clear();
        // batch size 128, feature dimension 256, GPU 0
        DS = new DeepSort(modelPath, 128, 256, 0, &gLogger);
    }
    ~Tester() {
        delete DS;
    }

public:
    // Split s on delim, skipping consecutive delimiters.
    void split(const std::string& s, vector<std::string>& token, char delim=' ') {
        token.clear();
        auto string_find_first_not = [s, delim](size_t pos = 0) -> size_t {
            for (size_t i = pos; i < s.size(); ++i)
                if (s[i] != delim) return i;
            return string::npos;
        };
        size_t lastPos = string_find_first_not(0);
        size_t pos = s.find(delim, lastPos);
        while (lastPos != string::npos) {
            token.emplace_back(s.substr(lastPos, pos - lastPos));
            lastPos = string_find_first_not(pos);
            pos = s.find(delim, lastPos);
        }
    }

    // Each line of the txt file is: frame classID centerX centerY width height confidence
    void loadDetections(std::string txtPath) {
        this->txtPath = txtPath;
        ifstream inFile;
        inFile.open(txtPath, ios::binary);
        std::string temp;
        vector<std::string> token;
        while (getline(inFile, temp)) {
            split(temp, token, ' ');
            int frame = atoi(token[0].c_str());
            int c = atoi(token[1].c_str());
            int x = atoi(token[2].c_str());
            int y = atoi(token[3].c_str());
            int w = atoi(token[4].c_str());
            int h = atoi(token[5].c_str());
            float con = atof(token[6].c_str());
            while ((int)allDetections.size() <= frame) {
                allDetections.emplace_back();
            }
            // Convert center/size into the corner coordinates DetectBox expects.
            DetectBox dd(x - w/2, y - h/2, x + w/2, y + h/2, con, c);
            allDetections[frame].push_back(dd);
        }
        allDetections.pop_back();
    }

    void run() {
        cv::namedWindow("DeepSortTest");
        // The original video is not shipped, so track on a black 1080p canvas.
        cv::Mat whiteBoard(1080, 1920, CV_8UC3, cv::Scalar::all(0));
        for (vector<DetectBox> d : allDetections) {
            cv::Mat img_rgb;
            cv::cvtColor(whiteBoard, img_rgb, cv::COLOR_BGR2RGB);
            DS->sort(img_rgb, d);    // updates each DetectBox with a track ID
            showDetection(whiteBoard, d);
        }
        cv::destroyAllWindows();
    }

private:
    vector<vector<DetectBox>> allDetections;
    vector<DetectBox> out;
    std::string txtPath;
    DeepSort* DS;
};

int main(int argc, char** argv) {
    if (argc < 3) {
        std::cout << "./demo [input model path] [input txt path]" << std::endl;
        return -1;
    }
    Tester* test = new Tester(argv[1]);
    test->loadDetections(argv[2]);
    test->run();
    delete test;
    return 0;
}
42 changes: 42 additions & 0 deletions exportOnnx.py
@@ -0,0 +1,42 @@

import os
import cv2
import time
import argparse
import torch
import numpy as np

from deep_sort import build_tracker
from utils.draw import draw_boxes
from utils.parser import get_config

from tqdm import tqdm

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--config_deepsort", type=str, default="./configs/deep_sort.yaml", help='Configure tracker')
    parser.add_argument("--cpu", dest="use_cuda", action="store_false", default=True, help='Run in CPU')
    args = parser.parse_args()

    cfg = get_config()
    cfg.merge_from_file(args.config_deepsort)
    use_cuda = args.use_cuda and torch.cuda.is_available()
    torch.set_grad_enabled(False)
    # Build the tracker on CPU; the ReID network is moved to the GPU below for export.
    model = build_tracker(cfg, use_cuda=False)

    model.reid = True
    model.extractor.net.eval()

    device = 'cuda'
    output_onnx = 'deepsort.onnx'
    # ------------------------ export -----------------------------
    print("==> Exporting model to ONNX format at '{}'".format(output_onnx))
    input_names = ['input']
    output_names = ['output']

    # The ReID network expects 128x64 crops; the batch dimension is exported as dynamic.
    input_tensor = torch.randn(1, 3, 128, 64, device=device)

    torch.onnx.export(model.extractor.net.cuda(), input_tensor, output_onnx, export_params=True, verbose=False,
                      input_names=input_names, output_names=output_names, opset_version=10,
                      do_constant_folding=True,
                      dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
73 changes: 73 additions & 0 deletions include/datatype.h
@@ -0,0 +1,73 @@
#ifndef DATATYPE_H
#define DATATYPE_H

typedef struct DetectBox {
    DetectBox(float x1=0, float y1=0, float x2=0, float y2=0,
              float confidence=0, float classID=-1, float trackID=-1) {
        this->x1 = x1;
        this->y1 = y1;
        this->x2 = x2;
        this->y2 = y2;
        this->confidence = confidence;
        this->classID = classID;
        this->trackID = trackID;
    }
    float x1, y1, x2, y2;
    float confidence;
    float classID;
    float trackID;
} DetectBox;

#endif // DATATYPE_H

#ifndef DEEPSORTDATATYPE_H
#define DEEPSORTDATATYPE_H

#include <cstddef>
#include <vector>
#include <Eigen/Core>
#include <Eigen/Dense>
typedef struct CLSCONF {
    CLSCONF() {
        this->cls = -1;
        this->conf = -1;
    }
    CLSCONF(int cls, float conf) {
        this->cls = cls;
        this->conf = conf;
    }
    int cls;
    float conf;
} CLSCONF;

typedef Eigen::Matrix<float, 1, 4, Eigen::RowMajor> DETECTBOX;
typedef Eigen::Matrix<float, -1, 4, Eigen::RowMajor> DETECTBOXSS;
typedef Eigen::Matrix<float, 1, 256, Eigen::RowMajor> FEATURE;
typedef Eigen::Matrix<float, Eigen::Dynamic, 256, Eigen::RowMajor> FEATURESS;
//typedef std::vector<FEATURE> FEATURESS;

//Kalmanfilter
//typedef Eigen::Matrix<float, 8, 8, Eigen::RowMajor> KAL_FILTER;
typedef Eigen::Matrix<float, 1, 8, Eigen::RowMajor> KAL_MEAN;
typedef Eigen::Matrix<float, 8, 8, Eigen::RowMajor> KAL_COVA;
typedef Eigen::Matrix<float, 1, 4, Eigen::RowMajor> KAL_HMEAN;
typedef Eigen::Matrix<float, 4, 4, Eigen::RowMajor> KAL_HCOVA;
using KAL_DATA = std::pair<KAL_MEAN, KAL_COVA>;
using KAL_HDATA = std::pair<KAL_HMEAN, KAL_HCOVA>;

//main
using RESULT_DATA = std::pair<int, DETECTBOX>;

//tracker:
using TRACKER_DATA = std::pair<int, FEATURESS>;
using MATCH_DATA = std::pair<int, int>;
typedef struct t {
    std::vector<MATCH_DATA> matches;
    std::vector<int> unmatched_tracks;
    std::vector<int> unmatched_detections;
} TRACHER_MATCHD;

//linear_assignment:
typedef Eigen::Matrix<float, -1, -1, Eigen::RowMajor> DYNAMICM;

#endif //DEEPSORTDATATYPE_H
48 changes: 48 additions & 0 deletions include/deepsort.h
@@ -0,0 +1,48 @@
#ifndef DEEPSORT_H
#define DEEPSORT_H

#include <iostream>
#include <opencv2/opencv.hpp>
#include "featuretensor.h"
#include "tracker.h"
#include "datatype.h"
#include <vector>

using std::vector;
using nvinfer1::ILogger;

class DeepSort {
public:
    DeepSort(std::string modelPath, int batchSize, int featureDim, int gpuID, ILogger* gLogger);
    ~DeepSort();

public:
    void sort(cv::Mat& frame, vector<DetectBox>& dets);

private:
    void sort(cv::Mat& frame, DETECTIONS& detections);
    void sort(cv::Mat& frame, DETECTIONSV2& detectionsv2);
    void sort(vector<DetectBox>& dets);
    void sort(DETECTIONS& detections);
    void init();

private:
    std::string enginePath;
    int batchSize;
    int featureDim;
    cv::Size imgShape;
    float confThres;
    float nmsThres;
    int maxBudget;
    float maxCosineDist;

private:
    vector<RESULT_DATA> result;
    vector<std::pair<CLSCONF, DETECTBOX>> results;
    tracker* objTracker;
    FeatureTensor* featureExtractor;
    ILogger* gLogger;
    int gpuID;
};

#endif  // DEEPSORT_H
30 changes: 30 additions & 0 deletions include/deepsortenginegenerator.h
@@ -0,0 +1,30 @@
#ifndef DEEPSORT_ENGINE_GENERATOR_H
#define DEEPSORT_ENGINE_GENERATOR_H

#include <iostream>
#include <NvInfer.h>
#include <NvOnnxParser.h>

using namespace nvinfer1;

const int IMG_HEIGHT = 128;
const int IMG_WIDTH = 64;
const int MAX_BATCH_SIZE = 128;
const std::string INPUT_NAME("input");

class DeepSortEngineGenerator {
public:
    DeepSortEngineGenerator(ILogger* gLogger);
    ~DeepSortEngineGenerator();

public:
    void setFP16(bool state);
    void createEngine(std::string onnxPath, std::string enginePath);

private:
    std::string modelPath, engingPath;
    ILogger* gLogger;
    bool useFP16;
};

#endif  // DEEPSORT_ENGINE_GENERATOR_H
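onnx2engine.cpp is not included in this view, but based only on the declarations above and the command line shown in the README, a driver for this class could look roughly like the sketch below; the FP16 toggle and the exact argument handling are assumptions, not the repository's actual code.
```
// Hedged sketch of a possible onnx2engine driver; not the repository's actual file.
#include <iostream>
#include "deepsortenginegenerator.h"
#include "logging.h"   // Logger used by demo.cpp

static Logger gLogger;

int main(int argc, char** argv) {
    if (argc < 3) {
        std::cout << "./onnx2engine [onnx path] [engine output path]" << std::endl;
        return -1;
    }
    DeepSortEngineGenerator gen(&gLogger);
    gen.setFP16(true);                    // assumption: build an FP16 engine if supported
    gen.createEngine(argv[1], argv[2]);   // parse the ONNX model and serialize the engine
    return 0;
}
```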
