mirror of https://github.com/AlexeyAB/darknet.git

OpenCV fix for channel=1. README.md fix

This commit is contained in:
parent 099b71d1de
commit 8cd5a9364a

Changed files: README.md, src/image_opencv.cpp
--- a/README.md
+++ b/README.md
@@ -1,13 +1,14 @@
 # Yolo-v3 and Yolo-v2 for Windows and Linux
 ### (neural network for object detection) - Tensor Cores can be used on [Linux](https://github.com/AlexeyAB/darknet#how-to-compile-on-linux) and [Windows](https://github.com/AlexeyAB/darknet#how-to-compile-on-windows-using-vcpkg)
 
-Contributors: https://github.com/AlexeyAB/darknet/graphs/contributors
 More details: http://pjreddie.com/darknet/yolo/
 
 
 [![CircleCI](https://circleci.com/gh/AlexeyAB/darknet.svg?style=svg)](https://circleci.com/gh/AlexeyAB/darknet)
 [![TravisCI](https://travis-ci.org/AlexeyAB/darknet.svg?branch=master)](https://travis-ci.org/AlexeyAB/darknet)
 [![AppveyorCI](https://ci.appveyor.com/api/projects/status/594bwb5uoc1fxwiu/branch/master?svg=true)](https://ci.appveyor.com/project/AlexeyAB/darknet/branch/master)
+[![Contributors](https://img.shields.io/github/contributors/AlexeyAB/Darknet.svg)](https://github.com/AlexeyAB/darknet/graphs/contributors)
+[![License: Unlicense](https://img.shields.io/badge/license-Unlicense-blue.svg)](https://github.com/AlexeyAB/darknet/blob/master/LICENSE)
 
 
 * [Requirements (and how to install dependecies)](#requirements)
@@ -73,9 +74,10 @@ You can get cfg-files by path: `darknet/cfg/`
 
 #### Yolo v3 in other frameworks
 
-* Convert `yolov3.weights`/`cfg` model to **TensorFlow**: by using [mystic123](https://github.com/mystic123/tensorflow-yolo-v3) or [jinyu121](https://github.com/jinyu121/DW2TF) projects, and [TensorFlow-lite](https://www.tensorflow.org/lite/guide/get_started#2_convert_the_model_format)
-* To use Yolo v3 model in **Intel OpenVINO** (Myriad X / USB Neural Compute Stick / Arria FPGA): read this [manual](https://software.intel.com/en-us/articles/OpenVINO-Using-TensorFlow#converting-a-darknet-yolo-model)
+* **TensorFlow:** convert `yolov3.weights`/`cfg` files to `yolov3.ckpt`/`pb/meta`: by using [mystic123](https://github.com/mystic123/tensorflow-yolo-v3) or [jinyu121](https://github.com/jinyu121/DW2TF) projects, and [TensorFlow-lite](https://www.tensorflow.org/lite/guide/get_started#2_convert_the_model_format)
+* **Intel OpenVINO:** (Myriad X / USB Neural Compute Stick / Arria FPGA): read this [manual](https://software.intel.com/en-us/articles/OpenVINO-Using-TensorFlow#converting-a-darknet-yolo-model)
 * **OpenCV-dnn** is very fast DNN implementation on CPU (x86/ARM-Android), use `yolov3.weights`/`cfg` with: [C++ example](https://github.com/opencv/opencv/blob/8c25a8eb7b10fb50cda323ee6bec68aa1a9ce43c/samples/dnn/object_detection.cpp#L192-L221), [Python example](https://github.com/opencv/opencv/blob/8c25a8eb7b10fb50cda323ee6bec68aa1a9ce43c/samples/dnn/object_detection.py#L129-L150)
+* **PyTorch > ONNX > CoreML > iOS** how to convert cfg/weights-files to pt-file: [ultralytics/yolov3](https://github.com/ultralytics/yolov3#darknet-conversion)
 
 ##### Examples of results
 
@@ -164,6 +166,8 @@ Before make, you can set such options in the `Makefile`: [link](https://github.c
 * `OPENMP=1` to build with OpenMP support to accelerate Yolo by using multi-core CPU
 * `LIBSO=1` to build a library `darknet.so` and binary runable file `uselib` that uses this library. Or you can try to run so `LD_LIBRARY_PATH=./:$LD_LIBRARY_PATH ./uselib test.mp4` How to use this SO-library from your own code - you can look at C++ example: https://github.com/AlexeyAB/darknet/blob/master/src/yolo_console_dll.cpp
 or use in such a way: `LD_LIBRARY_PATH=./:$LD_LIBRARY_PATH ./uselib data/coco.names cfg/yolov3.cfg yolov3.weights test.mp4`
+* `ZED_CAMERA=1` to build a library with ZED-3D-camera support (should be ZED SDK installed), then run
+`LD_LIBRARY_PATH=./:$LD_LIBRARY_PATH ./uselib data/coco.names cfg/yolov3.cfg yolov3.weights zed_camera`
 
 To run Darknet on Linux use examples from this article, just use `./darknet` instead of `darknet.exe`, i.e. use this command: `./darknet detector test ./cfg/coco.data ./cfg/yolov3.cfg ./yolov3.weights`
 
--- a/src/image_opencv.cpp
+++ b/src/image_opencv.cpp
@@ -10,6 +10,7 @@
 #include <vector>
 #include <iostream>
 #include <fstream>
+#include <algorithm>
 
 #include <opencv2/core/version.hpp>
 #include <opencv2/imgproc/imgproc.hpp>
@@ -112,7 +113,8 @@ mat_cv *load_image_mat_cv(const char *filename, int flag)
 //if (check_mistakes) getchar();
 return NULL;
 }
-cv::cvtColor(mat, mat, cv::COLOR_RGB2BGR);
+if (mat.channels() == 3) cv::cvtColor(mat, mat, cv::COLOR_RGB2BGR);
+else if (mat.channels() == 4) cv::cvtColor(mat, mat, cv::COLOR_RGBA2BGRA);
 
 return (mat_cv *)mat_ptr;
 }
@@ -429,7 +431,8 @@ void show_image_cv(image p, const char *name)
 constrain_image(copy);
 
 cv::Mat mat = image_to_mat(copy);
-cv::cvtColor(mat, mat, cv::COLOR_RGB2BGR);
+if (mat.channels() == 3) cv::cvtColor(mat, mat, cv::COLOR_RGB2BGR);
+else if (mat.channels() == 4) cv::cvtColor(mat, mat, cv::COLOR_RGBA2BGR);
 cv::namedWindow(name, cv::WINDOW_NORMAL);
 cv::imshow(name, mat);
 free_image(copy);
@@ -816,7 +819,7 @@ extern int stbi_write_jpg(char const *filename, int x, int y, int comp, const vo
 void save_mat_png(cv::Mat img_src, const char *name)
 {
 cv::Mat img_rgb;
-cv::cvtColor(img_src, img_rgb, cv::COLOR_RGB2BGR);
+if (img_src.channels() >= 3) cv::cvtColor(img_src, img_rgb, cv::COLOR_RGB2BGR);
 stbi_write_png(name, img_rgb.cols, img_rgb.rows, 3, (char *)img_rgb.data, 0);
 }
 // ----------------------------------------
@@ -824,7 +827,7 @@ void save_mat_png(cv::Mat img_src, const char *name)
 void save_mat_jpg(cv::Mat img_src, const char *name)
 {
 cv::Mat img_rgb;
-cv::cvtColor(img_src, img_rgb, cv::COLOR_RGB2BGR);
+if (img_src.channels() >= 3) cv::cvtColor(img_src, img_rgb, cv::COLOR_RGB2BGR);
 stbi_write_jpg(name, img_rgb.cols, img_rgb.rows, 3, (char *)img_rgb.data, 80);
 }
 // ----------------------------------------
@@ -1063,6 +1066,7 @@ void draw_train_loss(mat_cv* img_src, int img_size, float avg_loss, float max_im
 // precision
 if (draw_precision) {
 static float old_precision = 0;
+static float max_precision = 0;
 static int iteration_old = 0;
 static int text_iteration_old = 0;
 if (iteration_old == 0)
@@ -1073,12 +1077,14 @@ void draw_train_loss(mat_cv* img_src, int img_size, float avg_loss, float max_im
 cv::Point(img_offset + draw_size * (float)current_batch / max_batches, draw_size * (1 - precision)),
 CV_RGB(255, 0, 0), 1, 8, 0);
 
-sprintf(char_buff, "%2.0f%% ", precision * 100);
+sprintf(char_buff, "%2.1f%% ", precision * 100);
 cv::putText(img, char_buff, cv::Point(10, 28), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.7, CV_RGB(255, 255, 255), 5, CV_AA);
 cv::putText(img, char_buff, cv::Point(10, 28), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.7, CV_RGB(200, 0, 0), 1, CV_AA);
 
-if (((int)(old_precision * 10) != (int)(precision * 10)) || (current_batch - text_iteration_old) >= max_batches / 10) {
+if (((int)(old_precision * 10) != (int)(precision * 10)) || (max_precision < precision) || (current_batch - text_iteration_old) >= max_batches / 10) {
 text_iteration_old = current_batch;
+max_precision = std::max(max_precision, precision);
+sprintf(char_buff, "%2.0f%% ", precision * 100);
 cv::putText(img, char_buff, cv::Point(pt1.x - 30, draw_size * (1 - precision) + 15), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.7, CV_RGB(255, 255, 255), 5, CV_AA);
 cv::putText(img, char_buff, cv::Point(pt1.x - 30, draw_size * (1 - precision) + 15), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.7, CV_RGB(200, 0, 0), 1, CV_AA);
 }
@@ -1098,7 +1104,9 @@ void draw_train_loss(mat_cv* img_src, int img_size, float avg_loss, float max_im
 cv::imshow("average loss", img);
 k = cv::waitKey(20);
 }
-if (k == 's' || current_batch == (max_batches - 1) || current_batch % 100 == 0) {
+static int old_batch = 0;
+if (k == 's' || current_batch == (max_batches - 1) || (current_batch / 100 > old_batch / 100)) {
+old_batch = current_batch;
 save_mat_png(img, "chart.png");
 cv::putText(img, "- Saved", cv::Point(260, img_size - 10), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.7, CV_RGB(255, 0, 0), 1, CV_AA);
 }
@@ -1133,7 +1141,8 @@ image image_data_augmentation(mat_cv* mat, int w, int h,
 cv::Rect dst_rect(cv::Point2i(std::max<int>(0, -pleft), std::max<int>(0, -ptop)), new_src_rect.size());
 
 cv::Mat cropped(cv::Size(src_rect.width, src_rect.height), img.type());
-cropped.setTo(cv::Scalar::all(0));
+//cropped.setTo(cv::Scalar::all(0));
+cropped.setTo(cv::mean(img));
 
 img(new_src_rect).copyTo(cropped(dst_rect));
 
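The recurring change in image_opencv.cpp above is a channel-aware color conversion: cv::cvtColor with cv::COLOR_RGB2BGR expects a 3-channel input and throws on a single-channel (grayscale) cv::Mat, which is the channel=1 failure this commit guards against by checking Mat::channels() first. Below is a minimal standalone sketch of that pattern, assuming OpenCV 3.x or newer headers; it is not darknet code itself, and the helper name and the small main() are illustrative only.

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

// Channel-aware RGB -> BGR conversion, mirroring the guard added in this commit.
static cv::Mat to_bgr_for_display(const cv::Mat& src)
{
    cv::Mat dst;
    if (src.channels() == 3)      cv::cvtColor(src, dst, cv::COLOR_RGB2BGR);   // reorder 3-channel data
    else if (src.channels() == 4) cv::cvtColor(src, dst, cv::COLOR_RGBA2BGRA); // reorder and keep alpha
    else                          dst = src; // grayscale: nothing to reorder; RGB2BGR would throw here
    return dst;
}

int main()
{
    cv::Mat gray(64, 64, CV_8UC1, cv::Scalar(128));       // the channel=1 case that used to fail
    cv::Mat rgb(64, 64, CV_8UC3, cv::Scalar(10, 20, 30)); // the usual 3-channel case
    cv::imshow("gray", to_bgr_for_display(gray));
    cv::imshow("rgb", to_bgr_for_display(rgb));
    cv::waitKey(0);
    return 0;
}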