mirror of https://github.com/AlexeyAB/darknet.git
Minor fixes
parent 9815012a01
commit ab9e891f60
CMakeLists.txt
@@ -247,7 +247,7 @@ endif()
 set_source_files_properties(${sources} PROPERTIES LANGUAGE CXX)
 
-add_library(darklib ${CMAKE_CURRENT_LIST_DIR}/include/yolo_v2_class.hpp ${CMAKE_CURRENT_LIST_DIR}/src/yolo_v2_class.cpp ${sources} ${headers} ${cuda_sources})
+add_library(darklib SHARED ${CMAKE_CURRENT_LIST_DIR}/include/yolo_v2_class.hpp ${CMAKE_CURRENT_LIST_DIR}/src/yolo_v2_class.cpp ${sources} ${headers} ${cuda_sources})
 set_target_properties(darklib PROPERTIES POSITION_INDEPENDENT_CODE ON)
 if(ENABLE_CUDA)
   set_target_properties(darklib PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
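With `darklib` now built as a `SHARED` library with position-independent code, an external program can link against it and call the C++ API declared in `include/yolo_v2_class.hpp`. The snippet below is a minimal consumer sketch, not part of the commit; it assumes `yolov3.cfg`, `yolov3.weights`, and `dog.jpg` sit in the working directory and uses only the `Detector` constructor and `detect()` overload from that header.

```cpp
// Minimal sketch of an app linked against the shared darklib
// (assumed files: yolov3.cfg, yolov3.weights, dog.jpg in the working directory).
#include <iostream>
#include <vector>
#include "yolo_v2_class.hpp"   // Detector, bbox_t

int main() {
    Detector detector("yolov3.cfg", "yolov3.weights");   // gpu_id defaults to 0

    // detect() on an image file returns one bbox_t per detected object
    std::vector<bbox_t> boxes = detector.detect("dog.jpg", 0.25f /*threshold*/);

    for (const bbox_t &b : boxes) {
        std::cout << "obj_id=" << b.obj_id << " prob=" << b.prob
                  << " box=(" << b.x << "," << b.y << "," << b.w << "," << b.h << ")\n";
    }
    return 0;
}
```

Linking would look roughly like `g++ app.cpp -I<darknet>/include -L<build dir> -ldarklib`; the exact paths and any extra CUDA/OpenCV flags depend on how `darklib` was configured.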
README.md
@@ -1,4 +1,4 @@
 # Yolo-v3 and Yolo-v2 for Windows and Linux
 ### (neural network for object detection) - Tensor Cores can be used on [Linux](https://github.com/AlexeyAB/darknet#how-to-compile-on-linux) and [Windows](https://github.com/AlexeyAB/darknet#how-to-compile-on-windows-using-vcpkg)
 
 Contributors: https://github.com/AlexeyAB/darknet/graphs/contributors
@@ -13,6 +13,7 @@ More details: http://pjreddie.com/darknet/yolo/
 * [Requirements (and how to install dependencies)](#requirements)
 * [Pre-trained models](#pre-trained-models)
 * [Explanations in issues](https://github.com/AlexeyAB/darknet/issues?q=is%3Aopen+is%3Aissue+label%3AExplanations)
+* [Yolo v3 in other frameworks (TensorFlow, OpenVINO, OpenCV-dnn, ...)](#yolo-v3-in-other-frameworks)
 
 0. [Improvements in this repository](#improvements-in-this-repository)
 1. [How to use](#how-to-use-on-the-command-line)
@@ -66,6 +67,12 @@ Put it near compiled: darknet.exe
 You can get cfg-files by path: `darknet/cfg/`
 
+#### Yolo v3 in other frameworks
+
+* Convert the `yolov3.weights`/`cfg` model to **TensorFlow** using the [mystic123](https://github.com/mystic123/tensorflow-yolo-v3) or [jinyu121](https://github.com/jinyu121/DW2TF) projects
+* To use a Yolo v3 model in **Intel OpenVINO** (Myriad X / USB Neural Compute Stick / Arria FPGA): read this [manual](https://software.intel.com/en-us/articles/OpenVINO-Using-TensorFlow#converting-a-darknet-yolo-model)
+* **OpenCV-dnn** is a very fast DNN implementation on CPU (x86/ARM-Android); use `yolov3.weights`/`cfg` with the [C++ example](https://github.com/opencv/opencv/blob/8c25a8eb7b10fb50cda323ee6bec68aa1a9ce43c/samples/dnn/object_detection.cpp#L192-L221) or [Python example](https://github.com/opencv/opencv/blob/8c25a8eb7b10fb50cda323ee6bec68aa1a9ce43c/samples/dnn/object_detection.py#L129-L150)
+
 ##### Examples of results
 
 [![Yolo v3](http://img.youtube.com/vi/VOC3huqHrss/0.jpg)](https://www.youtube.com/watch?v=MPU2HistivI "Yolo v3")
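For the **OpenCV-dnn** route added above, the linked OpenCV samples are the reference; the sketch below only illustrates the basic flow with `cv::dnn::readNetFromDarknet` (assuming OpenCV >= 3.4.2 with the dnn module). The 416x416 input size and 0.5 confidence threshold are illustrative choices, and non-maximum suppression is omitted for brevity.

```cpp
// Rough OpenCV-dnn sketch: load yolov3.cfg/yolov3.weights and print raw detections.
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

int main() {
    cv::dnn::Net net = cv::dnn::readNetFromDarknet("yolov3.cfg", "yolov3.weights");
    cv::Mat img = cv::imread("dog.jpg");

    // Darknet expects RGB input scaled to [0,1]; 416x416 matches the cfg width/height.
    cv::Mat blob = cv::dnn::blobFromImage(img, 1 / 255.0, cv::Size(416, 416),
                                          cv::Scalar(), /*swapRB=*/true, /*crop=*/false);
    net.setInput(blob);

    std::vector<cv::Mat> outs;
    net.forward(outs, net.getUnconnectedOutLayersNames());

    // Each output row: [cx, cy, w, h, objectness, class scores...] in relative coordinates.
    for (const cv::Mat &out : outs) {
        for (int i = 0; i < out.rows; ++i) {
            cv::Mat scores = out.row(i).colRange(5, out.cols);
            cv::Point class_id;
            double confidence;
            cv::minMaxLoc(scores, nullptr, &confidence, nullptr, &class_id);
            if (confidence > 0.5) {
                int cx = static_cast<int>(out.at<float>(i, 0) * img.cols);
                int cy = static_cast<int>(out.at<float>(i, 1) * img.rows);
                std::cout << "class " << class_id.x << " conf " << confidence
                          << " center (" << cx << "," << cy << ")\n";
            }
        }
    }
    return 0;
}
```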
@@ -113,14 +120,16 @@ On Linux find executable file `./darknet` in the root directory, while on Window
 * **JSON and MJPEG server** that allows multiple connections from your soft or Web-browser `ip-address:8070` and 8090: `./darknet detector demo ./cfg/coco.data ./cfg/yolov3.cfg ./yolov3.weights test50.mp4 -json_port 8070 -mjpeg_port 8090 -ext_output`
 * Yolo v3 Tiny **on GPU #1**: `darknet.exe detector demo cfg/coco.data cfg/yolov3-tiny.cfg yolov3-tiny.weights -i 1 test.mp4`
 * Alternative method Yolo v3 COCO - image: `darknet.exe detect cfg/yolov3.cfg yolov3.weights -i 0 -thresh 0.25`
-* Train on **Amazon EC2**, to see mAP & Loss-chart using URL like: `http://ec2-35-160-228-91.us-west-2.compute.amazonaws.com:8090` in the Chrome/Firefox:
+* Train on **Amazon EC2**, to see the mAP & Loss-chart using a URL like `http://ec2-35-160-228-91.us-west-2.compute.amazonaws.com:8090` in Chrome/Firefox (**Darknet should be compiled with OpenCV**):
 `./darknet detector train cfg/coco.data yolov3.cfg darknet53.conv.74 -dont_show -mjpeg_port 8090 -map`
-* 186 MB Yolo9000 - image: `darknet.exe detector test cfg/combine9k.data yolo9000.cfg yolo9000.weights`
+* 186 MB Yolo9000 - image: `darknet.exe detector test cfg/combine9k.data cfg/yolo9000.cfg yolo9000.weights`
+* Remember to put data/9k.tree and data/coco9k.map in the same folder as your app if you use the C++ API to build an app
 * To process a list of images `data/train.txt` and save results of detection to the `result.json` file, use:
 `darknet.exe detector test cfg/coco.data cfg/yolov3.cfg yolov3.weights -ext_output -dont_show -out result.json < data/train.txt`
 * To process a list of images `data/train.txt` and save results of detection to `result.txt`, use:
-`darknet.exe detector test cfg/coco.data yolov3.cfg yolov3.weights -dont_show -ext_output < data/train.txt > result.txt`
+`darknet.exe detector test cfg/coco.data cfg/yolov3.cfg yolov3.weights -dont_show -ext_output < data/train.txt > result.txt`
 * Pseudo-labeling - to process a list of images `data/new_train.txt` and save results of detection in Yolo training format for each image as label `<image_name>.txt` (in this way you can increase the amount of training data), use:
-`darknet.exe detector test cfg/coco.data yolov3.cfg yolov3.weights -thresh 0.25 -dont_show -save_labels < data/new_train.txt`
+`darknet.exe detector test cfg/coco.data cfg/yolov3.cfg yolov3.weights -thresh 0.25 -dont_show -save_labels < data/new_train.txt`
 * To calculate anchors: `darknet.exe detector calc_anchors data/obj.data -num_of_clusters 9 -width 416 -height 416`
 * To check accuracy mAP@IoU=50: `darknet.exe detector map data/obj.data yolo-obj.cfg backup\yolo-obj_7000.weights`
 * To check accuracy mAP@IoU=75: `darknet.exe detector map data/obj.data yolo-obj.cfg backup\yolo-obj_7000.weights -iou_thresh 0.75`
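The `-save_labels` pseudo-labeling step above can also be reproduced through the C++ API if you want to post-process detections yourself. The sketch below is an illustration only, assuming the `Detector`, `image_t`, and `bbox_t` declarations from `include/yolo_v2_class.hpp`; it writes one Yolo-format label file (`<obj_id> <x_center> <y_center> <width> <height>`, all relative to image size) per image listed in `data/new_train.txt`.

```cpp
// Hypothetical pseudo-labeling helper built on the C++ API instead of the CLI
// (assumes Detector, image_t, and bbox_t from include/yolo_v2_class.hpp).
#include <fstream>
#include <string>
#include <vector>
#include "yolo_v2_class.hpp"

int main() {
    Detector detector("cfg/yolov3.cfg", "yolov3.weights");

    std::ifstream list("data/new_train.txt");   // one image path per line
    std::string path;
    while (std::getline(list, path)) {
        image_t img = detector.load_image(path);
        std::vector<bbox_t> boxes = detector.detect(img, 0.25f);

        // Yolo training format: "<obj_id> <x_center> <y_center> <w> <h>", relative to image size.
        std::ofstream label(path.substr(0, path.find_last_of('.')) + ".txt");
        for (const bbox_t &b : boxes) {
            label << b.obj_id << " "
                  << (b.x + b.w / 2.0) / img.w << " "
                  << (b.y + b.h / 2.0) / img.h << " "
                  << static_cast<double>(b.w) / img.w << " "
                  << static_cast<double>(b.h) / img.h << "\n";
        }
        detector.free_image(img);
    }
    return 0;
}
```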
@@ -24,10 +24,11 @@ namespace Darknet
     public struct bbox_t
     {
         public UInt32 x, y, w, h;       // (x,y) - top-left corner, (w, h) - width & height of bounded box
         public float prob;              // confidence - probability that the object was found correctly
         public UInt32 obj_id;           // class of object - from range [0, classes-1]
         public UInt32 track_id;         // tracking id for video (0 - untracked, 1 - inf - tracked object)
+        public UInt32 frames_counter;
         public float x_3d, y_3d, z_3d;  // 3-D coordinates, if a 3D stereo camera is used
     };
 
     [StructLayout(LayoutKind.Sequential)]
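The C# struct above mirrors the native `bbox_t` declared in `include/yolo_v2_class.hpp`; keeping the two field-for-field in sync (including the new `frames_counter`) is what makes the sequential marshaling work across the darklib boundary. For reference, the native side looks roughly like this:

```cpp
// Native counterpart in include/yolo_v2_class.hpp that the C# struct mirrors
// field-for-field (sequential layout, same field order and sizes).
struct bbox_t {
    unsigned int x, y, w, h;       // (x,y) - top-left corner, (w,h) - width & height of bounded box
    float prob;                    // confidence - probability that the object was found correctly
    unsigned int obj_id;           // class of object - from range [0, classes-1]
    unsigned int track_id;         // tracking id for video (0 - untracked, 1 - inf - tracked object)
    unsigned int frames_counter;   // counter of frames on which the object was detected
    float x_3d, y_3d, z_3d;        // 3-D coordinates, if a 3D stereo camera is used
};
```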
@@ -332,7 +332,7 @@ int main(int argc, char *argv[])
     sl::InitParameters init_params;
     init_params.depth_minimum_distance = 0.5;
     init_params.depth_mode = sl::DEPTH_MODE_ULTRA;
-    init_params.camera_resolution = sl::RESOLUTION_HD720;
+    init_params.camera_resolution = sl::RESOLUTION_HD720;  // sl::RESOLUTION_HD1080, sl::RESOLUTION_HD720
     init_params.coordinate_units = sl::UNIT_METER;
     //init_params.sdk_cuda_ctx = (CUcontext)detector.get_cuda_context();
     init_params.sdk_gpu_id = detector.cur_gpu_id;
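These `InitParameters` (ULTRA depth, HD720, meters) set up the ZED camera whose point cloud is later used to fill the `x_3d`/`y_3d`/`z_3d` fields of each detection. The sketch below only illustrates that idea, written against a ZED SDK 2.x-style API matching the `sl::` constants above; `fill_3d_coordinates` is a hypothetical helper, not a function from this repository.

```cpp
// Illustrative only: sample the ZED point cloud at each bounding-box center
// to get 3-D coordinates in meters (call after a successful zed.grab()).
#include <vector>
#include <sl/Camera.hpp>
#include "yolo_v2_class.hpp"   // bbox_t

void fill_3d_coordinates(sl::Camera &zed, std::vector<bbox_t> &boxes) {
    sl::Mat point_cloud;
    zed.retrieveMeasure(point_cloud, sl::MEASURE_XYZ);   // XYZ in meters (UNIT_METER above)

    for (bbox_t &b : boxes) {
        sl::float4 pt;
        point_cloud.getValue(b.x + b.w / 2, b.y + b.h / 2, &pt);   // box center
        b.x_3d = pt.x;
        b.y_3d = pt.y;
        b.z_3d = pt.z;
    }
}
```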