Dlib and Caffe actually do implement max pooling layers with padding in the same way.

So I just removed the error check that was preventing the conversion from
proceeding in that case.  I also added more useful output messages about
setting input tensor dimensions.
This commit is contained in:
Davis King 2017-05-19 20:16:40 -04:00
parent e28768eafa
commit 10d3f93333
1 changed file with 2 additions and 5 deletions

View File

@ -127,6 +127,7 @@ void convert_dlib_xml_to_caffe_python_code(
fout << "batch_size = 1;" << endl;
if (layers.back().detail_name == "input_rgb_image")
{
cout << "WARNING: The source dlib network didn't commit to a specific input tensor size, we are using a default size of 28x28x1 which is appropriate for MNIST input. But if you are using different inputs you will need to edit the auto-generated python script to tell it your input size." << endl;
fout << "input_nr = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default. It might not be the right value." << endl;
fout << "input_nc = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default. It might not be the right value." << endl;
fout << "input_k = 3;" << endl;
@ -139,6 +140,7 @@ void convert_dlib_xml_to_caffe_python_code(
}
else if (layers.back().detail_name == "input")
{
cout << "WARNING: The source dlib network didn't commit to a specific input tensor size, we are using a default size of 28x28x1 which is appropriate for MNIST input. But if you are using different inputs you will need to edit the auto-generated python script to tell it your input size." << endl;
fout << "input_nr = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default. It might not be the right value." << endl;
fout << "input_nc = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default. It might not be the right value." << endl;
fout << "input_k = 1;" << endl;
@ -221,11 +223,6 @@ void convert_dlib_xml_to_caffe_python_code(
fout << ", kernel_w=" << i->attribute("nc");
fout << ", kernel_h=" << i->attribute("nr");
}
if (i->attribute("padding_x") != 0 || i->attribute("padding_y") != 0)
{
throw dlib::error("dlib and caffe implement pooling with non-zero padding differently, so you can't convert a "
"network with such pooling layers.");
}
fout << ", stride_w=" << i->attribute("stride_x");
fout << ", stride_h=" << i->attribute("stride_y");