Visual Studio now compiles dnn_mnist_advanced, inception, and dtest

This commit is contained in:
Fm 2016-06-21 19:15:25 +03:00
parent 943a07cbb9
commit cbb69de299
2 changed files with 54 additions and 35 deletions

View File

@ -78,6 +78,8 @@ elseif (MSVC OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # else if using Visu
message(STATUS "Enabling SSE2 instructions")
add_definitions(-DDLIB_HAVE_SSE2)
endif()
# The DNN module produces very long decorated type names for network definitions,
# which triggers MSVC warning C4503 (decorated name length exceeded) - disable it
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4503")
endif()

View File

@ -1985,44 +1985,34 @@ namespace dlib
// ----------------------------------------------------------------------------------------
namespace impl{
// helper classes for layer concat processing
// Primary template for the concat helpers. It is never used directly —
// all behavior lives in the partial specializations that follow.
template <template<typename> class... TAG_TYPES>
struct concat_helper_impl {
};
// Base case of the concat helper recursion: a pack holding a single tag layer.
template <template<typename> class TAG_TYPE>
struct concat_helper_impl<TAG_TYPE>
{
    // A lone tag contributes exactly one entry.
    constexpr static size_t tag_count() { return 1; }

    // Write this tag's id; being the last tag in the list, no separator follows.
    static void list_tags(std::ostream& out) { out << tag_id<TAG_TYPE>::id; }

    // Size the output tensor from the tagged layer's output: same sample count
    // and spatial dims, with this tag's k() channels added to the sum_k
    // channels accumulated from the other tags.
    template<typename SUBNET>
    static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
    {
        const auto& src = layer<TAG_TYPE>(sub).get_output();
        out.set_size(src.num_samples(), src.k() + sum_k, src.nr(), src.nc());
    }

    // Copy this tag's output into out's channel range starting at k_offset.
    template<typename SUBNET>
    static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
    {
        const auto& src = layer<TAG_TYPE>(sub).get_output();
        tt::copy_tensor(out, k_offset, src, 0, src.k());
    }

    // Route the matching channel slice of the incoming gradient back into
    // this tag's gradient input.
    template<typename SUBNET>
    static void split(const tensor& input, SUBNET& sub, size_t k_offset)
    {
        auto& grad = layer<TAG_TYPE>(sub).get_gradient_input();
        tt::copy_tensor(grad, 0, input, k_offset, grad.k());
    }
};
// // helper classes for layer concat processing
// template <template<typename> class... TAG_TYPES>
// struct concat_helper_impl {
// // this specialization will be used only by MSVC
// constexpr static size_t tag_count() {return 0;}
// static void list_tags(std::ostream& out)
// {
// }
// template<typename SUBNET>
// static void resize_out(resizable_tensor&, const SUBNET&, long)
// {
// }
// template<typename SUBNET>
// static void concat(tensor&, const SUBNET&, size_t)
// {
// }
// template<typename SUBNET>
// static void split(const tensor&, SUBNET&, size_t)
// {
// }
// };
template <template<typename> class TAG_TYPE, template<typename> class... TAG_TYPES>
struct concat_helper_impl<TAG_TYPE, TAG_TYPES...>{
struct concat_helper_impl{
constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
static void list_tags(std::ostream& out)
{
out << tag_id<TAG_TYPE>::id << ",";
static void list_tags(std::ostream& out)
{
out << tag_id<TAG_TYPE>::id << (tag_count() > 1 ? "," : "");
concat_helper_impl<TAG_TYPES...>::list_tags(out);
}
@ -2049,6 +2039,33 @@ namespace dlib
concat_helper_impl<TAG_TYPES...>::split(input, sub, k_offset);
}
};
// Specialization for a single tag: the terminating case of the recursion.
template <template<typename> class TAG_TYPE>
struct concat_helper_impl<TAG_TYPE>{
// One tag in the pack.
constexpr static size_t tag_count() {return 1;}
// Print the tag id with no trailing comma (it is the last one listed).
static void list_tags(std::ostream& out)
{
out << tag_id<TAG_TYPE>::id;
}
// Set out's size from the tagged layer's output, adding this tag's k()
// channels to the sum_k channels contributed by the other tags.
template<typename SUBNET>
static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
{
auto& t = layer<TAG_TYPE>(sub).get_output();
out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc());
}
// Copy this tag's output into out's channel range starting at k_offset.
template<typename SUBNET>
static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
{
auto& t = layer<TAG_TYPE>(sub).get_output();
tt::copy_tensor(out, k_offset, t, 0, t.k());
}
// Copy the channel slice of the incoming gradient starting at k_offset
// back into this tag's gradient input.
template<typename SUBNET>
static void split(const tensor& input, SUBNET& sub, size_t k_offset)
{
auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
tt::copy_tensor(t, 0, input, k_offset, t.k());
}
};
}
// concat layer
template<