mirror of https://github.com/AlexeyAB/darknet.git
Added groups= and group_id= params to the [route] layer
This commit is contained in:
parent 704bd1138e
commit 0fa9c8f105
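With these parameters, a [route] section in a network .cfg can forward only one channel slice of its input instead of the whole tensor. A hedged usage sketch (the layer index is illustrative; the key names match the option_find_int_quiet calls added in parser.c below):

[route]
layers=-1
groups=2
group_id=1

Left at their defaults (groups=1, group_id=0), existing configs behave exactly as before.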
@@ -206,6 +206,7 @@ struct layer {
     int n;
     int max_boxes;
     int groups;
+    int group_id;
     int size;
     int side;
     int stride;
@@ -776,7 +776,10 @@ route_layer parse_route(list *options, size_params params)
     }
     int batch = params.batch;

-    route_layer layer = make_route_layer(batch, n, layers, sizes);
+    int groups = option_find_int_quiet(options, "groups", 1);
+    int group_id = option_find_int_quiet(options, "group_id", 0);
+
+    route_layer layer = make_route_layer(batch, n, layers, sizes, groups, group_id);

     convolutional_layer first = params.net.layers[layers[0]];
     layer.out_w = first.out_w;
@@ -791,6 +794,7 @@ route_layer parse_route(list *options, size_params params)
             layer.out_h = layer.out_w = layer.out_c = 0;
         }
     }
+    layer.out_c = layer.out_c / layer.groups;

     return layer;
 }
@@ -3,7 +3,7 @@
 #include "blas.h"
 #include <stdio.h>

-route_layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes)
+route_layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes, int groups, int group_id)
 {
     fprintf(stderr,"route ");
     route_layer l = { (LAYER_TYPE)0 };
@@ -12,6 +12,8 @@ route_layer make_route_layer(int batch, int n, int *input_layers, int *input_siz
     l.n = n;
     l.input_layers = input_layers;
     l.input_sizes = input_sizes;
+    l.groups = groups;
+    l.group_id = group_id;
     int i;
     int outputs = 0;
     for(i = 0; i < n; ++i){
@@ -19,6 +21,7 @@ route_layer make_route_layer(int batch, int n, int *input_layers, int *input_siz
         outputs += input_sizes[i];
     }
     fprintf(stderr, "\n");
+    outputs = outputs / groups;
     l.outputs = outputs;
     l.inputs = outputs;
     l.delta = (float*)calloc(outputs * batch, sizeof(float));
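For intuition, a worked example with illustrative numbers (not taken from the commit): routing a single 26x26x64 feature map with groups=2 gives

    input_size = 26 * 26 * 64 = 43264
    outputs    = 43264 / 2    = 21632    (outputs = outputs / groups above)
    out_c      = 64 / 2       = 32       (layer.out_c / layer.groups in parse_route)

so the route layer allocates and forwards only half of the incoming activations; group_id then selects which half.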
@@ -57,6 +60,8 @@ void resize_route_layer(route_layer *l, network *net)
            l->out_h = l->out_w = l->out_c = 0;
        }
    }
+    l->out_c = l->out_c / l->groups;
+    l->outputs = l->outputs / l->groups;
    l->inputs = l->outputs;
    l->delta = (float*)realloc(l->delta, l->outputs * l->batch * sizeof(float));
    l->output = (float*)realloc(l->output, l->outputs * l->batch * sizeof(float));
@@ -78,10 +83,13 @@ void forward_route_layer(const route_layer l, network_state state)
        int index = l.input_layers[i];
        float *input = state.net.layers[index].output;
        int input_size = l.input_sizes[i];
+        int part_input_size = input_size / l.groups;
        for(j = 0; j < l.batch; ++j){
-            copy_cpu(input_size, input + j*input_size, 1, l.output + offset + j*l.outputs, 1);
+            //copy_cpu(input_size, input + j*input_size, 1, l.output + offset + j*l.outputs, 1);
+            copy_cpu(part_input_size, input + j*input_size + part_input_size*l.group_id, 1, l.output + offset + j*l.outputs, 1);
        }
-        offset += input_size;
+        //offset += input_size;
+        offset += part_input_size;
    }
}
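The new copy_cpu call above grabs one contiguous block per sample because darknet stores each sample's activations channel-major (CHW), so a channel group is a single run of part_input_size floats starting at part_input_size*l.group_id. A minimal standalone sketch of that indexing (hypothetical buffers and sizes, memcpy standing in for copy_cpu; the batch loop and l.outputs bookkeeping are omitted):

#include <stdio.h>
#include <string.h>

int main(void)
{
    int groups = 2, group_id = 1;
    int input_size = 8;                    /* one sample, i.e. c*h*w of the source layer */
    float input[8]  = {0, 1, 2, 3, 4, 5, 6, 7};
    float output[4] = {0};

    int part_input_size = input_size / groups;
    /* same offset as the commit: skip group_id slices of part_input_size floats */
    memcpy(output, input + part_input_size * group_id,
           part_input_size * sizeof(float));

    for (int i = 0; i < part_input_size; ++i) printf("%g ", output[i]);
    printf("\n");                          /* prints: 4 5 6 7 */
    return 0;
}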
@@ -93,10 +101,13 @@ void backward_route_layer(const route_layer l, network_state state)
        int index = l.input_layers[i];
        float *delta = state.net.layers[index].delta;
        int input_size = l.input_sizes[i];
+        int part_input_size = input_size / l.groups;
        for(j = 0; j < l.batch; ++j){
-            axpy_cpu(input_size, 1, l.delta + offset + j*l.outputs, 1, delta + j*input_size, 1);
+            //axpy_cpu(input_size, 1, l.delta + offset + j*l.outputs, 1, delta + j*input_size, 1);
+            axpy_cpu(part_input_size, 1, l.delta + offset + j*l.outputs, 1, delta + j*input_size + part_input_size*l.group_id, 1);
        }
-        offset += input_size;
+        //offset += input_size;
+        offset += part_input_size;
    }
}
@@ -109,11 +120,14 @@ void forward_route_layer_gpu(const route_layer l, network_state state)
        int index = l.input_layers[i];
        float *input = state.net.layers[index].output_gpu;
        int input_size = l.input_sizes[i];
+        int part_input_size = input_size / l.groups;
        for(j = 0; j < l.batch; ++j){
            //copy_ongpu(input_size, input + j*input_size, 1, l.output_gpu + offset + j*l.outputs, 1);
-            simple_copy_ongpu(input_size, input + j*input_size, l.output_gpu + offset + j*l.outputs);
+            //simple_copy_ongpu(input_size, input + j*input_size, l.output_gpu + offset + j*l.outputs);
+            simple_copy_ongpu(part_input_size, input + j*input_size + part_input_size*l.group_id, l.output_gpu + offset + j*l.outputs);
        }
-        offset += input_size;
+        //offset += input_size;
+        offset += part_input_size;
    }
}
@@ -125,10 +139,13 @@ void backward_route_layer_gpu(const route_layer l, network_state state)
        int index = l.input_layers[i];
        float *delta = state.net.layers[index].delta_gpu;
        int input_size = l.input_sizes[i];
+        int part_input_size = input_size / l.groups;
        for(j = 0; j < l.batch; ++j){
-            axpy_ongpu(input_size, 1, l.delta_gpu + offset + j*l.outputs, 1, delta + j*input_size, 1);
+            //axpy_ongpu(input_size, 1, l.delta_gpu + offset + j*l.outputs, 1, delta + j*input_size, 1);
+            axpy_ongpu(part_input_size, 1, l.delta_gpu + offset + j*l.outputs, 1, delta + j*input_size + part_input_size*l.group_id, 1);
        }
-        offset += input_size;
+        //offset += input_size;
+        offset += part_input_size;
    }
}
#endif
@@ -8,7 +8,7 @@ typedef layer route_layer;
#ifdef __cplusplus
extern "C" {
#endif
-route_layer make_route_layer(int batch, int n, int *input_layers, int *input_size);
+route_layer make_route_layer(int batch, int n, int *input_layers, int *input_size, int groups, int group_id);
void forward_route_layer(const route_layer l, network_state state);
void backward_route_layer(const route_layer l, network_state state);
void resize_route_layer(route_layer *l, network *net);