From fc9b867dd9c9a6d38d7fe478217060e11b9e7e1b Mon Sep 17 00:00:00 2001
From: Joseph Redmon
Date: Wed, 16 Nov 2016 00:15:46 -0800
Subject: [PATCH] :fire: :fire: :dragonite:

---
 src/avgpool_layer.c       | 2 +-
 src/connected_layer.c     | 2 +-
 src/convolutional_layer.c | 2 +-
 src/cost_layer.c          | 2 +-
 src/detector.c            | 6 +++++-
 src/dropout_layer.c       | 2 +-
 src/maxpool_layer.c       | 2 +-
 src/parser.c              | 3 ++-
 src/softmax_layer.c       | 2 +-
 9 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/src/avgpool_layer.c b/src/avgpool_layer.c
index c6db477e..b6932fe7 100644
--- a/src/avgpool_layer.c
+++ b/src/avgpool_layer.c
@@ -4,7 +4,7 @@
 
 avgpool_layer make_avgpool_layer(int batch, int w, int h, int c)
 {
-    fprintf(stderr, "Avgpool Layer: %d x %d x %d image\n", w,h,c);
+    fprintf(stderr, "avg                     %4d x%4d x%4d   ->  %4d\n",  w, h, c, c);
     avgpool_layer l = {0};
     l.type = AVGPOOL;
     l.batch = batch;
diff --git a/src/connected_layer.c b/src/connected_layer.c
index 26942292..b678ed0d 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -100,7 +100,7 @@ connected_layer make_connected_layer(int batch, int inputs, int outputs, ACTIVAT
     }
 #endif
     l.activation = activation;
-    fprintf(stderr, "Connected Layer: %d inputs, %d outputs\n", inputs, outputs);
+    fprintf(stderr, "connected                            %4d  ->  %4d\n", inputs, outputs);
     return l;
 }
 
diff --git a/src/convolutional_layer.c b/src/convolutional_layer.c
index 86285e03..3864c1bc 100644
--- a/src/convolutional_layer.c
+++ b/src/convolutional_layer.c
@@ -300,7 +300,7 @@ convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int
 
     l.workspace_size = get_workspace_size(l);
     l.activation = activation;
-    fprintf(stderr, "Convolutional Layer: %d x %d x %d image, %d filters -> %d x %d x %d image\n", h,w,c,n, out_h, out_w, n);
+    fprintf(stderr, "conv  %5d %2d x%2d /%2d  %4d x%4d x%4d   ->  %4d x%4d x%4d\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
 
     return l;
 }
diff --git a/src/cost_layer.c b/src/cost_layer.c
index f266c6a1..39d2398b 100644
--- a/src/cost_layer.c
+++ b/src/cost_layer.c
@@ -31,7 +31,7 @@ char *get_cost_string(COST_TYPE a)
 
 cost_layer make_cost_layer(int batch, int inputs, COST_TYPE cost_type, float scale)
 {
-    fprintf(stderr, "Cost Layer: %d inputs\n", inputs);
+    fprintf(stderr, "cost                                           %4d\n",  inputs);
     cost_layer l = {0};
     l.type = COST;
 
diff --git a/src/detector.c b/src/detector.c
index 3853ebb3..a513816c 100644
--- a/src/detector.c
+++ b/src/detector.c
@@ -136,15 +136,19 @@ void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, i
 
         i = get_current_batch(net);
         printf("%d: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs);
-        if(i%100==0 || (i < 1000 && i%100 == 0)){
+        if(i%1000==0 || (i < 1000 && i%100 == 0)){
+#ifdef GPU
             if(ngpus != 1) sync_nets(nets, ngpus, 0);
+#endif
             char buff[256];
             sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
             save_weights(net, buff);
         }
         free_data(train);
     }
+#ifdef GPU
     if(ngpus != 1) sync_nets(nets, ngpus, 0);
+#endif
     char buff[256];
     sprintf(buff, "%s/%s_final.weights", backup_directory, base);
     save_weights(net, buff);
diff --git a/src/dropout_layer.c b/src/dropout_layer.c
index 82be64b1..b1381e63 100644
--- a/src/dropout_layer.c
+++ b/src/dropout_layer.c
@@ -6,7 +6,6 @@
 
 dropout_layer make_dropout_layer(int batch, int inputs, float probability)
 {
-    fprintf(stderr, "Dropout Layer: %d inputs, %f probability\n", inputs, probability);
     dropout_layer l = {0};
     l.type = DROPOUT;
     l.probability = probability;
@@ -22,6 +21,7 @@ dropout_layer make_dropout_layer(int batch, int inputs, float probability)
     l.backward_gpu = backward_dropout_layer_gpu;
     l.rand_gpu = cuda_make_array(l.rand, inputs*batch);
 #endif
+    fprintf(stderr, "dropout       p = %.2f               %4d  ->  %4d\n", probability, inputs, inputs);
     return l;
 }
 
diff --git a/src/maxpool_layer.c b/src/maxpool_layer.c
index 49cfeaf5..d1fbacb9 100644
--- a/src/maxpool_layer.c
+++ b/src/maxpool_layer.c
@@ -20,7 +20,6 @@ image get_maxpool_delta(maxpool_layer l)
 
 maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
 {
-    fprintf(stderr, "Maxpool Layer: %d x %d x %d image, %d size, %d stride\n", h,w,c,size,stride);
     maxpool_layer l = {0};
     l.type = MAXPOOL;
     l.batch = batch;
@@ -48,6 +47,7 @@ maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int s
     l.output_gpu  = cuda_make_array(l.output, output_size);
     l.delta_gpu   = cuda_make_array(l.delta,  output_size);
 #endif
+    fprintf(stderr, "max          %d x %d / %d  %4d x%4d x%4d   ->  %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
     return l;
 }
 
diff --git a/src/parser.c b/src/parser.c
index db4cf368..cde06b4b 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -610,9 +610,10 @@ network parse_network_cfg(char *filename)
     n = n->next;
     int count = 0;
     free_section(s);
+    fprintf(stderr, "layer     filters    size              input                output\n");
     while(n){
         params.index = count;
-        fprintf(stderr, "%d: ", count);
+        fprintf(stderr, "%5d ", count);
         s = (section *)n->val;
         options = s->options;
         layer l = {0};
diff --git a/src/softmax_layer.c b/src/softmax_layer.c
index 0aa9047f..5d153148 100644
--- a/src/softmax_layer.c
+++ b/src/softmax_layer.c
@@ -10,7 +10,7 @@
 softmax_layer make_softmax_layer(int batch, int inputs, int groups)
 {
     assert(inputs%groups == 0);
-    fprintf(stderr, "Softmax Layer: %d inputs\n", inputs);
+    fprintf(stderr, "softmax                                        %4d\n", inputs);
     softmax_layer l = {0};
     l.type = SOFTMAX;
     l.batch = batch;
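
Note: with the new format strings, parse_network_cfg prints one aligned table
row per layer on stderr under the header added in parser.c. Sample output,
derived from the format strings above for a hypothetical network (a 3 x 3 / 1
convolution with 32 filters on a 416 x 416 x 3 input, then a 2 x 2 / 2
maxpool); illustrative values, not output captured from this commit:

layer     filters    size              input                output
    0 conv     32  3 x 3 / 1   416 x 416 x   3   ->   416 x 416 x  32
    1 max          2 x 2 / 2   416 x 416 x  32   ->   208 x 208 x  32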