From 114f677d74d71db1466e37e3b77994f29a13c9fa Mon Sep 17 00:00:00 2001
From: Davis King
Date: Sat, 22 Feb 2014 16:07:17 -0500
Subject: [PATCH] Fixing grammar in comments.

---
 examples/bayes_net_ex.cpp                   |  4 ++--
 examples/bayes_net_from_disk_ex.cpp         |  2 +-
 examples/bayes_net_gui_ex.cpp               |  2 +-
 examples/bridge_ex.cpp                      |  4 ++--
 examples/config_reader_ex.cpp               |  2 +-
 examples/custom_trainer_ex.cpp              |  4 ++--
 examples/empirical_kernel_map_ex.cpp        |  8 ++++----
 examples/fhog_ex.cpp                        |  2 +-
 examples/fhog_object_detector_ex.cpp        |  4 ++--
 examples/graph_labeling_ex.cpp              |  4 ++--
 examples/gui_api_ex.cpp                     | 10 +++++-----
 examples/image_ex.cpp                       |  4 ++--
 examples/iosockstream_ex.cpp                |  2 +-
 examples/kcentroid_ex.cpp                   |  4 ++--
 examples/krls_ex.cpp                        |  2 +-
 examples/krls_filter_ex.cpp                 |  2 +-
 examples/krr_classification_ex.cpp          |  6 +++---
 examples/krr_regression_ex.cpp              |  2 +-
 examples/least_squares_ex.cpp               |  6 +++---
 examples/linear_manifold_regularizer_ex.cpp |  2 +-
 examples/matrix_ex.cpp                      |  4 ++--
 examples/matrix_expressions_ex.cpp          |  4 ++--
 examples/mlp_ex.cpp                         |  4 ++--
 examples/model_selection_ex.cpp             |  2 +-
 examples/multiclass_classification_ex.cpp   |  2 +-
 examples/object_detector_advanced_ex.cpp    |  4 ++--
 examples/object_detector_ex.cpp             |  2 +-
 examples/one_class_classifiers_ex.cpp       |  2 +-
 examples/optimization_ex.cpp                |  8 ++++----
 examples/quantum_computing_ex.cpp           |  4 ++--
 examples/rank_features_ex.cpp               |  2 +-
 examples/rvm_ex.cpp                         |  8 ++++----
 examples/rvm_regression_ex.cpp              |  2 +-
 examples/sequence_segmenter_ex.cpp          |  4 ++--
 examples/svm_ex.cpp                         |  8 ++++----
 examples/svm_pegasos_ex.cpp                 |  6 +++---
 examples/svm_rank_ex.cpp                    |  2 +-
 examples/svm_sparse_ex.cpp                  | 10 +++++-----
 examples/svm_struct_ex.cpp                  |  2 +-
 examples/train_object_detector.cpp          |  2 +-
 examples/using_custom_kernels_ex.cpp        |  4 ++--
 41 files changed, 81 insertions(+), 81 deletions(-)

diff --git a/examples/bayes_net_ex.cpp b/examples/bayes_net_ex.cpp
index 54d7d2f52..64f2ad957 100644
--- a/examples/bayes_net_ex.cpp
+++ b/examples/bayes_net_ex.cpp
@@ -161,7 +161,7 @@ int main()
-    // We have now finished setting up our bayesian network. So lets compute some
+    // We have now finished setting up our bayesian network. So let's compute some
     // probability values. The first thing we will do is compute the prior probability
     // of each node in the network. To do this we will use the join tree algorithm which
     // is an algorithm for performing exact inference in a bayesian network.
@@ -198,7 +198,7 @@ int main()
     cout << "\n\n\n";
-    // Now to make things more interesting lets say that we have discovered that the C
+    // Now to make things more interesting let's say that we have discovered that the C
     // node really has a value of 1. That is to say, we now have evidence that
     // C is 1. We can represent this in the network using the following two function
     // calls.
diff --git a/examples/bayes_net_from_disk_ex.cpp b/examples/bayes_net_from_disk_ex.cpp
index c74040a4b..eaab5881a 100644
--- a/examples/bayes_net_from_disk_ex.cpp
+++ b/examples/bayes_net_from_disk_ex.cpp
@@ -44,7 +44,7 @@ int main(int argc, char** argv)
     cout << "Number of nodes in the network: " << bn.number_of_nodes() << endl;
-    // Lets compute some probability values using the loaded network using the join tree (aka. Junction
+    // Let's compute some probability values using the loaded network using the join tree (aka. Junction
     // Tree) algorithm.
     // First we need to create an undirected graph which contains set objects at each node and
diff --git a/examples/bayes_net_gui_ex.cpp b/examples/bayes_net_gui_ex.cpp
index 1e7a719f9..4e098f071 100644
--- a/examples/bayes_net_gui_ex.cpp
+++ b/examples/bayes_net_gui_ex.cpp
@@ -413,7 +413,7 @@ initialize_node_cpt_if_necessary (
 {
     node_type& node = graph_drawer.graph_node(index);
-    // if the cpt for this node isn't properly filled out then lets clear it out
+    // if the cpt for this node isn't properly filled out then let's clear it out
     // and populate it with some reasonable default values
     if (node_cpt_filled_out(graph_drawer.graph(), index) == false)
     {
diff --git a/examples/bridge_ex.cpp b/examples/bridge_ex.cpp
index b74f4461d..bc772ccbb 100644
--- a/examples/bridge_ex.cpp
+++ b/examples/bridge_ex.cpp
@@ -103,7 +103,7 @@ void run_example_1(
-    // Now lets put some things into the out pipe
+    // Now let's put some things into the out pipe
     int value = 1;
     out.enqueue(value);
@@ -308,7 +308,7 @@ void run_example_4(
     bridge_status bs;
     // Once a connection is established it will generate a status message from each bridge.
-    // Lets get those and print them.
+    // Let's get those and print them.
     b1_status.dequeue(bs);
     cout << "bridge 1 status: is_connected: " << boolalpha << bs.is_connected << endl;
     cout << "bridge 1 status: foreign_ip: " << bs.foreign_ip << endl;
diff --git a/examples/config_reader_ex.cpp b/examples/config_reader_ex.cpp
index e24faae5c..02ad1cc68 100644
--- a/examples/config_reader_ex.cpp
+++ b/examples/config_reader_ex.cpp
@@ -75,7 +75,7 @@ int main()
     // Use our recursive function to print everything in the config file.
     print_config_reader_contents(cr);
-    // Now lets access some of the fields of the config file directly. You
+    // Now let's access some of the fields of the config file directly. You
     // use [] for accessing key values and .block() for accessing sub-blocks.
     // Print out the string value assigned to key1 in the config file
diff --git a/examples/custom_trainer_ex.cpp b/examples/custom_trainer_ex.cpp
index e821283ab..ec3fc550b 100644
--- a/examples/custom_trainer_ex.cpp
+++ b/examples/custom_trainer_ex.cpp
@@ -174,7 +174,7 @@ int main()
     trainer.set_trainer(rbf_trainer, "upper_left", "lower_right");
-    // Now lets do 5-fold cross-validation using the one_vs_one_trainer we just setup.
+    // Now let's do 5-fold cross-validation using the one_vs_one_trainer we just setup.
     // As an aside, always shuffle the order of the samples before doing cross validation.
     // For a discussion of why this is a good idea see the svm_ex.cpp example.
     randomize_samples(samples, labels);
@@ -201,7 +201,7 @@ int main()
     */
-    // Finally, lets save our multiclass decision rule to disk. Remember that we have
+    // Finally, let's save our multiclass decision rule to disk. Remember that we have
     // to specify the types of binary decision function used inside the one_vs_one_decision_function.
     one_vs_one_decision_function
diff --git a/examples/empirical_kernel_map_ex.cpp b/examples/empirical_kernel_map_ex.cpp
 sample_type;
 // We will be using the radial_basis_kernel in this example program.
@@ -213,7 +213,7 @@ void test_empirical_kernel_map (
-    // Now lets do something more interesting. The following loop finds the centroids
+    // Now let's do something more interesting. The following loop finds the centroids
     // of the two classes of data.
     sample_type class1_center;
     sample_type class2_center;
@@ -254,7 +254,7 @@ void test_empirical_kernel_map (
     // Next, note that classifying a point based on its distance between two other
     // points is the same thing as using the plane that lies between those two points
-    // as a decision boundary. So lets compute that decision plane and use it to classify
+    // as a decision boundary. So let's compute that decision plane and use it to classify
     // all the points.
     sample_type plane_normal_vector = class1_center - class2_center;
@@ -291,7 +291,7 @@ void test_empirical_kernel_map (
     {
         double side = dec_funct(samples[i]);
-        // And lets just check that the dec_funct really does compute the same thing as the previous equation.
+        // And let's just check that the dec_funct really does compute the same thing as the previous equation.
         double side_alternate_equation = dot(plane_normal_vector, projected_samples[i]) - bias;
         if (abs(side-side_alternate_equation) > 1e-14)
             cout << "dec_funct error: " << abs(side-side_alternate_equation) << endl;
diff --git a/examples/fhog_ex.cpp b/examples/fhog_ex.cpp
index 206070235..1e8d5a80d 100644
--- a/examples/fhog_ex.cpp
+++ b/examples/fhog_ex.cpp
@@ -55,7 +55,7 @@ int main(int argc, char** argv)
     cout << "hog image has " << hog.nr() << " rows and " << hog.nc() << " columns." << endl;
-    // Lets see what the image and FHOG features look like.
+    // Let's see what the image and FHOG features look like.
     image_window win(img);
     image_window winhog(draw_fhog(hog));
diff --git a/examples/fhog_object_detector_ex.cpp b/examples/fhog_object_detector_ex.cpp
index b37f5fae6..ab1615948 100644
--- a/examples/fhog_object_detector_ex.cpp
+++ b/examples/fhog_object_detector_ex.cpp
@@ -161,7 +161,7 @@ int main(int argc, char** argv)
     // a face.
     image_window hogwin(draw_fhog(detector), "Learned fHOG detector");
-    // Now for the really fun part. Lets display the testing images on the screen and
+    // Now for the really fun part. Let's display the testing images on the screen and
     // show the output of the face detector overlaid on each image. You will see that
     // it finds all the faces without false alarming on any non-faces.
     image_window win;
@@ -191,7 +191,7 @@ int main(int argc, char** argv)
-    // Now lets talk about some optional features of this training tool as well as some
+    // Now let's talk about some optional features of this training tool as well as some
     // important points you should understand.
     //
     // The first thing that should be pointed out is that, since this is a sliding
diff --git a/examples/graph_labeling_ex.cpp b/examples/graph_labeling_ex.cpp
index 2756e3f38..984a93bf5 100644
--- a/examples/graph_labeling_ex.cpp
+++ b/examples/graph_labeling_ex.cpp
@@ -194,13 +194,13 @@ int main()
     // indicate that all nodes were correctly classified.
     cout << "3-fold cross-validation: " << cross_validate_graph_labeling_trainer(trainer, samples, labels, 3) << endl;
-    // Since the trainer is working well. Lets have it make a graph_labeler
+    // Since the trainer is working well. Let's have it make a graph_labeler
     // based on the training data.
     graph_labeler labeler = trainer.train(samples, labels);
     /*
-        Lets try the graph_labeler on a new test graph. In particular, lets
+        Let's try the graph_labeler on a new test graph. In particular, let's
         use one with 5 nodes as shown below:
            (0 F)-----(1 T)
diff --git a/examples/gui_api_ex.cpp b/examples/gui_api_ex.cpp
index 42d5de09d..4d947b756 100644
--- a/examples/gui_api_ex.cpp
+++ b/examples/gui_api_ex.cpp
@@ -114,7 +114,7 @@ public:
         b.set_pos(10,60);
         b.set_name("button");
-        // lets put the label 5 pixels below the button
+        // let's put the label 5 pixels below the button
         c.set_pos(b.left(),b.bottom()+5);
@@ -137,7 +137,7 @@ public:
         // functions or lambda functions.
-        // Lets also make a simple menu bar.
+        // Let's also make a simple menu bar.
         // First we say how many menus we want in our menu bar. In this example we only want 1.
         mbar.set_number_of_menus(1);
         // Now we set the name of our menu. The 'M' means that the M in Menu will be underlined
@@ -147,12 +147,12 @@ public:
         // Now we add some items to the menu. Note that items in a menu are listed in the
         // order in which they were added.
-        // First lets make a menu item that does the same thing as our button does when it is clicked.
+        // First let's make a menu item that does the same thing as our button does when it is clicked.
         // Again, the 'C' means the C in Click is underlined in the menu.
         mbar.menu(0).add_menu_item(menu_item_text("Click Button!",*this,&win::on_button_clicked,'C'));
-        // lets add a separator (i.e. a horizontal separating line) to the menu
+        // let's add a separator (i.e. a horizontal separating line) to the menu
         mbar.menu(0).add_menu_item(menu_item_separator());
-        // Now lets make a menu item that calls show_about when the user selects it.
+        // Now let's make a menu item that calls show_about when the user selects it.
         mbar.menu(0).add_menu_item(menu_item_text("About",*this,&win::show_about,'A'));
diff --git a/examples/image_ex.cpp b/examples/image_ex.cpp
index 2c384f26f..8221c08aa 100644
--- a/examples/image_ex.cpp
+++ b/examples/image_ex.cpp
@@ -46,7 +46,7 @@ int main(int argc, char** argv)
     load_image(img, argv[1]);
-    // Now lets use some image functions. First lets blur the image a little.
+    // Now let's use some image functions. First let's blur the image a little.
     array2d blurred_img;
     gaussian_blur(img, blurred_img);
@@ -58,7 +58,7 @@ int main(int argc, char** argv)
     // now we do the non-maximum edge suppression step so that our edges are nice and thin
     suppress_non_maximum_edges(horz_gradient, vert_gradient, edge_image);
-    // Now we would like to see what our images look like. So lets use a
+    // Now we would like to see what our images look like. So let's use a
     // window to display them on the screen. (Note that you can zoom into
     // the window by holding CTRL and scrolling the mouse wheel)
     image_window my_window(edge_image, "Normal Edge Image");
diff --git a/examples/iosockstream_ex.cpp b/examples/iosockstream_ex.cpp
index fac370896..8a5dbbb24 100644
--- a/examples/iosockstream_ex.cpp
+++ b/examples/iosockstream_ex.cpp
@@ -28,7 +28,7 @@ int main()
     iosockstream stream("www.google.com:80");
     // At this point, we can use stream the same way we would use any other
-    // C++ iostream object. So to test it out, lets make a HTTP GET request
+    // C++ iostream object. So to test it out, let's make a HTTP GET request
     // for the main Google page.
     stream << "GET / HTTP/1.0\r\n\r\n";
diff --git a/examples/kcentroid_ex.cpp b/examples/kcentroid_ex.cpp
index 6f52cdabf..1f9311bc1 100644
--- a/examples/kcentroid_ex.cpp
+++ b/examples/kcentroid_ex.cpp
@@ -66,7 +66,7 @@ int main()
     running_stats rs;
-    // Now lets output the distance from the centroid to some points that are from the sinc function.
+    // Now let's output the distance from the centroid to some points that are from the sinc function.
     // These numbers should all be similar. We will also calculate the statistics of these numbers
     // by accumulating them into the running_stats object called rs. This will let us easily
     // find the mean and standard deviation of the distances for use below.
@@ -80,7 +80,7 @@ int main()
     m(0) = -0.5; m(1) = sinc(m(0)); cout << " " << test(m) << endl; rs.add(test(m));
     cout << endl;
-    // Lets output the distance from the centroid to some points that are NOT from the sinc function.
+    // Let's output the distance from the centroid to some points that are NOT from the sinc function.
     // These numbers should all be significantly bigger than previous set of numbers. We will also
     // use the rs.scale() function to find out how many standard deviations they are away from the
     // mean of the test points from the sinc function. So in this case our criterion for "significantly bigger"
diff --git a/examples/krls_ex.cpp b/examples/krls_ex.cpp
index ff04066cb..d20b90777 100644
--- a/examples/krls_ex.cpp
+++ b/examples/krls_ex.cpp
@@ -82,7 +82,7 @@ int main()
     serialize(test,fout);
     fout.close();
-    // now lets open that file back up and load the krls object it contains
+    // Now let's open that file back up and load the krls object it contains.
     ifstream fin("saved_krls_object.dat",ios::binary);
     deserialize(test, fin);
diff --git a/examples/krls_filter_ex.cpp b/examples/krls_filter_ex.cpp
index 45706222f..5bb74b183 100644
--- a/examples/krls_filter_ex.cpp
+++ b/examples/krls_filter_ex.cpp
@@ -63,7 +63,7 @@ int main()
     dlib::rand rnd;
-    // Now lets loop over a big range of values from the sinc() function. Each time
+    // Now let's loop over a big range of values from the sinc() function. Each time
     // adding some random noise to the data we send to the krls object for training.
     sample_type m;
     double mse_noise = 0;
diff --git a/examples/krr_classification_ex.cpp b/examples/krr_classification_ex.cpp
index 12eb8cc84..4eff004c0 100644
--- a/examples/krr_classification_ex.cpp
+++ b/examples/krr_classification_ex.cpp
@@ -43,7 +43,7 @@ int main()
     std::vector samples;
     std::vector labels;
-    // Now lets put some data into our samples and labels objects. We do this
+    // Now let's put some data into our samples and labels objects. We do this
     // by looping over a bunch of points and labeling them according to their
     // distance from the origin.
     for (double r = -20; r <= 20; r += 0.4)
@@ -129,7 +129,7 @@ int main()
     cout << "\nnumber of basis vectors in our learned_function is " << learned_function.function.basis_vectors.size() << endl;
-    // Now lets try this decision_function on some samples we haven't seen before.
+    // Now let's try this decision_function on some samples we haven't seen before.
     // The decision function will return values >= 0 for samples it predicts
     // are in the +1 class and numbers < 0 for samples it predicts to be in the -1 class.
     sample_type sample;
@@ -200,7 +200,7 @@ int main()
     serialize(learned_pfunct,fout);
     fout.close();
-    // now lets open that file back up and load the function object it contains
+    // Now let's open that file back up and load the function object it contains.
     ifstream fin("saved_function.dat",ios::binary);
     deserialize(learned_pfunct, fin);
diff --git a/examples/krr_regression_ex.cpp b/examples/krr_regression_ex.cpp
index 4b182f985..f1bb23694 100644
--- a/examples/krr_regression_ex.cpp
+++ b/examples/krr_regression_ex.cpp
@@ -98,7 +98,7 @@ int main()
     serialize(test,fout);
     fout.close();
-    // now lets open that file back up and load the function object it contains
+    // Now let's open that file back up and load the function object it contains.
     ifstream fin("saved_function.dat",ios::binary);
     deserialize(test, fin);
diff --git a/examples/least_squares_ex.cpp b/examples/least_squares_ex.cpp
index d00ce828f..875790b2d 100644
--- a/examples/least_squares_ex.cpp
+++ b/examples/least_squares_ex.cpp
@@ -95,7 +95,7 @@ int main()
     cout << "params: " << trans(params) << endl;
-    // Now lets generate a bunch of input/output pairs according to our model.
+    // Now let's generate a bunch of input/output pairs according to our model.
     std::vector > data_samples;
     input_vector input;
     for (int i = 0; i < 1000; ++i)
@@ -107,7 +107,7 @@ int main()
         data_samples.push_back(make_pair(input, output));
     }
-    // Before we do anything, lets make sure that our derivative function defined above matches
+    // Before we do anything, let's make sure that our derivative function defined above matches
     // the approximate derivative computed using central differences (via derivative()).
     // If this value is big then it means we probably typed the derivative function incorrectly.
     cout << "derivative error: " << length(residual_derivative(data_samples[0], params) -
@@ -117,7 +117,7 @@ int main()
-    // Now lets use the solve_least_squares_lm() routine to figure out what the
+    // Now let's use the solve_least_squares_lm() routine to figure out what the
     // parameters are based on just the data_samples.
     parameter_vector x;
     x = 1;
diff --git a/examples/linear_manifold_regularizer_ex.cpp b/examples/linear_manifold_regularizer_ex.cpp
index 36941f04f..9c6f10f26 100644
--- a/examples/linear_manifold_regularizer_ex.cpp
+++ b/examples/linear_manifold_regularizer_ex.cpp
@@ -98,7 +98,7 @@ using namespace dlib;
 // ----------------------------------------------------------------------------------------
-// First lets make a typedef for the kind of samples we will be using.
+// First let's make a typedef for the kind of samples we will be using.
 typedef matrix sample_type;
 // We will be using the radial_basis_kernel in this example program.
diff --git a/examples/matrix_ex.cpp b/examples/matrix_ex.cpp
index eefb32b0f..a56dbfbb2 100644
--- a/examples/matrix_ex.cpp
+++ b/examples/matrix_ex.cpp
@@ -16,7 +16,7 @@ using namespace std;
 int main()
 {
-    // Lets begin this example by using the library to solve a simple
+    // Let's begin this example by using the library to solve a simple
     // linear system.
     //
     // We will find the value of x such that y = M*x where
@@ -32,7 +32,7 @@ int main()
     // 5.9 0.05 1
-    // First lets declare these 3 matrices.
+    // First let's declare these 3 matrices.
     // This declares a matrix that contains doubles and has 3 rows and 1 column.
     // Moreover, it's size is a compile time constant since we put it inside the <>.
     matrix<double,3,1> y;
diff --git a/examples/matrix_expressions_ex.cpp b/examples/matrix_expressions_ex.cpp
index f85779288..b52370907 100644
--- a/examples/matrix_expressions_ex.cpp
+++ b/examples/matrix_expressions_ex.cpp
@@ -354,7 +354,7 @@ void custom_matrix_expressions_example(
     cout << x << endl;
-    // Finally, lets use the matrix expressions we defined above.
+    // Finally, let's use the matrix expressions we defined above.
     // prints the transpose of x
     cout << example_trans(x) << endl;
@@ -382,7 +382,7 @@ void custom_matrix_expressions_example(
     vect.push_back(3);
     vect.push_back(5);
-    // Now lets treat our std::vector like a matrix and print some things.
+    // Now let's treat our std::vector like a matrix and print some things.
     cout << example_vector_to_matrix(vect) << endl;
     cout << add_scalar(example_vector_to_matrix(vect), 10) << endl;
diff --git a/examples/mlp_ex.cpp b/examples/mlp_ex.cpp
index f2fedce4b..372753c81 100644
--- a/examples/mlp_ex.cpp
+++ b/examples/mlp_ex.cpp
@@ -44,7 +44,7 @@ int main()
     // their default values.
     mlp::kernel_1a_c net(2,5);
-    // Now lets put some data into our sample and train on it. We do this
+    // Now let's put some data into our sample and train on it. We do this
     // by looping over 41*41 points and labeling them according to their
     // distance from the origin.
     for (int i = 0; i < 1000; ++i)
@@ -65,7 +65,7 @@ int main()
         }
     }
-    // Now we have trained our mlp. Lets see how well it did.
+    // Now we have trained our mlp. Let's see how well it did.
     // Note that if you run this program multiple times you will get different results. This
     // is because the mlp network is randomly initialized.
diff --git a/examples/model_selection_ex.cpp b/examples/model_selection_ex.cpp
index 11e6a2748..20a3e0b82 100644
--- a/examples/model_selection_ex.cpp
+++ b/examples/model_selection_ex.cpp
@@ -101,7 +101,7 @@ int main()
     std::vector samples;
     std::vector labels;
-    // Now lets put some data into our samples and labels objects. We do this
+    // Now let's put some data into our samples and labels objects. We do this
     // by looping over a bunch of points and labeling them according to their
     // distance from the origin.
     for (double r = -20; r <= 20; r += 0.8)
diff --git a/examples/multiclass_classification_ex.cpp b/examples/multiclass_classification_ex.cpp
index da93bf7b6..cb7c46abd 100644
--- a/examples/multiclass_classification_ex.cpp
+++ b/examples/multiclass_classification_ex.cpp
@@ -92,7 +92,7 @@ int main()
     // still be solved with the rbf_trainer.
     trainer.set_trainer(poly_trainer, 1, 2);
-    // Now lets do 5-fold cross-validation using the one_vs_one_trainer we just setup.
+    // Now let's do 5-fold cross-validation using the one_vs_one_trainer we just setup.
     // As an aside, always shuffle the order of the samples before doing cross validation.
     // For a discussion of why this is a good idea see the svm_ex.cpp example.
     randomize_samples(samples, labels);
diff --git a/examples/object_detector_advanced_ex.cpp b/examples/object_detector_advanced_ex.cpp
index 79411f36c..31897677b 100644
--- a/examples/object_detector_advanced_ex.cpp
+++ b/examples/object_detector_advanced_ex.cpp
@@ -203,7 +203,7 @@ int main()
     typedef scan_image_pyramid, very_simple_feature_extractor> image_scanner_type;
     image_scanner_type scanner;
-    // Instead of using setup_grid_detection_templates() like in object_detector_ex.cpp, lets manually
+    // Instead of using setup_grid_detection_templates() like in object_detector_ex.cpp, let's manually
     // setup the sliding window box. We use a window with the same shape as the white boxes we
     // are trying to detect.
     const rectangle object_box = compute_box_dimensions(1, // width/height ratio
@@ -272,7 +272,7 @@ int main()
     */
-    // Lets display the output of the detector along with our training images.
+    // Let's display the output of the detector along with our training images.
     image_window win;
     for (unsigned long i = 0; i < images.size(); ++i)
     {
diff --git a/examples/object_detector_ex.cpp b/examples/object_detector_ex.cpp
index 70043225b..f7168fe62 100644
--- a/examples/object_detector_ex.cpp
+++ b/examples/object_detector_ex.cpp
@@ -226,7 +226,7 @@ int main()
-    // Lets display the output of the detector along with our training images.
+    // Let's display the output of the detector along with our training images.
     image_window win;
     for (unsigned long i = 0; i < images.size(); ++i)
     {
diff --git a/examples/one_class_classifiers_ex.cpp b/examples/one_class_classifiers_ex.cpp
index 0708db41e..50b94ea02 100644
--- a/examples/one_class_classifiers_ex.cpp
+++ b/examples/one_class_classifiers_ex.cpp
@@ -66,7 +66,7 @@ int main()
     // anomalous (i.e. not on the sinc() curve in our case).
     decision_function df = trainer.train(samples);
-    // So for example, lets look at the output from some points on the sinc() curve.
+    // So for example, let's look at the output from some points on the sinc() curve.
     cout << "Points that are on the sinc function:\n";
     m(0) = -1.5; m(1) = sinc(m(0)); cout << " " << df(m) << endl;
     m(0) = -1.5; m(1) = sinc(m(0)); cout << " " << df(m) << endl;
diff --git a/examples/optimization_ex.cpp b/examples/optimization_ex.cpp
index 6f9e152a7..8b2f9bff2 100644
--- a/examples/optimization_ex.cpp
+++ b/examples/optimization_ex.cpp
@@ -201,7 +201,7 @@ int main()
     cout << "rosen solution:\n" << starting_point << endl;
-    // Now lets try doing it again with a different starting point and the version
+    // Now let's try doing it again with a different starting point and the version
     // of find_min() that doesn't require you to supply a derivative function.
     // This version will compute a numerical approximation of the derivative since
     // we didn't supply one to it.
@@ -285,7 +285,7 @@ int main()
-    // Now lets look at using the test_function object with the optimization
+    // Now let's look at using the test_function object with the optimization
     // functions.
     cout << "\nFind the minimum of the test_function" << endl;
@@ -306,7 +306,7 @@ int main()
     // At this point the correct value of (3,5,1,7) should be found and stored in starting_point
     cout << "test_function solution:\n" << starting_point << endl;
-    // Now lets try it again with the conjugate gradient algorithm.
+    // Now let's try it again with the conjugate gradient algorithm.
     starting_point = -4,5,99,3;
     find_min_using_approximate_derivatives(cg_search_strategy(),
                                            objective_delta_stop_strategy(1e-7),
@@ -315,7 +315,7 @@ int main()
-    // Finally, lets try the BOBYQA algorithm. This is a technique specially
+    // Finally, let's try the BOBYQA algorithm. This is a technique specially
     // designed to minimize a function in the absence of derivative information.
     // Generally speaking, it is the method of choice if derivatives are not available.
     starting_point = -4,5,99,3;
diff --git a/examples/quantum_computing_ex.cpp b/examples/quantum_computing_ex.cpp
index c6bb4f768..fcc7c845f 100644
--- a/examples/quantum_computing_ex.cpp
+++ b/examples/quantum_computing_ex.cpp
@@ -296,8 +296,8 @@ int main()
-    // Now lets test out the Shor 9 bit encoding
-    cout << "\n\n\n\nNow lets try playing around with Shor's 9bit error correcting code" << endl;
+    // Now let's test out the Shor 9 bit encoding
+    cout << "\n\n\n\nNow let's try playing around with Shor's 9bit error correcting code" << endl;
     // Reset the quantum register to contain a single bit
     reg.set_num_bits(1);
diff --git a/examples/rank_features_ex.cpp b/examples/rank_features_ex.cpp
index 4adaa687f..548db4be7 100644
--- a/examples/rank_features_ex.cpp
+++ b/examples/rank_features_ex.cpp
@@ -36,7 +36,7 @@ int main()
-    // Now lets make some vector objects that can hold our samples
+    // Now let's make some vector objects that can hold our samples
     std::vector samples;
     std::vector labels;
diff --git a/examples/rvm_ex.cpp b/examples/rvm_ex.cpp
index c427ddaaf..fcf648233 100644
--- a/examples/rvm_ex.cpp
+++ b/examples/rvm_ex.cpp
@@ -47,7 +47,7 @@ int main()
     std::vector samples;
     std::vector labels;
-    // Now lets put some data into our samples and labels objects. We do this
+    // Now let's put some data into our samples and labels objects. We do this
     // by looping over a bunch of points and labeling them according to their
     // distance from the origin.
     for (int r = -20; r <= 20; ++r)
@@ -141,11 +141,11 @@ int main()
     learned_function.normalizer = normalizer; // save normalization information
     learned_function.function = trainer.train(samples, labels); // perform the actual RVM training and save the results
-    // print out the number of relevance vectors in the resulting decision function
+    // Print out the number of relevance vectors in the resulting decision function.
     cout << "\nnumber of relevance vectors in our learned_function is " << learned_function.function.basis_vectors.size() << endl;
-    // now lets try this decision_function on some samples we haven't seen before
+    // Now let's try this decision_function on some samples we haven't seen before
     sample_type sample;
     sample(0) = 3.123;
@@ -209,7 +209,7 @@ int main()
     serialize(learned_pfunct,fout);
     fout.close();
-    // now lets open that file back up and load the function object it contains
+    // Now let's open that file back up and load the function object it contains.
     ifstream fin("saved_function.dat",ios::binary);
     deserialize(learned_pfunct, fin);
diff --git a/examples/rvm_regression_ex.cpp b/examples/rvm_regression_ex.cpp
index 0ff6e85f1..c2604b583 100644
--- a/examples/rvm_regression_ex.cpp
+++ b/examples/rvm_regression_ex.cpp
@@ -95,7 +95,7 @@ int main()
     serialize(test,fout);
     fout.close();
-    // now lets open that file back up and load the function object it contains
+    // Now let's open that file back up and load the function object it contains.
     ifstream fin("saved_function.dat",ios::binary);
     deserialize(test, fin);
diff --git a/examples/sequence_segmenter_ex.cpp b/examples/sequence_segmenter_ex.cpp
index 813b6f5de..ba0897d48 100644
--- a/examples/sequence_segmenter_ex.cpp
+++ b/examples/sequence_segmenter_ex.cpp
@@ -192,7 +192,7 @@ int main()
     sequence_segmenter segmenter = trainer.train(samples, segments);
-    // Lets print out all the segments our segmenter detects.
+    // Let's print out all the segments our segmenter detects.
     for (unsigned long i = 0; i < samples.size(); ++i)
     {
         // get all the detected segments in samples[i]
         ...
     }
@@ -205,7 +205,7 @@ int main()
-    // Now lets test it on a new sentence and see what it detects.
+    // Now let's test it on a new sentence and see what it detects.
     std::vector sentence(split("There once was a man from Nantucket whose name rhymed with Bob Bucket"));
     std::vector > seg = segmenter(sentence);
     for (unsigned long j = 0; j < seg.size(); ++j)
diff --git a/examples/svm_ex.cpp b/examples/svm_ex.cpp
index 64a78c522..7d598536f 100644
--- a/examples/svm_ex.cpp
+++ b/examples/svm_ex.cpp
@@ -47,7 +47,7 @@ int main()
     std::vector samples;
     std::vector labels;
-    // Now lets put some data into our samples and labels objects. We do this by looping
+    // Now let's put some data into our samples and labels objects. We do this by looping
     // over a bunch of points and labeling them according to their distance from the
     // origin.
     for (int r = -20; r <= 20; ++r)
@@ -149,7 +149,7 @@ int main()
     cout << "\nnumber of support vectors in our learned_function is " << learned_function.function.basis_vectors.size() << endl;
-    // now lets try this decision_function on some samples we haven't seen before
+    // Now let's try this decision_function on some samples we haven't seen before.
     sample_type sample;
     sample(0) = 3.123;
@@ -214,7 +214,7 @@ int main()
     serialize(learned_pfunct,fout);
     fout.close();
-    // now lets open that file back up and load the function object it contains
+    // Now let's open that file back up and load the function object it contains.
     ifstream fin("saved_function.dat",ios::binary);
     deserialize(learned_pfunct, fin);
@@ -242,7 +242,7 @@ int main()
     cout << "\ncross validation accuracy with only 10 support vectors: " << cross_validate_trainer(reduced2(trainer,10), samples, labels, 3);
-    // Lets print out the original cross validation score too for comparison.
+    // Let's print out the original cross validation score too for comparison.
     cout << "cross validation accuracy with all the original support vectors: " << cross_validate_trainer(trainer, samples, labels, 3);
diff --git a/examples/svm_pegasos_ex.cpp b/examples/svm_pegasos_ex.cpp
index bb91d7792..e69b485fc 100644
--- a/examples/svm_pegasos_ex.cpp
+++ b/examples/svm_pegasos_ex.cpp
@@ -67,7 +67,7 @@ int main()
     center = 20, 20;
-    // Now lets go into a loop and randomly generate 1000 samples.
+    // Now let's go into a loop and randomly generate 1000 samples.
     srand(time(0));
     for (int i = 0; i < 10000; ++i)
     {
@@ -96,7 +96,7 @@ int main()
         }
     }
-    // Now we have trained our SVM. Lets see how well it did.
+    // Now we have trained our SVM. Let's see how well it did.
     // Each of these statements prints out the output of the SVM given a particular sample.
     // The SVM outputs a number > 0 if a sample is predicted to be in the +1 class and < 0
     // if a sample is predicted to be in the -1 class.
@@ -123,7 +123,7 @@ int main()
     // function. To support this the dlib library provides functions for converting an online
     // training object like svm_pegasos into a batch training object.
-    // First lets clear out anything in the trainer object.
+    // First let's clear out anything in the trainer object.
     trainer.clear();
     // Now to begin with, you might want to compute the cross validation score of a trainer object
diff --git a/examples/svm_rank_ex.cpp b/examples/svm_rank_ex.cpp
index 6b39f5905..e39b90a1b 100644
--- a/examples/svm_rank_ex.cpp
+++ b/examples/svm_rank_ex.cpp
@@ -38,7 +38,7 @@ int main()
     typedef matrix sample_type;
-    // Now lets make some testing data. To make it really simple, lets
+    // Now let's make some testing data. To make it really simple, let's
     // suppose that vectors with positive values in the first dimension
     // should rank higher than other vectors. So what we do is make
     // examples of relevant (i.e. high ranking) and non-relevant (i.e. low
diff --git a/examples/svm_sparse_ex.cpp b/examples/svm_sparse_ex.cpp
index 784fe5406..5d68e4a2c 100644
--- a/examples/svm_sparse_ex.cpp
+++ b/examples/svm_sparse_ex.cpp
@@ -45,7 +45,7 @@ int main()
     // description of what this parameter does.
     trainer.set_lambda(0.00001);
-    // Lets also use the svm trainer specially optimized for the linear_kernel and
+    // Let's also use the svm trainer specially optimized for the linear_kernel and
     // sparse_linear_kernel.
     svm_c_linear_trainer linear_trainer;
     // This trainer solves the "C" formulation of the SVM. See the documentation for
@@ -59,7 +59,7 @@ int main()
     sample_type sample;
-    // Now lets go into a loop and randomly generate 10000 samples.
+    // Now let's go into a loop and randomly generate 10000 samples.
     srand(time(0));
     double label = +1;
     for (int i = 0; i < 10000; ++i)
@@ -87,11 +87,11 @@ int main()
         labels.push_back(label);
     }
-    // In addition to the rule we learned with the pegasos trainer lets also use our linear_trainer
-    // to learn a decision rule.
+    // In addition to the rule we learned with the pegasos trainer, let's also use our
+    // linear_trainer to learn a decision rule.
     decision_function df = linear_trainer.train(samples, labels);
-    // Now we have trained our SVMs. Lets test them out a bit.
+    // Now we have trained our SVMs. Let's test them out a bit.
     // Each of these statements prints the output of the SVMs given a particular sample.
     // Each SVM outputs a number > 0 if a sample is predicted to be in the +1 class and < 0
     // if a sample is predicted to be in the -1 class.
diff --git a/examples/svm_struct_ex.cpp b/examples/svm_struct_ex.cpp
index 7d0fffe36..f79ae4d14 100644
--- a/examples/svm_struct_ex.cpp
+++ b/examples/svm_struct_ex.cpp
@@ -245,7 +245,7 @@ public:
     // are the four virtual functions defined below.
-    // So lets make an empty 9-dimensional PSI vector
+    // So let's make an empty 9-dimensional PSI vector
     feature_vector_type psi(get_num_dimensions());
     psi = 0; // zero initialize it
diff --git a/examples/train_object_detector.cpp b/examples/train_object_detector.cpp
index 4f983c896..170684701 100644
--- a/examples/train_object_detector.cpp
+++ b/examples/train_object_detector.cpp
@@ -23,7 +23,7 @@
         cmake --build . --config Release
     Note that you may need to install CMake (www.cmake.org) for this to work.
-    Next, lets assume you have a folder of images called /tmp/images. These images
+    Next, let's assume you have a folder of images called /tmp/images. These images
     should contain examples of the objects you want to learn to detect. You will
     use the imglab tool to label these objects. Do this by typing the following
        ./imglab -c mydataset.xml /tmp/images
diff --git a/examples/using_custom_kernels_ex.cpp b/examples/using_custom_kernels_ex.cpp
index 3a5c98876..f0cac6905 100644
--- a/examples/using_custom_kernels_ex.cpp
+++ b/examples/using_custom_kernels_ex.cpp
@@ -139,7 +139,7 @@ int main()
     typedef ukf_kernel kernel_type;
-    // Now lets generate some training data
+    // Now let's generate some training data
     std::vector samples;
     std::vector labels;
     for (double r = -20; r <= 20; r += 0.9)
@@ -177,7 +177,7 @@ int main()
     trainer.use_classification_loss_for_loo_cv();
-    // Finally, lets test how good our new kernel is by doing some leave-one-out cross-validation.
+    // Finally, let's test how good our new kernel is by doing some leave-one-out cross-validation.
     cout << "\ndoing leave-one-out cross-validation" << endl;
     for (double sigma = 0.01; sigma <= 100; sigma *= 3)
     {