mirror of https://github.com/davisking/dlib.git
Fixing grammar in comments.
parent f9d3da11d0
commit 114f677d74
@@ -161,7 +161,7 @@ int main()
-// We have now finished setting up our bayesian network. So lets compute some
+// We have now finished setting up our bayesian network. So let's compute some
 // probability values. The first thing we will do is compute the prior probability
 // of each node in the network. To do this we will use the join tree algorithm which
 // is an algorithm for performing exact inference in a bayesian network.
@@ -198,7 +198,7 @@ int main()
 cout << "\n\n\n";
-// Now to make things more interesting lets say that we have discovered that the C
+// Now to make things more interesting let's say that we have discovered that the C
 // node really has a value of 1. That is to say, we now have evidence that
 // C is 1. We can represent this in the network using the following two function
 // calls.
@@ -44,7 +44,7 @@ int main(int argc, char** argv)
 cout << "Number of nodes in the network: " << bn.number_of_nodes() << endl;
-// Lets compute some probability values using the loaded network using the join tree (aka. Junction
+// Let's compute some probability values using the loaded network using the join tree (aka. Junction
 // Tree) algorithm.
 // First we need to create an undirected graph which contains set objects at each node and
@@ -413,7 +413,7 @@ initialize_node_cpt_if_necessary (
 {
 node_type& node = graph_drawer.graph_node(index);
-// if the cpt for this node isn't properly filled out then lets clear it out
+// if the cpt for this node isn't properly filled out then let's clear it out
 // and populate it with some reasonable default values
 if (node_cpt_filled_out(graph_drawer.graph(), index) == false)
 {
@@ -103,7 +103,7 @@ void run_example_1(
-// Now lets put some things into the out pipe
+// Now let's put some things into the out pipe
 int value = 1;
 out.enqueue(value);
@@ -308,7 +308,7 @@ void run_example_4(
 bridge_status bs;
 // Once a connection is established it will generate a status message from each bridge.
-// Lets get those and print them.
+// Let's get those and print them.
 b1_status.dequeue(bs);
 cout << "bridge 1 status: is_connected: " << boolalpha << bs.is_connected << endl;
 cout << "bridge 1 status: foreign_ip: " << bs.foreign_ip << endl;
@@ -75,7 +75,7 @@ int main()
 // Use our recursive function to print everything in the config file.
 print_config_reader_contents(cr);
-// Now lets access some of the fields of the config file directly. You
+// Now let's access some of the fields of the config file directly. You
 // use [] for accessing key values and .block() for accessing sub-blocks.
 // Print out the string value assigned to key1 in the config file
@@ -174,7 +174,7 @@ int main()
 trainer.set_trainer(rbf_trainer, "upper_left", "lower_right");
-// Now lets do 5-fold cross-validation using the one_vs_one_trainer we just setup.
+// Now let's do 5-fold cross-validation using the one_vs_one_trainer we just setup.
 // As an aside, always shuffle the order of the samples before doing cross validation.
 // For a discussion of why this is a good idea see the svm_ex.cpp example.
 randomize_samples(samples, labels);
@@ -201,7 +201,7 @@ int main()
 */
-// Finally, lets save our multiclass decision rule to disk. Remember that we have
+// Finally, let's save our multiclass decision rule to disk. Remember that we have
 // to specify the types of binary decision function used inside the one_vs_one_decision_function.
 one_vs_one_decision_function<ovo_trainer,
 custom_decision_function, // This is the output of the simple_custom_trainer
@@ -76,7 +76,7 @@ using namespace dlib;
 // ----------------------------------------------------------------------------------------
-// First lets make a typedef for the kind of samples we will be using.
+// First let's make a typedef for the kind of samples we will be using.
 typedef matrix<double, 0, 1> sample_type;
 // We will be using the radial_basis_kernel in this example program.
@@ -213,7 +213,7 @@ void test_empirical_kernel_map (
-// Now lets do something more interesting. The following loop finds the centroids
+// Now let's do something more interesting. The following loop finds the centroids
 // of the two classes of data.
 sample_type class1_center;
 sample_type class2_center;
@@ -254,7 +254,7 @@ void test_empirical_kernel_map (
 // Next, note that classifying a point based on its distance between two other
 // points is the same thing as using the plane that lies between those two points
-// as a decision boundary. So lets compute that decision plane and use it to classify
+// as a decision boundary. So let's compute that decision plane and use it to classify
 // all the points.
 sample_type plane_normal_vector = class1_center - class2_center;
@@ -291,7 +291,7 @@ void test_empirical_kernel_map (
 {
 double side = dec_funct(samples[i]);
-// And lets just check that the dec_funct really does compute the same thing as the previous equation.
+// And let's just check that the dec_funct really does compute the same thing as the previous equation.
 double side_alternate_equation = dot(plane_normal_vector, projected_samples[i]) - bias;
 if (abs(side-side_alternate_equation) > 1e-14)
 cout << "dec_funct error: " << abs(side-side_alternate_equation) << endl;
@@ -55,7 +55,7 @@ int main(int argc, char** argv)
 cout << "hog image has " << hog.nr() << " rows and " << hog.nc() << " columns." << endl;
-// Lets see what the image and FHOG features look like.
+// Let's see what the image and FHOG features look like.
 image_window win(img);
 image_window winhog(draw_fhog(hog));
@@ -161,7 +161,7 @@ int main(int argc, char** argv)
 // a face.
 image_window hogwin(draw_fhog(detector), "Learned fHOG detector");
-// Now for the really fun part. Lets display the testing images on the screen and
+// Now for the really fun part. Let's display the testing images on the screen and
 // show the output of the face detector overlaid on each image. You will see that
 // it finds all the faces without false alarming on any non-faces.
 image_window win;
@@ -191,7 +191,7 @@ int main(int argc, char** argv)
-// Now lets talk about some optional features of this training tool as well as some
+// Now let's talk about some optional features of this training tool as well as some
 // important points you should understand.
 //
 // The first thing that should be pointed out is that, since this is a sliding
@@ -194,13 +194,13 @@ int main()
 // indicate that all nodes were correctly classified.
 cout << "3-fold cross-validation: " << cross_validate_graph_labeling_trainer(trainer, samples, labels, 3) << endl;
-// Since the trainer is working well. Lets have it make a graph_labeler
+// Since the trainer is working well. Let's have it make a graph_labeler
 // based on the training data.
 graph_labeler<vector_type> labeler = trainer.train(samples, labels);
 /*
-Lets try the graph_labeler on a new test graph. In particular, lets
+Let's try the graph_labeler on a new test graph. In particular, let's
 use one with 5 nodes as shown below:
 (0 F)-----(1 T)
@@ -114,7 +114,7 @@ public:
 b.set_pos(10,60);
 b.set_name("button");
-// lets put the label 5 pixels below the button
+// let's put the label 5 pixels below the button
 c.set_pos(b.left(),b.bottom()+5);
@@ -137,7 +137,7 @@ public:
 // functions or lambda functions.
-// Lets also make a simple menu bar.
+// Let's also make a simple menu bar.
 // First we say how many menus we want in our menu bar. In this example we only want 1.
 mbar.set_number_of_menus(1);
 // Now we set the name of our menu. The 'M' means that the M in Menu will be underlined
@@ -147,12 +147,12 @@ public:
 // Now we add some items to the menu. Note that items in a menu are listed in the
 // order in which they were added.
-// First lets make a menu item that does the same thing as our button does when it is clicked.
+// First let's make a menu item that does the same thing as our button does when it is clicked.
 // Again, the 'C' means the C in Click is underlined in the menu.
 mbar.menu(0).add_menu_item(menu_item_text("Click Button!",*this,&win::on_button_clicked,'C'));
-// lets add a separator (i.e. a horizontal separating line) to the menu
+// let's add a separator (i.e. a horizontal separating line) to the menu
 mbar.menu(0).add_menu_item(menu_item_separator());
-// Now lets make a menu item that calls show_about when the user selects it.
+// Now let's make a menu item that calls show_about when the user selects it.
 mbar.menu(0).add_menu_item(menu_item_text("About",*this,&win::show_about,'A'));
@@ -46,7 +46,7 @@ int main(int argc, char** argv)
 load_image(img, argv[1]);
-// Now lets use some image functions. First lets blur the image a little.
+// Now let's use some image functions. First let's blur the image a little.
 array2d<unsigned char> blurred_img;
 gaussian_blur(img, blurred_img);
@@ -58,7 +58,7 @@ int main(int argc, char** argv)
 // now we do the non-maximum edge suppression step so that our edges are nice and thin
 suppress_non_maximum_edges(horz_gradient, vert_gradient, edge_image);
-// Now we would like to see what our images look like. So lets use a
+// Now we would like to see what our images look like. So let's use a
 // window to display them on the screen. (Note that you can zoom into
 // the window by holding CTRL and scrolling the mouse wheel)
 image_window my_window(edge_image, "Normal Edge Image");
@@ -28,7 +28,7 @@ int main()
 iosockstream stream("www.google.com:80");
 // At this point, we can use stream the same way we would use any other
-// C++ iostream object. So to test it out, lets make a HTTP GET request
+// C++ iostream object. So to test it out, let's make a HTTP GET request
 // for the main Google page.
 stream << "GET / HTTP/1.0\r\n\r\n";
@@ -66,7 +66,7 @@ int main()
 running_stats<double> rs;
-// Now lets output the distance from the centroid to some points that are from the sinc function.
+// Now let's output the distance from the centroid to some points that are from the sinc function.
 // These numbers should all be similar. We will also calculate the statistics of these numbers
 // by accumulating them into the running_stats object called rs. This will let us easily
 // find the mean and standard deviation of the distances for use below.
@@ -80,7 +80,7 @@ int main()
 m(0) = -0.5; m(1) = sinc(m(0)); cout << " " << test(m) << endl; rs.add(test(m));
 cout << endl;
-// Lets output the distance from the centroid to some points that are NOT from the sinc function.
+// Let's output the distance from the centroid to some points that are NOT from the sinc function.
 // These numbers should all be significantly bigger than previous set of numbers. We will also
 // use the rs.scale() function to find out how many standard deviations they are away from the
 // mean of the test points from the sinc function. So in this case our criterion for "significantly bigger"
@@ -82,7 +82,7 @@ int main()
 serialize(test,fout);
 fout.close();
-// now lets open that file back up and load the krls object it contains
+// Now let's open that file back up and load the krls object it contains.
 ifstream fin("saved_krls_object.dat",ios::binary);
 deserialize(test, fin);
@@ -63,7 +63,7 @@ int main()
 dlib::rand rnd;
-// Now lets loop over a big range of values from the sinc() function. Each time
+// Now let's loop over a big range of values from the sinc() function. Each time
 // adding some random noise to the data we send to the krls object for training.
 sample_type m;
 double mse_noise = 0;
@@ -43,7 +43,7 @@ int main()
 std::vector<sample_type> samples;
 std::vector<double> labels;
-// Now lets put some data into our samples and labels objects. We do this
+// Now let's put some data into our samples and labels objects. We do this
 // by looping over a bunch of points and labeling them according to their
 // distance from the origin.
 for (double r = -20; r <= 20; r += 0.4)
@@ -129,7 +129,7 @@ int main()
 cout << "\nnumber of basis vectors in our learned_function is "
 << learned_function.function.basis_vectors.size() << endl;
-// Now lets try this decision_function on some samples we haven't seen before.
+// Now let's try this decision_function on some samples we haven't seen before.
 // The decision function will return values >= 0 for samples it predicts
 // are in the +1 class and numbers < 0 for samples it predicts to be in the -1 class.
 sample_type sample;
@@ -200,7 +200,7 @@ int main()
 serialize(learned_pfunct,fout);
 fout.close();
-// now lets open that file back up and load the function object it contains
+// Now let's open that file back up and load the function object it contains.
 ifstream fin("saved_function.dat",ios::binary);
 deserialize(learned_pfunct, fin);
@@ -98,7 +98,7 @@ int main()
 serialize(test,fout);
 fout.close();
-// now lets open that file back up and load the function object it contains
+// Now let's open that file back up and load the function object it contains.
 ifstream fin("saved_function.dat",ios::binary);
 deserialize(test, fin);
@@ -95,7 +95,7 @@ int main()
 cout << "params: " << trans(params) << endl;
-// Now lets generate a bunch of input/output pairs according to our model.
+// Now let's generate a bunch of input/output pairs according to our model.
 std::vector<std::pair<input_vector, double> > data_samples;
 input_vector input;
 for (int i = 0; i < 1000; ++i)
@@ -107,7 +107,7 @@ int main()
 data_samples.push_back(make_pair(input, output));
 }
-// Before we do anything, lets make sure that our derivative function defined above matches
+// Before we do anything, let's make sure that our derivative function defined above matches
 // the approximate derivative computed using central differences (via derivative()).
 // If this value is big then it means we probably typed the derivative function incorrectly.
 cout << "derivative error: " << length(residual_derivative(data_samples[0], params) -
@@ -117,7 +117,7 @@ int main()
-// Now lets use the solve_least_squares_lm() routine to figure out what the
+// Now let's use the solve_least_squares_lm() routine to figure out what the
 // parameters are based on just the data_samples.
 parameter_vector x;
 x = 1;
@@ -98,7 +98,7 @@ using namespace dlib;
 // ----------------------------------------------------------------------------------------
-// First lets make a typedef for the kind of samples we will be using.
+// First let's make a typedef for the kind of samples we will be using.
 typedef matrix<double, 0, 1> sample_type;
 // We will be using the radial_basis_kernel in this example program.
@@ -16,7 +16,7 @@ using namespace std;
 int main()
 {
-// Lets begin this example by using the library to solve a simple
+// Let's begin this example by using the library to solve a simple
 // linear system.
 //
 // We will find the value of x such that y = M*x where
@@ -32,7 +32,7 @@ int main()
 // 5.9 0.05 1
-// First lets declare these 3 matrices.
+// First let's declare these 3 matrices.
 // This declares a matrix that contains doubles and has 3 rows and 1 column.
 // Moreover, it's size is a compile time constant since we put it inside the <>.
 matrix<double,3,1> y;
@@ -354,7 +354,7 @@ void custom_matrix_expressions_example(
 cout << x << endl;
-// Finally, lets use the matrix expressions we defined above.
+// Finally, let's use the matrix expressions we defined above.
 // prints the transpose of x
 cout << example_trans(x) << endl;
@@ -382,7 +382,7 @@ void custom_matrix_expressions_example(
 vect.push_back(3);
 vect.push_back(5);
-// Now lets treat our std::vector like a matrix and print some things.
+// Now let's treat our std::vector like a matrix and print some things.
 cout << example_vector_to_matrix(vect) << endl;
 cout << add_scalar(example_vector_to_matrix(vect), 10) << endl;
@@ -44,7 +44,7 @@ int main()
 // their default values.
 mlp::kernel_1a_c net(2,5);
-// Now lets put some data into our sample and train on it. We do this
+// Now let's put some data into our sample and train on it. We do this
 // by looping over 41*41 points and labeling them according to their
 // distance from the origin.
 for (int i = 0; i < 1000; ++i)
@@ -65,7 +65,7 @@ int main()
 }
 }
-// Now we have trained our mlp. Lets see how well it did.
+// Now we have trained our mlp. Let's see how well it did.
 // Note that if you run this program multiple times you will get different results. This
 // is because the mlp network is randomly initialized.
@@ -101,7 +101,7 @@ int main()
 std::vector<sample_type> samples;
 std::vector<double> labels;
-// Now lets put some data into our samples and labels objects. We do this
+// Now let's put some data into our samples and labels objects. We do this
 // by looping over a bunch of points and labeling them according to their
 // distance from the origin.
 for (double r = -20; r <= 20; r += 0.8)
@@ -92,7 +92,7 @@ int main()
 // still be solved with the rbf_trainer.
 trainer.set_trainer(poly_trainer, 1, 2);
-// Now lets do 5-fold cross-validation using the one_vs_one_trainer we just setup.
+// Now let's do 5-fold cross-validation using the one_vs_one_trainer we just setup.
 // As an aside, always shuffle the order of the samples before doing cross validation.
 // For a discussion of why this is a good idea see the svm_ex.cpp example.
 randomize_samples(samples, labels);
@@ -203,7 +203,7 @@ int main()
 typedef scan_image_pyramid<pyramid_down<5>, very_simple_feature_extractor> image_scanner_type;
 image_scanner_type scanner;
-// Instead of using setup_grid_detection_templates() like in object_detector_ex.cpp, lets manually
+// Instead of using setup_grid_detection_templates() like in object_detector_ex.cpp, let's manually
 // setup the sliding window box. We use a window with the same shape as the white boxes we
 // are trying to detect.
 const rectangle object_box = compute_box_dimensions(1, // width/height ratio
@@ -272,7 +272,7 @@ int main()
 */
-// Lets display the output of the detector along with our training images.
+// Let's display the output of the detector along with our training images.
 image_window win;
 for (unsigned long i = 0; i < images.size(); ++i)
 {
@@ -226,7 +226,7 @@ int main()
-// Lets display the output of the detector along with our training images.
+// Let's display the output of the detector along with our training images.
 image_window win;
 for (unsigned long i = 0; i < images.size(); ++i)
 {
@@ -66,7 +66,7 @@ int main()
 // anomalous (i.e. not on the sinc() curve in our case).
 decision_function<kernel_type> df = trainer.train(samples);
-// So for example, lets look at the output from some points on the sinc() curve.
+// So for example, let's look at the output from some points on the sinc() curve.
 cout << "Points that are on the sinc function:\n";
 m(0) = -1.5; m(1) = sinc(m(0)); cout << " " << df(m) << endl;
 m(0) = -1.5; m(1) = sinc(m(0)); cout << " " << df(m) << endl;
@@ -201,7 +201,7 @@ int main()
 cout << "rosen solution:\n" << starting_point << endl;
-// Now lets try doing it again with a different starting point and the version
+// Now let's try doing it again with a different starting point and the version
 // of find_min() that doesn't require you to supply a derivative function.
 // This version will compute a numerical approximation of the derivative since
 // we didn't supply one to it.
@@ -285,7 +285,7 @@ int main()
-// Now lets look at using the test_function object with the optimization
+// Now let's look at using the test_function object with the optimization
 // functions.
 cout << "\nFind the minimum of the test_function" << endl;
@@ -306,7 +306,7 @@ int main()
 // At this point the correct value of (3,5,1,7) should be found and stored in starting_point
 cout << "test_function solution:\n" << starting_point << endl;
-// Now lets try it again with the conjugate gradient algorithm.
+// Now let's try it again with the conjugate gradient algorithm.
 starting_point = -4,5,99,3;
 find_min_using_approximate_derivatives(cg_search_strategy(),
 objective_delta_stop_strategy(1e-7),
@@ -315,7 +315,7 @@ int main()
-// Finally, lets try the BOBYQA algorithm. This is a technique specially
+// Finally, let's try the BOBYQA algorithm. This is a technique specially
 // designed to minimize a function in the absence of derivative information.
 // Generally speaking, it is the method of choice if derivatives are not available.
 starting_point = -4,5,99,3;
@@ -296,8 +296,8 @@ int main()
-// Now lets test out the Shor 9 bit encoding
-cout << "\n\n\n\nNow lets try playing around with Shor's 9bit error correcting code" << endl;
+// Now let's test out the Shor 9 bit encoding
+cout << "\n\n\n\nNow let's try playing around with Shor's 9bit error correcting code" << endl;
 // Reset the quantum register to contain a single bit
 reg.set_num_bits(1);
@@ -36,7 +36,7 @@ int main()
-// Now lets make some vector objects that can hold our samples
+// Now let's make some vector objects that can hold our samples
 std::vector<sample_type> samples;
 std::vector<double> labels;
@@ -47,7 +47,7 @@ int main()
 std::vector<sample_type> samples;
 std::vector<double> labels;
-// Now lets put some data into our samples and labels objects. We do this
+// Now let's put some data into our samples and labels objects. We do this
 // by looping over a bunch of points and labeling them according to their
 // distance from the origin.
 for (int r = -20; r <= 20; ++r)
@@ -141,11 +141,11 @@ int main()
 learned_function.normalizer = normalizer; // save normalization information
 learned_function.function = trainer.train(samples, labels); // perform the actual RVM training and save the results
-// print out the number of relevance vectors in the resulting decision function
+// Print out the number of relevance vectors in the resulting decision function.
 cout << "\nnumber of relevance vectors in our learned_function is "
 << learned_function.function.basis_vectors.size() << endl;
-// now lets try this decision_function on some samples we haven't seen before
+// Now let's try this decision_function on some samples we haven't seen before
 sample_type sample;
 sample(0) = 3.123;
@@ -209,7 +209,7 @@ int main()
 serialize(learned_pfunct,fout);
 fout.close();
-// now lets open that file back up and load the function object it contains
+// Now let's open that file back up and load the function object it contains.
 ifstream fin("saved_function.dat",ios::binary);
 deserialize(learned_pfunct, fin);
@@ -95,7 +95,7 @@ int main()
 serialize(test,fout);
 fout.close();
-// now lets open that file back up and load the function object it contains
+// Now let's open that file back up and load the function object it contains.
 ifstream fin("saved_function.dat",ios::binary);
 deserialize(test, fin);
@@ -192,7 +192,7 @@ int main()
 sequence_segmenter<feature_extractor> segmenter = trainer.train(samples, segments);
-// Lets print out all the segments our segmenter detects.
+// Let's print out all the segments our segmenter detects.
 for (unsigned long i = 0; i < samples.size(); ++i)
 {
 // get all the detected segments in samples[i]
@@ -205,7 +205,7 @@ int main()
 }
-// Now lets test it on a new sentence and see what it detects.
+// Now let's test it on a new sentence and see what it detects.
 std::vector<std::string> sentence(split("There once was a man from Nantucket whose name rhymed with Bob Bucket"));
 std::vector<std::pair<unsigned long,unsigned long> > seg = segmenter(sentence);
 for (unsigned long j = 0; j < seg.size(); ++j)
@@ -47,7 +47,7 @@ int main()
 std::vector<sample_type> samples;
 std::vector<double> labels;
-// Now lets put some data into our samples and labels objects. We do this by looping
+// Now let's put some data into our samples and labels objects. We do this by looping
 // over a bunch of points and labeling them according to their distance from the
 // origin.
 for (int r = -20; r <= 20; ++r)
@@ -149,7 +149,7 @@ int main()
 cout << "\nnumber of support vectors in our learned_function is "
 << learned_function.function.basis_vectors.size() << endl;
-// now lets try this decision_function on some samples we haven't seen before
+// Now let's try this decision_function on some samples we haven't seen before.
 sample_type sample;
 sample(0) = 3.123;
@@ -214,7 +214,7 @@ int main()
 serialize(learned_pfunct,fout);
 fout.close();
-// now lets open that file back up and load the function object it contains
+// Now let's open that file back up and load the function object it contains.
 ifstream fin("saved_function.dat",ios::binary);
 deserialize(learned_pfunct, fin);
@@ -242,7 +242,7 @@ int main()
 cout << "\ncross validation accuracy with only 10 support vectors: "
 << cross_validate_trainer(reduced2(trainer,10), samples, labels, 3);
-// Lets print out the original cross validation score too for comparison.
+// Let's print out the original cross validation score too for comparison.
 cout << "cross validation accuracy with all the original support vectors: "
 << cross_validate_trainer(trainer, samples, labels, 3);
@@ -67,7 +67,7 @@ int main()
 center = 20, 20;
-// Now lets go into a loop and randomly generate 1000 samples.
+// Now let's go into a loop and randomly generate 1000 samples.
 srand(time(0));
 for (int i = 0; i < 10000; ++i)
 {
@@ -96,7 +96,7 @@ int main()
 }
 }
-// Now we have trained our SVM. Lets see how well it did.
+// Now we have trained our SVM. Let's see how well it did.
 // Each of these statements prints out the output of the SVM given a particular sample.
 // The SVM outputs a number > 0 if a sample is predicted to be in the +1 class and < 0
 // if a sample is predicted to be in the -1 class.
@@ -123,7 +123,7 @@ int main()
 // function. To support this the dlib library provides functions for converting an online
 // training object like svm_pegasos into a batch training object.
-// First lets clear out anything in the trainer object.
+// First let's clear out anything in the trainer object.
 trainer.clear();
 // Now to begin with, you might want to compute the cross validation score of a trainer object
@@ -38,7 +38,7 @@ int main()
 typedef matrix<double,2,1> sample_type;
-// Now lets make some testing data. To make it really simple, lets
+// Now let's make some testing data. To make it really simple, let's
 // suppose that vectors with positive values in the first dimension
 // should rank higher than other vectors. So what we do is make
 // examples of relevant (i.e. high ranking) and non-relevant (i.e. low
@@ -45,7 +45,7 @@ int main()
 // description of what this parameter does.
 trainer.set_lambda(0.00001);
-// Lets also use the svm trainer specially optimized for the linear_kernel and
+// Let's also use the svm trainer specially optimized for the linear_kernel and
 // sparse_linear_kernel.
 svm_c_linear_trainer<kernel_type> linear_trainer;
 // This trainer solves the "C" formulation of the SVM. See the documentation for
@@ -59,7 +59,7 @@ int main()
 sample_type sample;
-// Now lets go into a loop and randomly generate 10000 samples.
+// Now let's go into a loop and randomly generate 10000 samples.
 srand(time(0));
 double label = +1;
 for (int i = 0; i < 10000; ++i)
@@ -87,11 +87,11 @@ int main()
 labels.push_back(label);
 }
-// In addition to the rule we learned with the pegasos trainer lets also use our linear_trainer
-// to learn a decision rule.
+// In addition to the rule we learned with the pegasos trainer, let's also use our
+// linear_trainer to learn a decision rule.
 decision_function<kernel_type> df = linear_trainer.train(samples, labels);
-// Now we have trained our SVMs. Lets test them out a bit.
+// Now we have trained our SVMs. Let's test them out a bit.
 // Each of these statements prints the output of the SVMs given a particular sample.
 // Each SVM outputs a number > 0 if a sample is predicted to be in the +1 class and < 0
 // if a sample is predicted to be in the -1 class.
@@ -245,7 +245,7 @@ public:
 // are the four virtual functions defined below.
-// So lets make an empty 9-dimensional PSI vector
+// So let's make an empty 9-dimensional PSI vector
 feature_vector_type psi(get_num_dimensions());
 psi = 0; // zero initialize it
@@ -23,7 +23,7 @@
 cmake --build . --config Release
 Note that you may need to install CMake (www.cmake.org) for this to work.
-Next, lets assume you have a folder of images called /tmp/images. These images
+Next, let's assume you have a folder of images called /tmp/images. These images
 should contain examples of the objects you want to learn to detect. You will
 use the imglab tool to label these objects. Do this by typing the following
 ./imglab -c mydataset.xml /tmp/images
@@ -139,7 +139,7 @@ int main()
 typedef ukf_kernel<sample_type> kernel_type;
-// Now lets generate some training data
+// Now let's generate some training data
 std::vector<sample_type> samples;
 std::vector<double> labels;
 for (double r = -20; r <= 20; r += 0.9)
@@ -177,7 +177,7 @@ int main()
 trainer.use_classification_loss_for_loo_cv();
-// Finally, lets test how good our new kernel is by doing some leave-one-out cross-validation.
+// Finally, let's test how good our new kernel is by doing some leave-one-out cross-validation.
 cout << "\ndoing leave-one-out cross-validation" << endl;
 for (double sigma = 0.01; sigma <= 100; sigma *= 3)
 {