From b81236793074f1f54974da1bd28d955a09c156d8 Mon Sep 17 00:00:00 2001
From: Davis King
Date: Sun, 29 Nov 2009 18:59:24 +0000
Subject: [PATCH] Updated the example programs so that there isn't this
 confusing use of the phrase "support vectors" all over the place.  Also
 fixed them to compile now that I renamed the support_vectors field in
 decision_function to basis_vectors.

--HG--
extra : convert_revision : svn%3Afdd8eb12-d10e-0410-9acb-85c331704f74/trunk%403279
---
 examples/kcentroid_ex.cpp     |  4 ++--
 examples/kkmeans_ex.cpp       | 12 ++++++------
 examples/rank_features_ex.cpp |  4 ++--
 examples/rvm_ex.cpp           | 12 ++++++------
 examples/svm_ex.cpp           |  4 ++--
 5 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/examples/kcentroid_ex.cpp b/examples/kcentroid_ex.cpp
index 41b4e79a5..1756f7e29 100644
--- a/examples/kcentroid_ex.cpp
+++ b/examples/kcentroid_ex.cpp
@@ -48,9 +48,9 @@ int main()
     // you need to set. The first argument to the constructor is the kernel we wish to
     // use. The second is a parameter that determines the numerical accuracy with which
     // the object will perform the centroid estimation. Generally, smaller values
-    // give better results but cause the algorithm to attempt to use more support vectors
+    // give better results but cause the algorithm to attempt to use more dictionary vectors
     // (and thus run slower and use more memory). The third argument, however, is the
-    // maximum number of support vectors a kcentroid is allowed to use. So you can use
+    // maximum number of dictionary vectors a kcentroid is allowed to use. So you can use
     // it to control the runtime complexity.
 
     kcentroid<kernel_type> test(kernel_type(0.1),0.01, 15);
diff --git a/examples/kkmeans_ex.cpp b/examples/kkmeans_ex.cpp
index 4029e34e4..78101ff24 100644
--- a/examples/kkmeans_ex.cpp
+++ b/examples/kkmeans_ex.cpp
@@ -46,9 +46,9 @@ int main()
     // you need to set. The first argument to the constructor is the kernel we wish to
     // use. The second is a parameter that determines the numerical accuracy with which
     // the object will perform part of the learning algorithm. Generally, smaller values
-    // give better results but cause the algorithm to attempt to use more support vectors
+    // give better results but cause the algorithm to attempt to use more dictionary vectors
     // (and thus run slower and use more memory). The third argument, however, is the
-    // maximum number of support vectors a kcentroid is allowed to use. So you can use
+    // maximum number of dictionary vectors a kcentroid is allowed to use. So you can use
     // it to control the runtime complexity.
 
     kcentroid<kernel_type> kc(kernel_type(0.1),0.01, 8);
@@ -133,13 +133,13 @@ int main()
         cout << test(samples[i+2*num]) << "\n";
     }
 
-    // Now print out how many support vectors each center used. Note that
+    // Now print out how many dictionary vectors each center used. Note that
     // the maximum number of 8 was reached. If you went back to the kcentroid
     // constructor and changed the 8 to some bigger number you would see that these
     // numbers would go up. However, 8 is all we need to correctly cluster this dataset.
- cout << "num sv for center 0: " << test.get_kcentroid(0).dictionary_size() << endl; - cout << "num sv for center 1: " << test.get_kcentroid(1).dictionary_size() << endl; - cout << "num sv for center 2: " << test.get_kcentroid(2).dictionary_size() << endl; + cout << "num dictionary vectors for center 0: " << test.get_kcentroid(0).dictionary_size() << endl; + cout << "num dictionary vectors for center 1: " << test.get_kcentroid(1).dictionary_size() << endl; + cout << "num dictionary vectors for center 2: " << test.get_kcentroid(2).dictionary_size() << endl; } diff --git a/examples/rank_features_ex.cpp b/examples/rank_features_ex.cpp index ee42339fc..ca8017630 100644 --- a/examples/rank_features_ex.cpp +++ b/examples/rank_features_ex.cpp @@ -108,9 +108,9 @@ int main() // you need to set. The first argument to the constructor is the kernel we wish to // use. The second is a parameter that determines the numerical accuracy with which // the object will perform part of the ranking algorithm. Generally, smaller values - // give better results but cause the algorithm to attempt to use more support vectors + // give better results but cause the algorithm to attempt to use more dictionary vectors // (and thus run slower and use more memory). The third argument, however, is the - // maximum number of support vectors a kcentroid is allowed to use. So you can use + // maximum number of dictionary vectors a kcentroid is allowed to use. So you can use // it to put an upper limit on the runtime complexity. kcentroid kc(kernel_type(gamma), 0.001, 25); diff --git a/examples/rvm_ex.cpp b/examples/rvm_ex.cpp index dea566458..9806a5e82 100644 --- a/examples/rvm_ex.cpp +++ b/examples/rvm_ex.cpp @@ -136,9 +136,9 @@ int main() learned_function.normalizer = normalizer; // save normalization information learned_function.function = trainer.train(samples, labels); // perform the actual RVM training and save the results - // print out the number of support vectors in the resulting decision function - cout << "\nnumber of support vectors in our learned_function is " - << learned_function.function.support_vectors.nr() << endl; + // print out the number of relevance vectors in the resulting decision function + cout << "\nnumber of relevance vectors in our learned_function is " + << learned_function.function.basis_vectors.nr() << endl; // now lets try this decision_function on some samples we haven't seen before sample_type sample; @@ -171,10 +171,10 @@ int main() learned_pfunct.function = train_probabilistic_decision_function(trainer, samples, labels, 3); // Now we have a function that returns the probability that a given sample is of the +1 class. - // print out the number of support vectors in the resulting decision function. + // print out the number of relevance vectors in the resulting decision function. 
     // (it should be the same as in the one above)
-    cout << "\nnumber of support vectors in our learned_pfunct is "
-         << learned_pfunct.function.decision_funct.support_vectors.nr() << endl;
+    cout << "\nnumber of relevance vectors in our learned_pfunct is "
+         << learned_pfunct.function.decision_funct.basis_vectors.nr() << endl;
 
     sample(0) = 3.123;
     sample(1) = 2;
diff --git a/examples/svm_ex.cpp b/examples/svm_ex.cpp
index 5891858bd..d6fbe4fec 100644
--- a/examples/svm_ex.cpp
+++ b/examples/svm_ex.cpp
@@ -144,7 +144,7 @@ int main()
 
     // print out the number of support vectors in the resulting decision function
     cout << "\nnumber of support vectors in our learned_function is "
-         << learned_function.function.support_vectors.nr() << endl;
+         << learned_function.function.basis_vectors.nr() << endl;
 
     // now lets try this decision_function on some samples we haven't seen before
     sample_type sample;
@@ -180,7 +180,7 @@ int main()
     // print out the number of support vectors in the resulting decision function.
     // (it should be the same as in the one above)
     cout << "\nnumber of support vectors in our learned_pfunct is "
-         << learned_pfunct.function.decision_funct.support_vectors.nr() << endl;
+         << learned_pfunct.function.decision_funct.basis_vectors.nr() << endl;
 
     sample(0) = 3.123;
     sample(1) = 2;
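
Editor's note: for readers skimming the patch, the comments it touches all describe the three kcentroid constructor arguments. Below is a minimal, hypothetical sketch (not part of the patch) showing those arguments in context; the kernel width (0.1), tolerance (0.01), and dictionary cap (15) simply mirror the values used in kcentroid_ex.cpp.

#include <dlib/svm.h>
#include <iostream>
using namespace dlib;
using namespace std;

int main()
{
    typedef matrix<double,2,1> sample_type;
    typedef radial_basis_kernel<sample_type> kernel_type;

    // args: kernel, numerical tolerance, and the maximum number of
    // dictionary vectors the object may retain. Smaller tolerances give
    // better accuracy but grow the dictionary; the cap bounds runtime
    // and memory, as the updated comments explain.
    kcentroid<kernel_type> kc(kernel_type(0.1), 0.01, 15);

    sample_type s;
    s = 1.0, 2.0;   // dlib's comma-initialization syntax
    kc.train(s);

    cout << "dictionary vectors in use: " << kc.dictionary_size() << endl;
}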
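
Similarly, a minimal sketch of the renamed decision_function field in use. The trainer setup and toy data here are invented for illustration (svm_nu_trainer, as in svm_ex.cpp); the only point is that decision_function now exposes basis_vectors where it previously exposed support_vectors.

#include <dlib/svm.h>
#include <iostream>
#include <vector>
using namespace dlib;
using namespace std;

int main()
{
    typedef matrix<double,2,1> sample_type;
    typedef radial_basis_kernel<sample_type> kernel_type;

    std::vector<sample_type> samples;
    std::vector<double> labels;

    // hypothetical toy data: points near the origin are +1, the rest -1
    for (int i = 0; i < 20; ++i)
    {
        sample_type s;
        s = i/2.0, i/2.0;
        samples.push_back(s);
        labels.push_back(i < 10 ? +1 : -1);
    }

    svm_nu_trainer<kernel_type> trainer;
    trainer.set_kernel(kernel_type(0.1));
    trainer.set_nu(0.1);

    decision_function<kernel_type> df = trainer.train(samples, labels);

    // formerly df.support_vectors; renamed because for an RVM these are
    // relevance vectors and for a kcentroid they are dictionary vectors
    cout << "number of basis vectors: " << df.basis_vectors.nr() << endl;
}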