我为libsvm实现了OpenCV/C++包装器。在对SVM参数(RBF内核)进行网格搜索时,预测总是返回相同的标签。我创建了人工数据集,这些数据集具有非常容易分离的数据(并尝试预测了我刚刚接受过训练的数据),但是仍然返回相同的标签。
我使用了libsvm的MATLAB实现,并在同一数据集上获得了高精度。我在设置svm_problem时肯定做错了什么,但是我已经反复阅读了README文档,仍然找不到问题所在。
这是我设置libsvm问题的方法,其中数据是OpenCV Mat:
//createProblem() (fragment): builds the libsvm svm_problem from an OpenCV Mat,
//one training sample per row of 'data', labels taken from 'labels'.
const int rowSize = data.rows;
const int colSize = data.cols;
this->_svmProblem = new svm_problem;
std::memset(this->_svmProblem,0,sizeof(svm_problem));
//dynamically allocate the X matrix...
this->_svmProblem->x = new svm_node*[rowSize];
for(int row = 0; row < rowSize; ++row)
this->_svmProblem->x[row] = new svm_node[colSize + 1]; //+1 for the index=-1 terminator node
//...and the y vector
this->_svmProblem->y = new double[rowSize];
this->_svmProblem->l = rowSize; //l = number of training samples
for(int row = 0; row < rowSize; ++row)
{
for(int col = 0; col < colSize; ++col)
{
//set the index and the value. indexing starts at 1.
this->_svmProblem->x[row][col].index = col + 1;
//NOTE(review): reads at<float> here while createNode() below reads
//at<double> — confirm both match the Mat's actual depth.
double tempVal = (double)data.at<float>(row,col);
this->_svmProblem->x[row][col].value = tempVal;
}
//sentinel node: index -1 marks the end of this sparse feature vector
this->_svmProblem->x[row][colSize].index = -1;
this->_svmProblem->x[row][colSize].value = 0;
//add the label to the y array, and feature vector to X matrix
double tempVal = (double)labels.at<float>(row);
this->_svmProblem->y[row] = tempVal;
}
}/*createProblem()*/
//build the libsvm training parameters; the grid-searched C/gamma arrive via svmParams
this->_svmParameter = new svm_parameter;
//zero-fill so fields not set below (nu, p, coef0, weight arrays) are 0 / NULL
std::memset(this->_svmParameter,0,sizeof(svm_parameter));
this->_svmParameter->svm_type = svmParams.svmType;
this->_svmParameter->kernel_type = svmParams.kernalType;
this->_svmParameter->C = svmParams.C;
this->_svmParameter->gamma = svmParams.gamma;
this->_svmParameter->nr_weight = 0; //no per-class C re-weighting
this->_svmParameter->eps = 0.001; //solver stopping tolerance
this->_svmParameter->degree = 1; //only used by the polynomial kernel
this->_svmParameter->shrinking = 0; //shrinking heuristic disabled
this->_svmParameter->probability = 1; //also train probability estimates
this->_svmParameter->cache_size = 100; //kernel cache size in MB
this->_svmModel = svm_train(this->_svmProblem, this->_svmParameter);
//(snippet) predict a training vector back; 'x' and 'i' come from the surrounding code
float pred = (float)svm_predict(this->_svmModel, x[i]);
for(int i = 0; i < rowSize; ++i)
{
std::cout << "[";
for(int j = 0; j < colSize + 1; ++j)
{
std::cout << " (" << this->_svmProblem->x[i][j].index << ", " << this->_svmProblem->x[i][j].value << ")";
}
std::cout << "]" << " <" << this->_svmProblem->y[i] << ">" << std::endl;
}
[ (1, -1) (2, 0) (-1, 0)] <1>
[ (1, -0.92394) (2, 0) (-1, 0)] <1>
[ (1, -0.7532) (2, 0) (-1, 0)] <1>
[ (1, -0.75977) (2, 0) (-1, 0)] <1>
[ (1, -0.75337) (2, 0) (-1, 0)] <1>
[ (1, -0.76299) (2, 0) (-1, 0)] <1>
[ (1, -0.76527) (2, 0) (-1, 0)] <1>
[ (1, -0.74631) (2, 0) (-1, 0)] <1>
[ (1, -0.85153) (2, 0) (-1, 0)] <1>
[ (1, -0.72436) (2, 0) (-1, 0)] <1>
[ (1, -0.76485) (2, 0) (-1, 0)] <1>
[ (1, -0.72936) (2, 0) (-1, 0)] <1>
[ (1, -0.94004) (2, 0) (-1, 0)] <1>
[ (1, -0.92756) (2, 0) (-1, 0)] <1>
[ (1, -0.9688) (2, 0) (-1, 0)] <1>
[ (1, 0.05193) (2, 0) (-1, 0)] <3>
[ (1, -0.048488) (2, 0) (-1, 0)] <3>
[ (1, 0.070436) (2, 0) (-1, 0)] <3>
[ (1, 0.15191) (2, 0) (-1, 0)] <3>
[ (1, -0.07331) (2, 0) (-1, 0)] <3>
[ (1, 0.019786) (2, 0) (-1, 0)] <3>
[ (1, -0.072793) (2, 0) (-1, 0)] <3>
[ (1, 0.16157) (2, 0) (-1, 0)] <3>
[ (1, -0.057188) (2, 0) (-1, 0)] <3>
[ (1, -0.11187) (2, 0) (-1, 0)] <3>
[ (1, 0.15886) (2, 0) (-1, 0)] <3>
[ (1, -0.0701) (2, 0) (-1, 0)] <3>
[ (1, -0.17816) (2, 0) (-1, 0)] <3>
[ (1, 0.12305) (2, 0) (-1, 0)] <3>
[ (1, 0.058615) (2, 0) (-1, 0)] <3>
[ (1, 0.80203) (2, 0) (-1, 0)] <1>
[ (1, 0.734) (2, 0) (-1, 0)] <1>
[ (1, 0.9072) (2, 0) (-1, 0)] <1>
[ (1, 0.88061) (2, 0) (-1, 0)] <1>
[ (1, 0.83903) (2, 0) (-1, 0)] <1>
[ (1, 0.86604) (2, 0) (-1, 0)] <1>
[ (1, 1) (2, 0) (-1, 0)] <1>
[ (1, 0.77988) (2, 0) (-1, 0)] <1>
[ (1, 0.8578) (2, 0) (-1, 0)] <1>
[ (1, 0.79559) (2, 0) (-1, 0)] <1>
[ (1, 0.99545) (2, 0) (-1, 0)] <1>
[ (1, 0.78376) (2, 0) (-1, 0)] <1>
[ (1, 0.72177) (2, 0) (-1, 0)] <1>
[ (1, 0.72619) (2, 0) (-1, 0)] <1>
[ (1, 0.80149) (2, 0) (-1, 0)] <1>
[ (1, 0.092327) (2, -1) (-1, 0)] <2>
[ (1, 0.019054) (2, -1) (-1, 0)] <2>
[ (1, 0.15287) (2, -1) (-1, 0)] <2>
[ (1, -0.1471) (2, -1) (-1, 0)] <2>
[ (1, -0.068182) (2, -1) (-1, 0)] <2>
[ (1, -0.094567) (2, -1) (-1, 0)] <2>
[ (1, -0.17071) (2, -1) (-1, 0)] <2>
[ (1, -0.16646) (2, -1) (-1, 0)] <2>
[ (1, -0.030421) (2, -1) (-1, 0)] <2>
[ (1, 0.094346) (2, -1) (-1, 0)] <2>
[ (1, -0.14408) (2, -1) (-1, 0)] <2>
[ (1, 0.090025) (2, -1) (-1, 0)] <2>
[ (1, 0.043706) (2, -1) (-1, 0)] <2>
[ (1, 0.15065) (2, -1) (-1, 0)] <2>
[ (1, -0.11751) (2, -1) (-1, 0)] <2>
[ (1, -0.02324) (2, 1) (-1, 0)] <2>
[ (1, 0.0080356) (2, 1) (-1, 0)] <2>
[ (1, -0.17752) (2, 1) (-1, 0)] <2>
[ (1, 0.011135) (2, 1) (-1, 0)] <2>
[ (1, -0.029063) (2, 1) (-1, 0)] <2>
[ (1, 0.15398) (2, 1) (-1, 0)] <2>
[ (1, 0.097746) (2, 1) (-1, 0)] <2>
[ (1, 0.01018) (2, 1) (-1, 0)] <2>
[ (1, 0.015592) (2, 1) (-1, 0)] <2>
[ (1, -0.062793) (2, 1) (-1, 0)] <2>
[ (1, 0.014444) (2, 1) (-1, 0)] <2>
[ (1, -0.1205) (2, 1) (-1, 0)] <2>
[ (1, -0.18011) (2, 1) (-1, 0)] <2>
[ (1, 0.010521) (2, 1) (-1, 0)] <2>
[ (1, 0.036914) (2, 1) (-1, 0)] <2>
std::vector<double> CList, GList;
double baseNum = 2.0;
for(double j = -5; j <= 15; j += 2) //-5 and 15
CList.push_back(pow(baseNum,j));
for(double j = -15; j <= 3; j += 2) //-15 and 3
GList.push_back(pow(baseNum,j));
//exhaustive grid search: train one model per (C, gamma) pair
for(auto CIt = CList.begin(); CIt != CList.end(); ++CIt) //for all C's
{
double C = *CIt;
for(auto GIt = GList.begin(); GIt != GList.end(); ++GIt) //for all gamma's
{
double gamma = *GIt;
svmParams.svmType = C_SVC;
svmParams.kernalType = RBF; //RBF kernel: K(u,v) = exp(-gamma*||u-v||^2)
svmParams.C = C;
svmParams.gamma = gamma;
......training code etc..........
% MATLAB reference (libsvm MATLAB interface) that achieved high accuracy on the same data
CList = 2.^(-15:2:15);%(-5:2:15);
GList = 2.^(-15:2:15);%(-15:2:3);
% -q: quiet, -s 0: C-SVC, -t 2: RBF kernel
cmd = ['-q -s 0 -t 2 -c ', num2str(C), ' -g ', num2str(gamma)];
model = ovrtrain(yTrain,xTrain,cmd);
/// Convert an OpenCV matrix (one feature vector per row) into libsvm's
/// sparse svm_node representation.
/// @param data feature matrix; element (r,c) becomes feature c+1 of sample r.
///             NOTE(review): reads at<double> here while the training code
///             reads at<float> — confirm both match the Mat's actual depth.
/// @return newly allocated rowSize x (colSize+1) node array; the caller owns
///         it and must delete each row then the row-pointer array (see predict()).
/// @throws MLInterfaceException on allocation failure.
svm_node** LibSVM::createNode(INPUT const cv::Mat& data)
{
    const int rowSize = data.rows;
    const int colSize = data.cols;
    //dynamically allocate the X matrix...
    svm_node** x = new svm_node*[rowSize];
    if(x == NULL) //unreachable with default operator new (it throws std::bad_alloc); kept for interface parity
        throw MLInterfaceException("Could not allocate SVM Node Array.");
    for(int row = 0; row < rowSize; ++row)
    {
        x[row] = new svm_node[colSize + 1]; //+1 here for the index-terminating -1
        if(x[row] == NULL)
            throw MLInterfaceException("Could not allocate SVM Node.");
    }
    for(int row = 0; row < rowSize; ++row)
    {
        for(int col = 0; col < colSize; ++col)
        {
            //BUG FIX: 'index' was never assigned, so libsvm read uninitialized
            //feature indices and every prediction collapsed to the same label.
            //libsvm feature indexing starts at 1.
            x[row][col].index = col + 1;
            double tempVal = data.at<double>(row,col);
            x[row][col].value = tempVal;
        }
        //sentinel node: index -1 terminates the sparse feature vector
        x[row][colSize].index = -1;
        x[row][colSize].value = 0;
    }
    return x;
} /*createNode()*/
/// Predict a label for each row of 'data' with the trained libsvm model.
/// @param data feature matrix, one sample per row
/// @return column Mat of doubles, one predicted label per input row
/// @throws MLInterfaceException if no model has been trained or loaded
/// NOTE(review): if svm_predict or push_back throws, the raw 'x' array below
/// leaks — consider an RAII container instead of matching new[]/delete[].
cv::Mat LibSVM::predict(INPUT const cv::Mat& data)
{
if(this->_svmModel == NULL)
throw MLInterfaceException("Cannot predict; no model has been trained or loaded.");
cv::Mat predMat;
//create the libsvm representation of data
svm_node** x = this->createNode(data);
//perform prediction for each feature vector
for(int i = 0; i < data.rows; ++i)
{
double pred = svm_predict(this->_svmModel, x[i]);
predMat.push_back<double>(pred);
}
//delete all rows and columns of x
for(int i = 0; i < data.rows; ++i)
delete[] x[i];
delete[] x;
return predMat;
}
x[row][col].index = col + 1; //indexing starts at 1
最佳答案
查看您的gamma值会很有用,因为您的数据没有被标准化,这会带来很大的不同。
libsvm中的 Gamma 与超球面半径成反比,因此,如果这些球面相对于输入范围而言过小,则将始终激活所有内容,然后模型将始终输出相同的值。
因此,两个建议是:1)将输入值缩放到[-1,1]范围;2)尝试调整不同的 Gamma 值。
关于c++ - libsvm(C++)始终输出相同的预测,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/18816686/
为了将Cucumber用于命令行脚本,我按照提供的说明安装了arubagem。它在我的Gemfile中,我可以验证是否安装了正确的版本并且我已经包含了require'aruba/cucumber'在'features/env.rb'中为了确保它能正常工作,我写了以下场景:@announceScenario:Testingcucumber/arubaGivenablankslateThentheoutputfrom"ls-la"shouldcontain"drw"假设事情应该失败。它确实失败了,但失败的原因是错误的:@announceScenario:Testingcucumber/ar
我的瘦服务器配置了nginx,我的ROR应用程序正在它们上运行。在我发布代码更新时运行thinrestart会给我的应用程序带来一些停机时间。我试图弄清楚如何优雅地重启正在运行的Thin实例,但找不到好的解决方案。有没有人能做到这一点? 最佳答案 #Restartjustthethinserverdescribedbythatconfigsudothin-C/etc/thin/mysite.ymlrestartNginx将继续运行并代理请求。如果您将Nginx设置为使用多个上游服务器,例如server{listen80;server
我正在使用puppet为ruby程序提供一组常量。我需要提供一组主机名,我的程序将对其进行迭代。在我之前使用的bash脚本中,我只是将它作为一个puppet变量hosts=>"host1,host2"我将其提供给bash脚本作为HOSTS=显然这对ruby不太适用——我需要它的格式hosts=["host1","host2"]自从phosts和putsmy_array.inspect提供输出["host1","host2"]我希望使用其中之一。不幸的是,我终其一生都无法弄清楚如何让它发挥作用。我尝试了以下各项:我发现某处他们指出我需要在函数调用前放置“function_”……这
这是一道面试题,我没有答对,但还是很好奇怎么解。你有N个人的大家庭,分别是1,2,3,...,N岁。你想给你的大家庭拍张照片。所有的家庭成员都排成一排。“我是家里的friend,建议家庭成员安排如下:”1岁的家庭成员坐在这一排的最左边。每两个坐在一起的家庭成员的年龄相差不得超过2岁。输入:整数N,1≤N≤55。输出:摄影师可以拍摄的照片数量。示例->输入:4,输出:4符合条件的数组:[1,2,3,4][1,2,4,3][1,3,2,4][1,3,4,2]另一个例子:输入:5输出:6符合条件的数组:[1,2,3,4,5][1,2,3,5,4][1,2,4,3,5][1,2,4,5,3][
我有一个这样的哈希数组:[{:foo=>2,:date=>Sat,01Sep2014},{:foo2=>2,:date=>Sat,02Sep2014},{:foo3=>3,:date=>Sat,01Sep2014},{:foo4=>4,:date=>Sat,03Sep2014},{:foo5=>5,:date=>Sat,02Sep2014}]如果:date相同,我想合并哈希值。我对上面数组的期望是:[{:foo=>2,:foo3=>3,:date=>Sat,01Sep2014},{:foo2=>2,:foo5=>5:date=>Sat,02Sep2014},{:foo4=>4,:dat
我想使用spawn(针对多个并发子进程)在Ruby中执行一个外部进程,并将标准输出或标准错误收集到一个字符串中,其方式类似于使用Python的子进程Popen.communicate()可以完成的操作。我尝试将:out/:err重定向到一个新的StringIO对象,但这会生成一个ArgumentError,并且临时重新定义$stdxxx会混淆子进程的输出。 最佳答案 如果你不喜欢popen,这是我的方法:r,w=IO.pipepid=Process.spawn(command,:out=>w,:err=>[:child,:out])
如何将send与+=一起使用?a=20;a.send"+=",10undefinedmethod`+='for20:Fixnuma=20;a+=10=>30 最佳答案 恐怕你不能。+=不是方法,而是语法糖。参见http://www.ruby-doc.org/docs/ProgrammingRuby/html/tut_expressions.html它说Incommonwithmanyotherlanguages,Rubyhasasyntacticshortcut:a=a+2maybewrittenasa+=2.你能做的最好的事情是:
我想知道Ruby用来在命令行打印这些东西的输出流:irb(main):001:0>a="test"=>"test"irb(main):002:0>putsatest=>nilirb(main):003:0>a=>"test"$stdout是否用于irb(main):002:0>和irb(main):003:0>?而且,在这两次调用之间,$stdout的值是否有任何变化?另外,有人能告诉我打印/写入这些内容的Ruby源代码吗? 最佳答案 是的。而且很容易向自己测试/证明。在命令行试试这个:ruby-e'puts"foo"'>test.
让多条路线去同一条路的最优雅的方式是什么ControllerAction?我有:get'dashboard',to:'dashboard#index'get'dashboard/pending',to:'dashboard#index'get'dashboard/live',to:'dashboard#index'get'dashboard/sold',to:'dashboard#index'这很丑陋。有什么“更优雅”的建议吗?一个类轮的奖励积分。 最佳答案 为什么不只有一个路由和一个Controller操作,并根据传递给它的参数来
我在使用自定义RailsFormBuilder时遇到了问题,从昨天晚上开始我就发疯了。基本上我想对我的构建器方法之一有一个可选block,以便我可以在我的主要content_tag中显示其他内容。:defform_field(method,&block)content_tag(:div,class:'field')doconcatlabel(method,"Label#{method}")concattext_field(method)capture(&block)ifblock_given?endend当我在我的一个Slim模板中调用该方法时,如下所示:=f.form_field:e