[LON-CAPA-cvs] cvs: modules /minaeibi c183_2wayCV.m

minaeibi lon-capa-cvs@mail.lon-capa.org
Tue, 03 Sep 2002 13:06:23 -0000


minaeibi		Tue Sep  3 09:06:23 2002 EDT

  Added files:                 
    /modules/minaeibi	c183_2wayCV.m 
  Log:
  non-tree classifiers: matlab code for 2-fold cross validation
  
  

Index: modules/minaeibi/c183_2wayCV.m
+++ modules/minaeibi/c183_2wayCV.m
%This program compares the error rates of PHY183 data by using Bayes, 1NN, Knn, MLP, and Parzen Classifiers.
%(2-fold cross validation repeated run_times times; results are printed and plotted.)
close all;
clear all;

run_times = 10;   % number of cross-validation rounds

l_col = 7;        % column of f183.txt that holds the class label (1, 2 or 3)
ClassNo = 3;      % number of classes
load f183.txt;    % creates the matrix f183 in the workspace

dataf = f183;

% Row indices of the samples belonging to each class.
k1 = find(dataf(:,l_col)==1);
k2 = find(dataf(:,l_col)==2);
k3 = find(dataf(:,l_col)==3);

% Per-class sample matrices, and B = all samples regrouped class by class.
% Vectorized row indexing replaces the original one-row-at-a-time copies,
% which grew each matrix inside a loop (O(n^2) copying); the resulting
% matrices are identical.
B1 = dataf(k1,:);
B2 = dataf(k2,:);
B3 = dataf(k3,:);
B  = [B1; B2; B3];

% Half of each class is used for training, the other half for testing.
nn1 = floor(size(B1,1)/2);
nn2 = floor(size(B2,1)/2);
nn3 = floor(size(B3,1)/2);

% One-of-three coded targets, one row per sample, grouped class by class.
target = [repmat([1 0 0],nn1,1); repmat([0 1 0],nn2,1); repmat([0 0 1],nn3,1)];

% Feature columns of B used for classification.
First_Column = 1;
Second_Column = 6;
Feature_no = Second_Column - First_Column;
k_knn = 3; %floor(sqrt(Feature_no));

Column_No = Second_Column - First_Column + 1;

% Class label of every row of the class-grouped split.
lab_split = [1*ones(1,nn1) 2*ones(1,nn2) 3*ones(1,nn3)];

data = B(:,First_Column:Second_Column);

% Normalize each feature column to zero mean and unit variance
% (vectorized equivalent of the original per-column loop).
n_rows = size(data,1);
data = (data - ones(n_rows,1)*mean(data)) ./ (ones(n_rows,1)*std(data));

% Preallocate the pooled test and training matrices (125 samples each).
test  = zeros(125,Column_No);
train = zeros(125,Column_No);

% Error rates accumulated over all rounds, one sum per classifier.
round_err_bayes  = 0;
round_err_1nn    = 0;
round_err_knn    = 0;
round_err_parzen = 0;
round_err_mlp    = 0;
round_err_cmc    = 0;
round_err_oracle = 0;

% One round of 2-fold cross validation per iteration: split every class in
% half, train the five classifiers on one half, measure errors on the other,
% and also score the CMC and Oracle combiners.
% NOTE(review): the loop variable shadows the builtin round().
for round = 1:run_times
    %Randomly separate each class into a training and a testing half.
    % Class 1 is data rows 1:68, class 2 rows 70:163, class 3 rows 165:252
    % (rows 69 and 164 are left out so each class splits evenly).
    [c1_train,test1,index1] = randompick((data(1:68,:)),nn1);
    [c2_train,test2,index2] = randompick(data(70:163,:),nn2);
    [c3_train,test3,index3] = randompick(data(165:252,:),nn3);
    
    % Map the within-class test indices back to rows of B so the true label
    % can be read from B's last column; the offset is (class slice start)-1.
    % BUGFIX: class-2 test indices are index2(48:94) -- the original 47:93
    % re-used one training index and dropped the last test index -- and the
    % class-3 offset is 164, not 105, because data(165:252,:) starts at row
    % 165; with +105 class-3 test samples were looked up in class-2 rows.
    test_index(1:34)=index1(35:68);
    test_index(35:81)=(index2(48:94))+69;
    test_index(82:125)=(index3(45:88))+164;

    % Pooled test and training sets, grouped class by class (matches target).
    test(1:34,:) = test1;
    test(35:81,:) = test2;
    test(82:125,:) = test3;
    
    train(1:34,:) = c1_train;
    train(35:81,:) = c2_train;
    train(82:125,:) = c3_train;
    
    %Sample mean and sample covariance of each class (plug-in estimates).
    m_1 = mean(c1_train); v_1 = cov(c1_train);
    m_2 = mean(c2_train); v_2 = cov(c2_train);
    m_3 = mean(c3_train); v_3 = cov(c3_train);
    
    %Per-round misclassification counters, one per classifier / combiner.
    error_bayes = 0;
    error_1nn = 0;
    error_knn = 0;
    error_mlp = 0;
    error_parzen = 0;
    error_oracle = 0;
    error_cmc = 0;
    
    %Get the 1nn classification result in eachClass1
    [eachClass1, nearestSampleIndex, knnmat] = knn([train lab_split'], [test lab_split'], 1);
  
    %Get the knn classification result in eachClass
    [eachClass, nearestSampleIndex, knnmat] = knn([train lab_split'], [test lab_split'], k_knn);
    clear nearestSampleIndex; clear knnmat;

    %Get the Parzen Window classification result (one-of-three coded rows).
    %[m,s,p,sig]=fit_sphere(train, target);
    % sigma=trainparzen(train,target,max(sig),1,max(sig)/8);
    class=flagmax(parzen_classify(test,train,target,.5));
    
    %Get the MLP classification result (two hidden layers of 3 units).
    [w,bias,error]=trainmlp(train,target,[3 3],0.01);
    out = flagmax(mlp(test, w, bias));
    clear w; clear bias; clear error;
    
    for i = 1:size(test,1)
        % Quadratic Bayes discriminant per class; priors estimated from the
        % test split sizes.
        x = test(i,:);
        g(1) = (x-m_1)*(-0.5)*inv(v_1)*(x-m_1)'-0.5*log(det(v_1))+log(size(test1,1)/size(test,1));
        g(2) = (x-m_2)*(-0.5)*inv(v_2)*(x-m_2)'-0.5*log(det(v_2))+log(size(test2,1)/size(test,1));            
        g(3) = (x-m_3)*(-0.5)*inv(v_3)*(x-m_3)'-0.5*log(det(v_3))+log(size(test3,1)/size(test,1));
        [C,I] = max([g(1) g(2) g(3)]);
        
        % flag(k)=1 records that classifier k misclassified sample i
        % (1=Bayes, 2=1NN, 3=kNN, 4=Parzen, 5=MLP).
        flag(1)=0;flag(2)=0;flag(3)=0;flag(4)=0;flag(5)=0;
        % Calculate error for Bayes
        if I~=B(test_index(i),size(B,2))
            error_bayes = error_bayes + 1;
            flag(1)=1;
        end   %if
            
        % Calculate error for 1NN
        if (eachClass1(i))~=B(test_index(i),size(B,2))
            error_1nn = error_1nn + 1;
            flag(2)=1;
        end   %if            
        
        % Calculate error for kNN
        if (eachClass(i))~=B(test_index(i),size(B,2))
            error_knn = error_knn + 1;
            flag(3)=1;
        end   %if            
        
        % Calculate error for parzen: the coded output row must match the
        % target row in every position.
        if (sum(target(i,:)==class(i,:))~=size(class,2))
            error_parzen = error_parzen + 1;
            flag(4)=1;
        end   %if            
        
        % Calculate error for MLP (was size(class,2); size(out,2) is the
        % matrix actually compared -- both equal ClassNo, count unchanged).
        if (sum(target(i,:)==out(i,:))~=size(out,2))
            error_mlp = error_mlp + 1;
            flag(5)=1;
        end   %if
    
        % CMC errs when at least 4 of the 5 classifiers err; the Oracle
        % errs only when all five err.
        s_flag=sum(flag);
        if(s_flag>3)
            error_cmc=error_cmc+1;
            if(s_flag==5)
                error_oracle=error_oracle+1;
            end %if
        end %if
        
    end   %for
    
    % Per-round error rates and running sums.  All rates are now divided by
    % size(test,1) for consistency (mlp/cmc/oracle previously divided by
    % size(target,1), which is the same value, 125).
    error_rate_bayes(round) = error_bayes/size(test,1); round_err_bayes = round_err_bayes + error_rate_bayes(round);
    
    error_rate_1nn(round) = error_1nn/size(test,1);round_err_1nn = round_err_1nn + error_rate_1nn(round);

    error_rate_knn(round) = error_knn/size(test,1);round_err_knn = round_err_knn + error_rate_knn(round);

    error_rate_parzen(round) = error_parzen/size(test,1); round_err_parzen = round_err_parzen + error_rate_parzen(round);

    error_rate_mlp(round) = error_mlp/size(test,1); round_err_mlp = round_err_mlp + error_rate_mlp(round);

    error_rate_cmc(round) = error_cmc/size(test,1); round_err_cmc = round_err_cmc + error_rate_cmc(round);

    error_rate_oracle(round) = error_oracle/size(test,1); round_err_oracle = round_err_oracle + error_rate_oracle(round);

end   %round

% Summarize each classifier: mean error rate over all rounds and the
% standard deviation of the per-round error rates.
avg_err_bayes  = round_err_bayes/run_times;  std_dev_bayes  = std(error_rate_bayes);
avg_err_1nn    = round_err_1nn/run_times;    std_dev_1nn    = std(error_rate_1nn);
avg_err_knn    = round_err_knn/run_times;    std_dev_knn    = std(error_rate_knn);
avg_err_parzen = round_err_parzen/run_times; std_dev_parzen = std(error_rate_parzen);
avg_err_mlp    = round_err_mlp/run_times;    std_dev_mlp    = std(error_rate_mlp);
avg_err_cmc    = round_err_cmc/run_times;    std_dev_cmc    = std(error_rate_cmc);
avg_err_oracle = round_err_oracle/run_times; std_dev_oracle = std(error_rate_oracle);

% Print a name / mean / std-dev table, one row per classifier.
summary_names = {'Bayes','1NN','KNN','Parzen','MLP','CMC','Oracle'};
summary_avg   = [avg_err_bayes avg_err_1nn avg_err_knn avg_err_parzen avg_err_mlp avg_err_cmc avg_err_oracle];
summary_std   = [std_dev_bayes std_dev_1nn std_dev_knn std_dev_parzen std_dev_mlp std_dev_cmc std_dev_oracle];

fprintf('\n\n=======================================================\n');
for c = 1:length(summary_names)
    fprintf('%s\t%5.4f\t%5.4f\n', summary_names{c}, summary_avg(c), summary_std(c));
end

% Plot every per-round error-rate curve on the same axes, keeping each
% classifier's original color / marker / line style.
curves = {error_rate_bayes, error_rate_1nn, error_rate_knn, ...
          error_rate_parzen, error_rate_mlp, error_rate_cmc, error_rate_oracle};
styles = {{'k','Marker','s'}, {'m','LineStyle','-'}, {'r','Marker','*'}, ...
          {'b','LineStyle',':'}, {'g','Marker','o'}, {'k','Marker','v'}, ...
          {'k','Marker','.'}};
for c = 1:length(curves)
    plot(curves{c}, styles{c}{:});
    hold on;
end

legend('Bayes','1-NN','K-NN','Parzen','MLP','CMC','Oracle');
xlabel('Round in 2-fold Cross Validation');
ylabel('Error Rate');
title('LON-CAPA: Comparison of classifiers on PHY183 Data Set');