Wednesday, July 22, 2015

Perceptron Learning Algorithm : Java Implementation

Perceptron Learning Algorithm:
  • Initialize the weights.
  • Pick a learning rate (LEARNING_RATE in the code below).
  • Do the following until the stopping condition is satisfied:
    •     For each training instance (example), apply the steps below (a worked update follows this list):
    •     calculate localError = true (known) output - predicted output;
    •     adjust weights[0] = weights[0] + LEARNING_RATE*localError*x[i];
    •     adjust weights[1] = weights[1] + LEARNING_RATE*localError*y[i];
    •     update the bias: weights[2] = weights[2] + LEARNING_RATE*localError;
    •     accumulate globalError = globalError + localError*localError; (squared error).
  • The decision boundary equation is weights[0]*x + weights[1]*y + weights[2] = 0.
  • Test the learned weights on randomly generated inputs.
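As a quick illustration (numbers made up for this example): take weights = {0.5, 0.5, 0.5}, LEARNING_RATE = 0.001, and a training point (x, y) = (0.3, 0.4) whose true class is 0. The weighted sum is 0.5*0.3 + 0.5*0.4 + 0.5 = 0.85 >= 0, so the perceptron outputs 1 and localError = 0 - 1 = -1. The updates are then weights[0] = 0.5 + 0.001*(-1)*0.3 = 0.4997, weights[1] = 0.5 + 0.001*(-1)*0.4 = 0.4996, and weights[2] = 0.5 + 0.001*(-1) = 0.499; globalError grows by (-1)*(-1) = 1.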
Java Implementation (credits: Dr. Noureddin Sadawi):

public class CaltechEx1 {
    static int MAX_ITER = 100;
    static double LEARNING_RATE = 0.001;
    static int NUM_INSTANCES = 100;
    static int theta = 0;
    
    public static void main(String[] args){
        //three variables (features)
        double[] x = new double[NUM_INSTANCES];
        double[] y = new double[NUM_INSTANCES]; 
        int[] outputs = new int[NUM_INSTANCES];
        
        //fifty random points of class 1
        for(int i=0;i<NUM_INSTANCES/2;++i){
            x[i] = randomNumber(-1, 0);
            y[i] = randomNumber(-1, 0);
            outputs[i] = 1;
            System.out.println(x[i]+"\t"+y[i]+"\t"+outputs[i]);
        }
        
        //fifty random points of class 0
        for(int i=NUM_INSTANCES/2;i<NUM_INSTANCES;i++){
            x[i] = randomNumber(0.01, 1);
            y[i] = randomNumber(0.01, 1);
            outputs[i] = 0;
            System.out.println(x[i]+"\t"+y[i]+"\t"+outputs[i]);
        }
        
        double[] weights = new double[3]; //2 for input variables and one for bias
        double localError, globalError;
        int p, iteration, output;
        
        weights[0] = randomNumber(0, 1);
        weights[1] = randomNumber(0, 1);
        weights[2] = randomNumber(0, 1); 
        
        
        iteration = 0;
        do{
            iteration++;
            globalError = 0;
            //loop through all instances (complete one epoch)
            for(p = 0;p<NUM_INSTANCES;p++){
                //calculate predicted class
                output = calculateOutput(theta, weights, x[p], y[p]);
                //difference between predicted and actual class values
                localError = outputs[p] - output; 
                //update weights
                weights[0] += LEARNING_RATE*localError*x[p];
                weights[1] += LEARNING_RATE*localError*y[p];
                
                //update bias
                weights[2] += LEARNING_RATE*localError;
                
                globalError += (localError*localError);  
            }
            /*Root Mean Squared Error*/
            System.out.println("Iteration "+iteration+" : RMSE = "+Math.sqrt(globalError/NUM_INSTANCES));
        }while(globalError != 0 && iteration < MAX_ITER);
        
        System.out.println("\n========\nDecision boundary equation:");
        System.out.println(weights[0]+"*x + "+weights[1]+"*y + "+weights[2]+" = 0");
        
        
        //generate 100 new random points and check their classes
        //the range -1 to 1 means a new point could belong to class 1 or class 0:
        //it covers both ranges used above when generating the 50 points of each class
        
        for(int j=0;j<100;++j){
            double x1 = randomNumber(-1, 1);
            double y1 = randomNumber(-1, 1);
            
            output = calculateOutput(theta, weights, x1, y1);
            System.out.println("\n=======\nNew Random Point:");
            System.out.println("x = "+x1+",y = "+y1);
            System.out.println("class = "+output);
        }
        
        double avg1 = 0, avg2 = 0;
        for(int j=0;j<10;++j){
        
            int mis=0;

            //P[f(x)!=g(x)] for N = 10
            //five random points of class 1
            for(int i=0;i<5;++i){
                double x1 = randomNumber(-1, 0);
                double y1 = randomNumber(-1, 0);
                output = calculateOutput(theta, weights, x1, y1);
                if(output != 1)mis++; 
            }

            //five random points of class 0
            for(int i=0;i<5;i++){
                double x1 = randomNumber(0.01, 1);
                double y1 = randomNumber(0.01, 1);
                output = calculateOutput(theta, weights, x1, y1);
                if(output != 0)mis++; 
            }
            avg1+=(double)mis/10;
//            System.out.println("P[f(x)!=g(x)] (N=10) = "+((double)mis/10));

            
            mis = 0;
            //P[f(x)!=g(x)] for N = 100
            //fifty random points of class 1
            for(int i=0;i<50;++i){
                double x1 = randomNumber(-1, 0);
                double y1 = randomNumber(-1, 0);
                output = calculateOutput(theta, weights, x1, y1);
                if(output != 1)mis++; 
            }

            //fifty random points of class 0
            for(int i=0;i<50;i++){
                double x1 = randomNumber(0.01, 1);
                double y1 = randomNumber(0.01, 1);
                output = calculateOutput(theta, weights, x1, y1);
                if(output != 0)mis++; 
            }

            avg2+=(double)mis/100;
        }
        System.out.println("P[f(x)!=g(x) (N=10) over 10*10 samples = "+avg1/10);
        System.out.println("P[f(x)!=g(x) (N=100) over 10*100 samples = "+avg2/10);
    }
    
    /**
     * returns a random double value within a given range
     * @param min the minimum value of the required range (double)
     * @param max the maximum value of the required range (double)
     * @return a random double value between min and max
     */
    public static double randomNumber(double min, double max){
        double d = min+Math.random()*(max-min);
        return d;
    }
    
    /**
     * returns either 1 or 0 using a threshold function 
     * @param theta an integer value for the threshold
     * @param weights the array of weights
     * @param x the x input value
     * @param y the y input value
     * @return 1 or 0
     */
    static int calculateOutput(int theta, double weights[], double x, double y){
        double sum  = x*weights[0] + y*weights[1] + weights[2];
        return sum>=theta ? 1:0;
    }
}
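The class depends only on the JDK, so it can be tried directly: compile with javac CaltechEx1.java and run with java CaltechEx1. It prints the generated training points, the RMSE after each epoch, the learned decision boundary, and the class assigned to each freshly generated random point.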


Friday, July 3, 2015

Anomaly Detection and Recommender Systems

My solutions to Week 9 Exercises (Anomaly Detection and Recommender Systems) -

1) Estimate Gaussian Parameters [ estimateGaussian.m ]

function [mu sigma2] = estimateGaussian(X)
%ESTIMATEGAUSSIAN This function estimates the parameters of a 
%Gaussian distribution using the data in X
%   [mu sigma2] = estimateGaussian(X), 
%   The input X is the dataset with each n-dimensional data point in one row
%   The output is an n-dimensional vector mu, the mean of the data set
%   and the variances sigma^2, an n x 1 vector
% 

% Useful variables
[m, n] = size(X);

% You should return these values correctly
mu = zeros(n, 1);
sigma2 = zeros(n, 1);

% ====================== YOUR CODE HERE ======================
% Instructions: Compute the mean of the data and the variances
%               In particular, mu(i) should contain the mean of
%               the data for the i-th feature and sigma2(i)
%               should contain variance of the i-th feature.
%

onesMatrix = ones(1, size(X, 1));
mu = (onesMatrix * X)/m;

for j = 1:n
 sigma2(j) = sum((X(:, j)-mu(j)).^2)/m;
end

% another (equivalent, vectorized) way of calculating the variance:
% http://stackoverflow.com/questions/5967940/matlab-quickly-subtract-1xn-array-from-mxn-matrix-elements
% http://stackoverflow.com/questions/2651267/why-is-sumx-1-the-sum-of-the-columns-in-matlab
% sigma2 = sum(bsxfun(@minus, X, mu).^2, 1)/m;

% using mean and var functions
% mu = mean(X);
% sigma2 = var(X, 1);

% =============================================================

end
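For readers coming from the Java post above, the same computation translates directly. Here is a minimal Java sketch (the class and method names are my own, not part of the exercise) of the column-wise mean and population variance, dividing by m rather than m-1 just as the Octave code does:

//illustrative Java version of estimateGaussian: column-wise mean and
//population variance (divide by m, not m-1), mirroring the Octave code
public class GaussianParams {
    //returns {mu, sigma2}, each of length n, for an m-by-n data matrix X
    static double[][] estimateGaussian(double[][] X) {
        int m = X.length, n = X[0].length;
        double[] mu = new double[n];
        double[] sigma2 = new double[n];
        for (double[] row : X)
            for (int j = 0; j < n; j++) mu[j] += row[j] / m;
        for (double[] row : X)
            for (int j = 0; j < n; j++) {
                double d = row[j] - mu[j];
                sigma2[j] += d * d / m;
            }
        return new double[][]{mu, sigma2};
    }
}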

2) Select Threshold [ selectThreshold.m ]

function [bestEpsilon bestF1] = selectThreshold(yval, pval)
%SELECTTHRESHOLD Find the best threshold (epsilon) to use for selecting
%outliers
%   [bestEpsilon bestF1] = SELECTTHRESHOLD(yval, pval) finds the best
%   threshold to use for selecting outliers based on the results from a
%   validation set (pval) and the ground truth (yval).
%

bestEpsilon = 0;
bestF1 = 0;
F1 = 0;

stepsize = (max(pval) - min(pval)) / 1000;
for epsilon = min(pval):stepsize:max(pval)
    
    % ====================== YOUR CODE HERE ======================
    % Instructions: Compute the F1 score of choosing epsilon as the
    %               threshold and place the value in F1. The code at the
    %               end of the loop will compare the F1 score for this
    %               choice of epsilon and set it to be the best epsilon if
    %               it is better than the current choice of epsilon.
    %               
    % Note: You can use predictions = (pval < epsilon) to get a binary vector
    %       of 0's and 1's of the outlier predictions

 cvPredictions = zeros(size(pval, 1), 1);
 
 for i=1:size(pval, 1)
  if pval(i)>=epsilon, cvPredictions(i) = 0;
  else, cvPredictions(i) = 1; end
 end
 
 % equivalently, vectorized: cvPredictions = (pval < epsilon);
 
 fp = sum((cvPredictions==1) & (yval==0));
 tp = sum((cvPredictions==1) & (yval==1));
 fn = sum((cvPredictions==0) & (yval==1));

 
 prec = tp/(tp+fp);
 rec = tp/(tp+fn);
 
 F1 = (2*prec*rec)/(prec+rec);
    % =============================================================

    if F1 > bestF1
       bestF1 = F1;
       bestEpsilon = epsilon;
    end
end

end
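A quick numeric check of the F1 computation (counts invented for illustration): if some epsilon gives tp = 8, fp = 2, and fn = 4, then prec = 8/(8+2) = 0.8, rec = 8/(8+4) ≈ 0.667, and F1 = 2*0.8*0.667/(0.8+0.667) ≈ 0.727. The epsilon achieving the highest such score is kept as bestEpsilon.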

3, 4, 5, 6) Collaborative Filtering Cost, Collaborative Filtering Gradient, Regularized Cost, Regularized Gradient [ cofiCostFunc.m ]

function [J, grad] = cofiCostFunc(params, Y, R, num_users, num_movies, ...
                                  num_features, lambda)
%COFICOSTFUNC Collaborative filtering cost function
%   [J, grad] = COFICOSTFUNC(params, Y, R, num_users, num_movies, ...
%   num_features, lambda) returns the cost and gradient for the
%   collaborative filtering problem.
%

% Unfold the U and W matrices from params
X = reshape(params(1:num_movies*num_features), num_movies, num_features);
Theta = reshape(params(num_movies*num_features+1:end), ...
                num_users, num_features);

            
% You need to return the following values correctly

J = 0;
X_grad = zeros(size(X));
Theta_grad = zeros(size(Theta));

% ====================== YOUR CODE HERE ======================
% Instructions: Compute the cost function and gradient for collaborative
%               filtering. Concretely, you should first implement the cost
%               function (without regularization) and make sure it is
%               matches our costs. After that, you should implement the 
%               gradient and use the checkCostFunction routine to check
%               that the gradient is correct. Finally, you should implement
%               regularization.
%
% Notes: X - num_movies x num_features matrix of movie features
%        Theta - num_users x num_features matrix of user features
%        Y - num_movies x num_users matrix of user ratings of movies
%        R - num_movies x num_users matrix, where R(i, j) = 1 if the 
%            i-th movie was rated by the j-th user
%
% You should set the following variables correctly:
%
%        X_grad - num_movies x num_features matrix, containing the 
%                 partial derivatives w.r.t. to each element of X
%        Theta_grad - num_users x num_features matrix, containing the 
%                     partial derivatives w.r.t. to each element of Theta
% 

J = sum(sum(((X*Theta'-Y).^2).*R))/2;


% temp = 0;
% for i=1:num_movies
%  for j=1:num_users
%   if R(i, j)==1 temp = temp + ((Theta(j, :))*X(i, :)' - Y(i,j))^2;
%   end
%  end
% end
%
% J = temp/2; 

% Gradient - Unvectorized

%for i = 1:num_movies
% for k=1:num_features
%  t = 0;
%  for j=1:num_users
%   if R(i, j)==1 t = t+ sum(Theta(j, :)*X(i, :)' - Y(i,j))*Theta(j, k); end
%  end
%  X_grad(i, k) = t;
% end
%end

%for j=1:num_users
% for k=1:num_features
%  t = 0;
%  for i=1:num_movies
%   if R(i, j)==1 t = t + sum(Theta(j, :)*X(i, :)' - Y(i, j))*X(i, k); end
%  end
%  Theta_grad(j, k) = t;
% end
%end



%Gradient - Vectorized

for i=1:num_movies
 idx = find(R(i, :)==1);
 ThetaTemp = Theta(idx, :);
 YTemp = Y(i, idx);
 X_grad(i, :) = ((X(i, :)*ThetaTemp' - YTemp)*ThetaTemp);
 
 reg = lambda * X(i, :);
 X_grad(i, :) = X_grad(i, :)+reg;
end

for j=1:num_users
 idx = find(R(:, j)==1);
 ThetaTemp = Theta(j, :);
 XTemp = X(idx, :);
 YTemp = Y(idx, j);
 first = (XTemp*ThetaTemp' - YTemp)';
 Theta_grad(j, :) = (first*XTemp);
 
 reg = lambda * Theta(j, :);
 Theta_grad(j, :) = Theta_grad(j, :)+reg;
end

ThetaSquared = sum(sum(Theta .^ 2));
XSquared = sum(sum(X .^ 2));

reg = ((ThetaSquared + XSquared)*lambda)/2;
J = J+reg;
% =============================================================

grad = [X_grad(:); Theta_grad(:)];

end
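The cost is easiest to see element-wise: only rated pairs (those with R(i, j) = 1) contribute. As a cross-check against the commented-out loop version above, here is a minimal Java sketch of the unregularized cost (class and method names are my own):

//illustrative Java version of the unregularized collaborative-filtering cost:
//J = (1/2) * sum over rated pairs (i,j) of (X(i,:)*Theta(j,:)' - Y(i,j))^2
public class CofiCost {
    static double cost(double[][] X, double[][] Theta, double[][] Y, int[][] R) {
        double J = 0;
        for (int i = 0; i < X.length; i++) {          //movies
            for (int j = 0; j < Theta.length; j++) {  //users
                if (R[i][j] == 1) {                   //only rated entries count
                    double pred = 0;                  //X(i,:)*Theta(j,:)'
                    for (int k = 0; k < X[i].length; k++)
                        pred += X[i][k] * Theta[j][k];
                    double err = pred - Y[i][j];
                    J += err * err;
                }
            }
        }
        return J / 2;
    }
}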