Friday, July 3, 2015

Anomaly Detection and Recommender Systems

My solutions to the Week 9 exercises (Anomaly Detection and Recommender Systems):

1) Estimate Gaussian Parameters [ estimateGaussian.m ]

function [mu sigma2] = estimateGaussian(X)
%ESTIMATEGAUSSIAN This function estimates the parameters of a 
%Gaussian distribution using the data in X
%   [mu sigma2] = estimateGaussian(X), 
%   The input X is the dataset with each n-dimensional data point in one row
%   The output is an n-dimensional vector mu, the mean of the data set
%   and the variances sigma^2, an n x 1 vector
% 

% Useful variables
[m, n] = size(X);

% You should return these values correctly
mu = zeros(n, 1);
sigma2 = zeros(n, 1);

% ====================== YOUR CODE HERE ======================
% Instructions: Compute the mean of the data and the variances
%               In particular, mu(i) should contain the mean of
%               the data for the i-th feature and sigma2(i)
%               should contain variance of the i-th feature.
%

% Vectorized mean: sum each column of X and divide by the number of examples
onesMatrix = ones(1, size(X, 1));
mu = (onesMatrix * X)/m;

% Variance of each feature (note the division by m, not m-1)
for j = 1:n
 sigma2(j) = sum((X(:, j)-mu(j)).^2)/m;
end

% Alternative, fully vectorized way of calculating the variance
% http://stackoverflow.com/questions/5967940/matlab-quickly-subtract-1xn-array-from-mxn-matrix-elements
% http://stackoverflow.com/questions/2651267/why-is-sumx-1-the-sum-of-the-columns-in-matlab
% sigma2 = sum(bsxfun(@minus, X, mu).^2, 1)/m;

% Using the built-in mean and var functions
% mu = mean(X);
% sigma2 = var(X, 1);

% =============================================================

end
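
A quick sanity check outside the exercise (the toy matrix below is made up for illustration) is to compare the function's output with Octave's built-in mean and var:

% Toy 5 x 2 dataset, made-up values
X = [1.1 2.0; 0.9 1.8; 1.0 2.2; 1.2 1.9; 0.8 2.1];

[mu, sigma2] = estimateGaussian(X);

% Both differences should be numerically zero;
% var(X, 1) normalizes by m, matching the formula above
m_ref = mean(X);
v_ref = var(X, 1);
disp(mu(:) - m_ref(:));
disp(sigma2(:) - v_ref(:));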

2) Select Threshold [ selectThreshold.m ]

function [bestEpsilon bestF1] = selectThreshold(yval, pval)
%SELECTTHRESHOLD Find the best threshold (epsilon) to use for selecting
%outliers
%   [bestEpsilon bestF1] = SELECTTHRESHOLD(yval, pval) finds the best
%   threshold to use for selecting outliers based on the results from a
%   validation set (pval) and the ground truth (yval).
%

bestEpsilon = 0;
bestF1 = 0;
F1 = 0;

stepsize = (max(pval) - min(pval)) / 1000;
for epsilon = min(pval):stepsize:max(pval)
    
    % ====================== YOUR CODE HERE ======================
    % Instructions: Compute the F1 score of choosing epsilon as the
    %               threshold and place the value in F1. The code at the
    %               end of the loop will compare the F1 score for this
    %               choice of epsilon and set it to be the best epsilon if
    %               it is better than the current choice of epsilon.
    %               
    % Note: You can use predictions = (pval < epsilon) to get a binary vector
    %       of 0's and 1's of the outlier predictions

 % Binary predictions: 1 means the example is flagged as an anomaly
 % (its density pval is below the threshold)
 cvPredictions = zeros(size(pval));

 for i = 1:size(pval, 1)
  if pval(i) >= epsilon
   cvPredictions(i) = 0;
  else
   cvPredictions(i) = 1;
  end
 end

 % cvPredictions and yval are both column vectors, so no transpose is needed
 fp = sum((cvPredictions == 1) & (yval == 0));   % false positives
 tp = sum((cvPredictions == 1) & (yval == 1));   % true positives
 fn = sum((cvPredictions == 0) & (yval == 1));   % false negatives

 
 prec = tp/(tp+fp);
 rec = tp/(tp+fn);
 
 F1 = (2*prec*rec)/(prec+rec);
    % =============================================================

    if F1 > bestF1
       bestF1 = F1;
       bestEpsilon = epsilon;
    end
end

end
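
For a rough check outside the grader, synthetic validation data works, where the two anomalies (yval == 1) are given deliberately tiny density values (all numbers below are made up):

% Made-up validation set: 8 normal points, 2 anomalies with very low pval
pval = [0.09; 0.12; 0.10; 0.08; 0.11; 0.13; 0.001; 0.10; 0.0005; 0.09];
yval = [0; 0; 0; 0; 0; 0; 1; 0; 1; 0];

[epsilon, F1] = selectThreshold(yval, pval);
fprintf('Best epsilon: %g, best F1: %g\n', epsilon, F1);
% With data this cleanly separated, F1 should come out as 1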

3, 4, 5, 6) Collaborative Filtering Cost, Collaborative Filtering Gradient, Regularized Cost, Regularized Gradient [ cofiCostFunc.m ]

function [J, grad] = cofiCostFunc(params, Y, R, num_users, num_movies, ...
                                  num_features, lambda)
%COFICOSTFUNC Collaborative filtering cost function
%   [J, grad] = COFICOSTFUNC(params, Y, R, num_users, num_movies, ...
%   num_features, lambda) returns the cost and gradient for the
%   collaborative filtering problem.
%

% Unfold the U and W matrices from params
X = reshape(params(1:num_movies*num_features), num_movies, num_features);
Theta = reshape(params(num_movies*num_features+1:end), ...
                num_users, num_features);

            
% You need to return the following values correctly

J = 0;
X_grad = zeros(size(X));
Theta_grad = zeros(size(Theta));

% ====================== YOUR CODE HERE ======================
% Instructions: Compute the cost function and gradient for collaborative
%               filtering. Concretely, you should first implement the cost
%               function (without regularization) and make sure it
%               matches our costs. After that, you should implement the
%               gradient and use the checkCostFunction routine to check
%               that the gradient is correct. Finally, you should implement
%               regularization.
%
% Notes: X - num_movies x num_features matrix of movie features
%        Theta - num_users x num_features matrix of user features
%        Y - num_movies x num_users matrix of user ratings of movies
%        R - num_movies x num_users matrix, where R(i, j) = 1 if the 
%            i-th movie was rated by the j-th user
%
% You should set the following variables correctly:
%
%        X_grad - num_movies x num_features matrix, containing the 
%                 partial derivatives w.r.t. to each element of X
%        Theta_grad - num_users x num_features matrix, containing the 
%                     partial derivatives w.r.t. to each element of Theta
% 

% Vectorized cost: the element-wise .*R keeps only the (i, j) pairs
% where the movie was actually rated
J = sum(sum(((X*Theta'-Y).^2).*R))/2;


% temp = 0;
% for i=1:num_movies
%  for j=1:num_users
%   if R(i, j)==1 temp = temp + ((Theta(j, :))*X(i, :)' - Y(i,j))^2;
%   end
%  end
% end
%
% J = temp/2; 

% Gradient - Unvectorized

%for i = 1:num_movies
% for k=1:num_features
%  t = 0;
%  for j=1:num_users
%   if R(i, j)==1 t = t+ sum(Theta(j, :)*X(i, :)' - Y(i,j))*Theta(j, k); end
%  end
%  X_grad(i, k) = t;
% end
%end

%for j=1:num_users
% for k=1:num_features
%  t = 0;
%  for i=1:num_movies
%   if R(i, j)==1 t = t + sum(Theta(j, :)*X(i, :)' - Y(i, j))*X(i, k); end
%  end
%  Theta_grad(j, k) = t;
% end
%end



% Gradient - Vectorized: loop over movies/users, but work only with the
% entries that actually have ratings (R == 1)

for i=1:num_movies
 idx = find(R(i, :)==1);      % users who rated movie i
 ThetaTemp = Theta(idx, :);
 YTemp = Y(i, idx);
 X_grad(i, :) = (X(i, :)*ThetaTemp' - YTemp)*ThetaTemp;

 % regularization term for X_grad
 reg = lambda * X(i, :);
 X_grad(i, :) = X_grad(i, :) + reg;
end

for j=1:num_users
 idx = find(R(:, j)==1);      % movies rated by user j
 ThetaTemp = Theta(j, :);
 XTemp = X(idx, :);
 YTemp = Y(idx, j);
 first = (XTemp*ThetaTemp' - YTemp)';
 Theta_grad(j, :) = first*XTemp;

 % regularization term for Theta_grad
 reg = lambda * Theta(j, :);
 Theta_grad(j, :) = Theta_grad(j, :) + reg;
end

% Regularization term for the cost
ThetaSquared = sum(sum(Theta .^ 2));
XSquared = sum(sum(X .^ 2));

reg = ((ThetaSquared + XSquared)*lambda)/2;
J = J + reg;
% =============================================================

grad = [X_grad(:); Theta_grad(:)];

end
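
A minimal sketch of how this function can be exercised on its own (problem sizes and lambda below are arbitrary; the course's checkCostFunction routine does a more thorough version of the same idea) is a one-component finite-difference check:

% Tiny random collaborative filtering problem
num_movies = 4; num_users = 3; num_features = 2; lambda = 1.5;

X = randn(num_movies, num_features);
Theta = randn(num_users, num_features);
Y = randn(num_movies, num_users);
R = Y > 0.5;                 % pretend only some entries were rated
Y = Y .* R;

params = [X(:); Theta(:)];
[J, grad] = cofiCostFunc(params, Y, R, num_users, num_movies, ...
                         num_features, lambda);

% Numerically approximate the gradient w.r.t. the first parameter
e = zeros(size(params)); e(1) = 1e-4;
J_plus  = cofiCostFunc(params + e, Y, R, num_users, num_movies, num_features, lambda);
J_minus = cofiCostFunc(params - e, Y, R, num_users, num_movies, num_features, lambda);
fprintf('analytic: %g  numerical: %g\n', grad(1), (J_plus - J_minus)/(2e-4));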
