From cfcd8b02b0b8bcf4150b937283ef26c2b786e430 Mon Sep 17 00:00:00 2001
From: julien Lengrand-Lambert
Date: Sat, 12 Dec 2015 11:15:26 +0100
Subject: [PATCH] Finishes Week 6

---
 machine-learning-ex5/ex5/ex5.m             |  4 ++--
 machine-learning-ex5/ex5/polyFeatures.m    | 10 ++++------
 machine-learning-ex5/ex5/token.mat         | Bin 262 -> 262 bytes
 machine-learning-ex5/ex5/validationCurve.m | 10 ++++++----
 4 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/machine-learning-ex5/ex5/ex5.m b/machine-learning-ex5/ex5/ex5.m
index 1df5a1c..be23ba0 100644
--- a/machine-learning-ex5/ex5/ex5.m
+++ b/machine-learning-ex5/ex5/ex5.m
@@ -123,7 +123,7 @@ for i = 1:m
 end
 
 fprintf('Program paused. Press enter to continue.\n');
-pause;
+% pause;
 
 %% =========== Part 6: Feature Mapping for Polynomial Regression =============
 %  One solution to this is to use polynomial regression. You should now
@@ -153,7 +153,7 @@ fprintf('Normalized Training Example 1:\n');
 fprintf('  %f  \n', X_poly(1, :));
 
 fprintf('\nProgram paused. Press enter to continue.\n');
-pause;
+% pause;
 
 
 
diff --git a/machine-learning-ex5/ex5/polyFeatures.m b/machine-learning-ex5/ex5/polyFeatures.m
index f496f48..2643a38 100644
--- a/machine-learning-ex5/ex5/polyFeatures.m
+++ b/machine-learning-ex5/ex5/polyFeatures.m
@@ -15,11 +15,9 @@ X_poly = zeros(numel(X), p);
 %
 %
-
-
-
-
+for i = 1:p
+    X_poly(:, i) = X.^i;
+end
 
 % =========================================================================
 
-
-end
+end
\ No newline at end of file
diff --git a/machine-learning-ex5/ex5/token.mat b/machine-learning-ex5/ex5/token.mat
index 452c919109d09ab02d3d354d31b4926d2c0b3256..f036d5d5c68ba028b2610f0421707c7807a575cf 100644
GIT binary patch
delta 28
jcmZo;YGazX7C5v

delta 28
jcmZo;YGazX5$Ch

diff --git a/machine-learning-ex5/ex5/validationCurve.m b/machine-learning-ex5/ex5/validationCurve.m
index 24b56bc..14f184e 100644
--- a/machine-learning-ex5/ex5/validationCurve.m
+++ b/machine-learning-ex5/ex5/validationCurve.m
@@ -41,10 +41,12 @@ error_val = zeros(length(lambda_vec), 1);
 
 
-
-
-
-
+for i = 1:length(lambda_vec)
+    lambda = lambda_vec(i);
+    [theta] = trainLinearReg(X, y, lambda);
+    error_train(i) = linearRegCostFunction(X, y, theta, 0);
+    error_val(i) = linearRegCostFunction(Xval, yval, theta, 0);
+end
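
Not part of the patch itself, but for context: a minimal Octave/MATLAB sketch of how the filled-in validationCurve is typically consumed in ex5.m to choose the regularization parameter. Variable names such as X_poly, X_poly_val, X_poly_test and ytest are assumed from the course script and may differ.

% Sketch only: pick the lambda with the lowest cross-validation error,
% then report the test error with regularization disabled (lambda = 0).
[lambda_vec, error_train, error_val] = validationCurve(X_poly, y, X_poly_val, yval);
[~, idx] = min(error_val);              % index of the best-performing lambda
best_lambda = lambda_vec(idx);
theta = trainLinearReg(X_poly, y, best_lambda);
test_error = linearRegCostFunction(X_poly_test, ytest, theta, 0);
fprintf('Best lambda: %f, test error: %f\n', best_lambda, test_error);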