From 530aa9e509fee512599733fd067bfbed493c0309 Mon Sep 17 00:00:00 2001 From: Andrew Maas Date: Tue, 24 Sep 2013 21:55:29 -0700 Subject: [PATCH] Version 0.9 of new exercises. Copied from private repo --- cnn/cnnConvolve.m | 70 + cnn/cnnCost.m | 137 ++ cnn/cnnExercise.m | 106 ++ cnn/cnnInitParams.m | 43 + cnn/cnnParamsToStack.m | 39 + cnn/cnnPool.m | 36 + cnn/cnnTrain.m | 104 ++ cnn/computeNumericalGradient.m | 41 + cnn/minFuncSGD.m | 80 ++ common/display_network.m | 101 ++ common/loadMNISTImages.m | 26 + common/loadMNISTLabels.m | 19 + common/minFunc_2012/autoDif/autoGrad.m | 1 + common/minFunc_2012/autoDif/autoHess.m | 1 + common/minFunc_2012/autoDif/autoHv.m | 13 + common/minFunc_2012/autoDif/autoTensor.m | 1 + common/minFunc_2012/autoDif/derivativeCheck.m | 42 + .../autoDif/fastDerivativeCheck.m | 1 + common/minFunc_2012/example_derivativeCheck.m | 52 + common/minFunc_2012/example_minFunc.m | 75 ++ .../logisticExample/LogisticDiagPrecond.m | 20 + .../minFunc_2012/logisticExample/LogisticHv.m | 8 + .../logisticExample/LogisticLoss.m | 36 + .../logisticExample/example_minFunc_LR.m | 79 ++ .../logisticExample/mylogsumexp.m | 8 + common/minFunc_2012/mexAll.m | 7 + common/minFunc_2012/minFunc/ArmijoBacktrack.m | 139 ++ common/minFunc_2012/minFunc/WolfeLineSearch.m | 359 +++++ .../minFunc/compiled/lbfgsAddC.mexa64 | Bin 0 -> 8184 bytes .../minFunc/compiled/lbfgsAddC.mexmaci64 | Bin 0 -> 8800 bytes .../minFunc/compiled/lbfgsAddC.mexw64 | Bin 0 -> 7168 bytes .../minFunc/compiled/lbfgsC.mexa64 | Bin 0 -> 8187 bytes .../minFunc/compiled/lbfgsC.mexglx | Bin 0 -> 7733 bytes .../minFunc/compiled/lbfgsC.mexmac | Bin 0 -> 9500 bytes .../minFunc/compiled/lbfgsC.mexmaci | Bin 0 -> 12660 bytes .../minFunc/compiled/lbfgsC.mexmaci64 | Bin 0 -> 8800 bytes .../minFunc/compiled/lbfgsC.mexw32 | Bin 0 -> 7168 bytes .../minFunc/compiled/lbfgsC.mexw64 | Bin 0 -> 9728 bytes .../minFunc/compiled/lbfgsProdC.mexa64 | Bin 0 -> 8322 bytes .../minFunc/compiled/lbfgsProdC.mexmaci64 | Bin 0 -> 8992 bytes .../minFunc/compiled/lbfgsProdC.mexw64 | Bin 0 -> 10752 bytes .../minFunc/compiled/mcholC.mexa64 | Bin 0 -> 12621 bytes .../minFunc/compiled/mcholC.mexglx | Bin 0 -> 13046 bytes .../minFunc/compiled/mcholC.mexmac | Bin 0 -> 9936 bytes .../minFunc/compiled/mcholC.mexmaci64 | Bin 0 -> 13184 bytes .../minFunc/compiled/mcholC.mexw32 | Bin 0 -> 8192 bytes .../minFunc/compiled/mcholC.mexw64 | Bin 0 -> 12288 bytes common/minFunc_2012/minFunc/conjGrad.m | 86 ++ common/minFunc_2012/minFunc/dampedUpdate.m | 43 + common/minFunc_2012/minFunc/isLegal.m | 2 + common/minFunc_2012/minFunc/lbfgs.m | 40 + common/minFunc_2012/minFunc/lbfgsAdd.m | 32 + common/minFunc_2012/minFunc/lbfgsProd.m | 32 + common/minFunc_2012/minFunc/lbfgsUpdate.m | 21 + common/minFunc_2012/minFunc/mchol.m | 60 + common/minFunc_2012/minFunc/mcholinc.m | 25 + common/minFunc_2012/minFunc/mex/lbfgsAddC.c | 34 + common/minFunc_2012/minFunc/mex/lbfgsC.c | 115 ++ common/minFunc_2012/minFunc/mex/lbfgsProdC.c | 93 ++ common/minFunc_2012/minFunc/mex/mcholC.c | 199 +++ common/minFunc_2012/minFunc/minFunc.m | 1170 +++++++++++++++++ .../minFunc/minFunc_processInputOptions.m | 168 +++ common/minFunc_2012/minFunc/polyinterp.m | 144 ++ common/minFunc_2012/minFunc/precondDiag.m | 2 + common/minFunc_2012/minFunc/precondTriu.m | 2 + common/minFunc_2012/minFunc/precondTriuDiag.m | 2 + common/minFunc_2012/minFunc/taylorModel.m | 38 + common/minFunc_2012/rosenbrock.m | 4 + common/samplePatches.m | 25 + ex1/binary_classifier_accuracy.m | 3 + ex1/ex1_load_mnist.m | 50 + ex1/ex1a_linreg.m | 
90 ++ ex1/ex1b_logreg.m | 56 + ex1/ex1c_softmax.m | 58 + ex1/grad_check.m | 28 + ex1/housing.data | 506 +++++++ ex1/linear_regression.m | 24 + ex1/linear_regression_vec.m | 20 + ex1/logistic_regression.m | 24 + ex1/logistic_regression_vec.m | 21 + ex1/multi_classifier_accuracy.m | 5 + ex1/sigmoid.m | 2 + ex1/softmax_regression_vec.m | 32 + multilayer_supervised/initialize_weights.m | 22 + multilayer_supervised/load_preprocess_mnist.m | 14 + multilayer_supervised/params2stack.m | 43 + multilayer_supervised/run_train.m | 58 + multilayer_supervised/stack2params.m | 38 + multilayer_supervised/supervised_dnn_cost.m | 42 + pca/pca_gen.m | 119 ++ rica/bsxfunwrap.m | 28 + rica/l2rowscaled.m | 7 + rica/l2rowscaledg.m | 18 + rica/preprocess.m | 5 + rica/readme.txt | 3 + rica/removeDC.m | 20 + rica/runSoftICA.m | 49 + rica/showBases.m | 7 + rica/softICACost.m | 15 + rica/zca2.m | 10 + stl/feedfowardRICA.m | 75 ++ stl/stlExercise.m | 162 +++ 102 files changed, 5635 insertions(+) create mode 100644 cnn/cnnConvolve.m create mode 100644 cnn/cnnCost.m create mode 100644 cnn/cnnExercise.m create mode 100644 cnn/cnnInitParams.m create mode 100644 cnn/cnnParamsToStack.m create mode 100644 cnn/cnnPool.m create mode 100644 cnn/cnnTrain.m create mode 100644 cnn/computeNumericalGradient.m create mode 100644 cnn/minFuncSGD.m create mode 100644 common/display_network.m create mode 100644 common/loadMNISTImages.m create mode 100644 common/loadMNISTLabels.m create mode 100644 common/minFunc_2012/autoDif/autoGrad.m create mode 100644 common/minFunc_2012/autoDif/autoHess.m create mode 100644 common/minFunc_2012/autoDif/autoHv.m create mode 100644 common/minFunc_2012/autoDif/autoTensor.m create mode 100644 common/minFunc_2012/autoDif/derivativeCheck.m create mode 100644 common/minFunc_2012/autoDif/fastDerivativeCheck.m create mode 100644 common/minFunc_2012/example_derivativeCheck.m create mode 100644 common/minFunc_2012/example_minFunc.m create mode 100644 common/minFunc_2012/logisticExample/LogisticDiagPrecond.m create mode 100644 common/minFunc_2012/logisticExample/LogisticHv.m create mode 100644 common/minFunc_2012/logisticExample/LogisticLoss.m create mode 100644 common/minFunc_2012/logisticExample/example_minFunc_LR.m create mode 100644 common/minFunc_2012/logisticExample/mylogsumexp.m create mode 100644 common/minFunc_2012/mexAll.m create mode 100644 common/minFunc_2012/minFunc/ArmijoBacktrack.m create mode 100644 common/minFunc_2012/minFunc/WolfeLineSearch.m create mode 100755 common/minFunc_2012/minFunc/compiled/lbfgsAddC.mexa64 create mode 100755 common/minFunc_2012/minFunc/compiled/lbfgsAddC.mexmaci64 create mode 100644 common/minFunc_2012/minFunc/compiled/lbfgsAddC.mexw64 create mode 100755 common/minFunc_2012/minFunc/compiled/lbfgsC.mexa64 create mode 100755 common/minFunc_2012/minFunc/compiled/lbfgsC.mexglx create mode 100644 common/minFunc_2012/minFunc/compiled/lbfgsC.mexmac create mode 100644 common/minFunc_2012/minFunc/compiled/lbfgsC.mexmaci create mode 100644 common/minFunc_2012/minFunc/compiled/lbfgsC.mexmaci64 create mode 100644 common/minFunc_2012/minFunc/compiled/lbfgsC.mexw32 create mode 100644 common/minFunc_2012/minFunc/compiled/lbfgsC.mexw64 create mode 100755 common/minFunc_2012/minFunc/compiled/lbfgsProdC.mexa64 create mode 100755 common/minFunc_2012/minFunc/compiled/lbfgsProdC.mexmaci64 create mode 100644 common/minFunc_2012/minFunc/compiled/lbfgsProdC.mexw64 create mode 100755 common/minFunc_2012/minFunc/compiled/mcholC.mexa64 create mode 100644 
common/minFunc_2012/minFunc/compiled/mcholC.mexglx create mode 100644 common/minFunc_2012/minFunc/compiled/mcholC.mexmac create mode 100644 common/minFunc_2012/minFunc/compiled/mcholC.mexmaci64 create mode 100644 common/minFunc_2012/minFunc/compiled/mcholC.mexw32 create mode 100644 common/minFunc_2012/minFunc/compiled/mcholC.mexw64 create mode 100644 common/minFunc_2012/minFunc/conjGrad.m create mode 100644 common/minFunc_2012/minFunc/dampedUpdate.m create mode 100644 common/minFunc_2012/minFunc/isLegal.m create mode 100644 common/minFunc_2012/minFunc/lbfgs.m create mode 100644 common/minFunc_2012/minFunc/lbfgsAdd.m create mode 100644 common/minFunc_2012/minFunc/lbfgsProd.m create mode 100644 common/minFunc_2012/minFunc/lbfgsUpdate.m create mode 100644 common/minFunc_2012/minFunc/mchol.m create mode 100644 common/minFunc_2012/minFunc/mcholinc.m create mode 100644 common/minFunc_2012/minFunc/mex/lbfgsAddC.c create mode 100644 common/minFunc_2012/minFunc/mex/lbfgsC.c create mode 100644 common/minFunc_2012/minFunc/mex/lbfgsProdC.c create mode 100644 common/minFunc_2012/minFunc/mex/mcholC.c create mode 100644 common/minFunc_2012/minFunc/minFunc.m create mode 100644 common/minFunc_2012/minFunc/minFunc_processInputOptions.m create mode 100644 common/minFunc_2012/minFunc/polyinterp.m create mode 100644 common/minFunc_2012/minFunc/precondDiag.m create mode 100644 common/minFunc_2012/minFunc/precondTriu.m create mode 100644 common/minFunc_2012/minFunc/precondTriuDiag.m create mode 100644 common/minFunc_2012/minFunc/taylorModel.m create mode 100644 common/minFunc_2012/rosenbrock.m create mode 100644 common/samplePatches.m create mode 100644 ex1/binary_classifier_accuracy.m create mode 100644 ex1/ex1_load_mnist.m create mode 100644 ex1/ex1a_linreg.m create mode 100644 ex1/ex1b_logreg.m create mode 100644 ex1/ex1c_softmax.m create mode 100644 ex1/grad_check.m create mode 100644 ex1/housing.data create mode 100644 ex1/linear_regression.m create mode 100644 ex1/linear_regression_vec.m create mode 100644 ex1/logistic_regression.m create mode 100644 ex1/logistic_regression_vec.m create mode 100644 ex1/multi_classifier_accuracy.m create mode 100644 ex1/sigmoid.m create mode 100644 ex1/softmax_regression_vec.m create mode 100644 multilayer_supervised/initialize_weights.m create mode 100644 multilayer_supervised/load_preprocess_mnist.m create mode 100644 multilayer_supervised/params2stack.m create mode 100644 multilayer_supervised/run_train.m create mode 100644 multilayer_supervised/stack2params.m create mode 100644 multilayer_supervised/supervised_dnn_cost.m create mode 100644 pca/pca_gen.m create mode 100644 rica/bsxfunwrap.m create mode 100644 rica/l2rowscaled.m create mode 100644 rica/l2rowscaledg.m create mode 100644 rica/preprocess.m create mode 100644 rica/readme.txt create mode 100644 rica/removeDC.m create mode 100644 rica/runSoftICA.m create mode 100644 rica/showBases.m create mode 100644 rica/softICACost.m create mode 100644 rica/zca2.m create mode 100644 stl/feedfowardRICA.m create mode 100644 stl/stlExercise.m diff --git a/cnn/cnnConvolve.m b/cnn/cnnConvolve.m new file mode 100644 index 0000000..0c793b7 --- /dev/null +++ b/cnn/cnnConvolve.m @@ -0,0 +1,70 @@ +function convolvedFeatures = cnnConvolve(filterDim, numFilters, images, W, b) +%cnnConvolve Returns the convolution of the features given by W and b with +%the given images +% +% Parameters: +% filterDim - filter (feature) dimension +% numFilters - number of feature maps +% images - large images to convolve with, matrix in the form +% 
+%           images(r, c, image number)
+%  W, b - W, b for features from the sparse autoencoder
+%         W is of shape (filterDim,filterDim,numFilters)
+%         b is of shape (numFilters,1)
+%
+% Returns:
+%  convolvedFeatures - matrix of convolved features in the form
+%                      convolvedFeatures(imageRow, imageCol, featureNum, imageNum)
+
+numImages = size(images, 3);
+imageDim = size(images, 1);
+convDim = imageDim - filterDim + 1;
+
+convolvedFeatures = zeros(convDim, convDim, numFilters, numImages);
+
+% Instructions:
+%   Convolve every filter with every image here to produce the
+%   (imageDim - filterDim + 1) x (imageDim - filterDim + 1) x numFeatures x numImages
+%   matrix convolvedFeatures, such that
+%   convolvedFeatures(imageRow, imageCol, featureNum, imageNum) is the
+%   value of the convolved featureNum feature for the imageNum image over
+%   the region (imageRow, imageCol) to (imageRow + filterDim - 1, imageCol + filterDim - 1)
+%
+% Expected running times:
+%   Convolving with 100 images should take less than 30 seconds
+%   Convolving with 5000 images should take around 2 minutes
+%   (So to save time when testing, you should convolve with fewer images, as
+%   described earlier)
+
+
+for imageNum = 1:numImages
+  for filterNum = 1:numFilters
+
+    % convolution of image with feature matrix
+    convolvedImage = zeros(convDim, convDim);
+
+    % Obtain the feature (filterDim x filterDim) needed during the convolution
+    %%% YOUR CODE HERE %%%
+
+    % Flip the feature matrix because of the definition of convolution, as explained later
+    filter = rot90(squeeze(filter),2);
+
+    % Obtain the image
+    im = squeeze(images(:, :, imageNum));
+
+    % Convolve "filter" with "im", adding the result to convolvedImage
+    % be sure to do a 'valid' convolution
+    %%% YOUR CODE HERE %%%
+
+    % Add the bias unit
+    % Then, apply the sigmoid function to get the hidden activation
+    %%% YOUR CODE HERE %%%
+
+    convolvedFeatures(:, :, filterNum, imageNum) = convolvedImage;
+  end
+end
+
+
+end
+
diff --git a/cnn/cnnCost.m b/cnn/cnnCost.m
new file mode 100644
index 0000000..1feebf8
--- /dev/null
+++ b/cnn/cnnCost.m
@@ -0,0 +1,137 @@
+function [cost, grad, preds] = cnnCost(theta,images,labels,numClasses,...
+                                filterDim,numFilters,poolDim,pred)
+% Calculate cost and gradient for a single layer convolutional neural
+% network followed by a softmax layer with cross entropy objective.
+%
+% Parameters:
+%  theta      -  unrolled parameter vector
+%  images     -  stores images in imageDim x imageDim x numImages array
+%  numClasses -  number of classes to predict
+%  filterDim  -  dimension of convolutional filter
+%  numFilters -  number of convolutional filters
+%  poolDim    -  dimension of pooling area
+%  pred       -  boolean; if true, only forward propagate and return predictions
+%
+%
+% Returns:
+%  cost       -  cross entropy cost
+%  grad       -  gradient with respect to theta (if pred==False)
+%  preds      -  list of predictions for each example (if pred==True)
+
+
+if ~exist('pred','var')
+    pred = false;
+end;
+
+
+imageDim = size(images,1); % height/width of image
+numImages = size(images,3); % number of images
+
+%% Reshape parameters and setup gradient matrices
+
+% Wc is filterDim x filterDim x numFilters parameter matrix
+% bc is the corresponding bias
+
+% Wd is numClasses x hiddenSize parameter matrix where hiddenSize is the
+% number of output units from the convolutional layer
+% bd is corresponding bias
+[Wc, Wd, bc, bd] = cnnParamsToStack(theta,imageDim,filterDim,numFilters,...
+                        poolDim,numClasses);
+
+% Same sizes as Wc,Wd,bc,bd. Used to hold gradient w.r.t above params.
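+% Hint (for the cnnConvolve steps marked "YOUR CODE HERE" above): one
+% possible way to fill them in, using MATLAB's built-in conv2. This is a
+% sketch of one valid solution, not the only one:
+%
+%   filter = W(:,:,filterNum);                    % obtain the feature
+%   convolvedImage = conv2(im, filter, 'valid');  % 'valid' convolution
+%   convolvedImage = 1 ./ (1 + exp(-(convolvedImage + b(filterNum))));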
+Wc_grad = zeros(size(Wc));
+Wd_grad = zeros(size(Wd));
+bc_grad = zeros(size(bc));
+bd_grad = zeros(size(bd));
+
+
+%%======================================================================
+%% STEP 1a: Forward Propagation
+%  In this step you will forward propagate the input through the
+%  convolutional and subsampling (mean pooling) layers. You will then use
+%  the responses from the convolution and pooling layer as the input to a
+%  standard softmax layer.
+
+%% Convolutional Layer
+%  For each image and each filter, convolve the image with the filter, add
+%  the bias and apply the sigmoid nonlinearity. Then subsample the
+%  convolved activations with mean pooling. Store the results of the
+%  convolution in activations and the results of the pooling in
+%  activationsPooled. You will need to save the convolved activations for
+%  backpropagation.
+convDim = imageDim-filterDim+1; % dimension of convolved output
+outputDim = (convDim)/poolDim; % dimension of subsampled output
+
+% convDim x convDim x numFilters x numImages tensor for storing activations
+activations = zeros(convDim,convDim,numFilters,numImages);
+
+% outputDim x outputDim x numFilters x numImages tensor for storing
+% subsampled activations
+activationsPooled = zeros(outputDim,outputDim,numFilters,numImages);
+
+%%% YOUR CODE HERE %%%
+
+
+% Reshape activations into 2-d matrix, hiddenSize x numImages,
+% for Softmax layer
+activationsPooled = reshape(activationsPooled,[],numImages);
+
+%% Softmax Layer
+%  Forward propagate the pooled activations calculated above into a
+%  standard softmax layer. For your convenience we have reshaped
+%  activationsPooled into a hiddenSize x numImages matrix. Store the
+%  results in probs.
+
+% numClasses x numImages for storing probability that each image belongs to
+% each class.
+probs = zeros(numClasses,numImages);
+
+%%% YOUR CODE HERE %%%
+
+
+%%======================================================================
+%% STEP 1b: Calculate Cost
+%  In this step you will use the labels given as input and the probs
+%  calculated above to evaluate the cross entropy objective. Store your
+%  results in cost.
+
+cost = 0; % save objective into cost
+
+%%% YOUR CODE HERE %%%
+
+
+% Makes predictions given probs and returns without backpropagating errors.
+if pred
+    [~,preds] = max(probs,[],1);
+    preds = preds';
+    grad = 0;
+    return;
+end;
+
+%%======================================================================
+%% STEP 1c: Backpropagation
+%  Backpropagate errors through the softmax and convolutional/subsampling
+%  layers. Store the errors for the next step to calculate the gradient.
+%  Backpropagating the error w.r.t the softmax layer is as usual. To
+%  backpropagate through the pooling layer, you will need to upsample the
+%  error with respect to the pooling layer for each filter and each image.
+%  Use the kron function and a matrix of ones to do this upsampling
+%  quickly.
+
+%%% YOUR CODE HERE %%%
+
+%%======================================================================
+%% STEP 1d: Gradient Calculation
+%  After backpropagating the errors above, we can use them to calculate the
+%  gradient with respect to all the parameters. The gradient w.r.t the
+%  softmax layer is calculated as usual. To calculate the gradient w.r.t.
+%  a filter in the convolutional layer, convolve the backpropagated error
+%  for that filter with each image and aggregate over images.
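+%  Hint: one possible shape for this computation (a sketch; softmaxError
+%  and delta_conv are illustrative names, not defined by this skeleton):
+%
+%    Wd_grad = softmaxError * activationsPooled';   % softmax weights
+%    bd_grad = sum(softmaxError, 2);                % softmax bias
+%    % and, for each image i and filter f, with delta_conv the error
+%    % upsampled and backpropagated to the convolutional layer:
+%    Wc_grad(:,:,f) = Wc_grad(:,:,f) + conv2(images(:,:,i), rot90(delta_conv,2), 'valid');
+%    bc_grad(f) = bc_grad(f) + sum(delta_conv(:));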
+
+%%% YOUR CODE HERE %%%
+
+
+%% Unroll gradient into grad vector for minFunc
+grad = [Wc_grad(:) ; Wd_grad(:) ; bc_grad(:) ; bd_grad(:)];
+
+end
\ No newline at end of file
diff --git a/cnn/cnnExercise.m b/cnn/cnnExercise.m
new file mode 100644
index 0000000..43f0808
--- /dev/null
+++ b/cnn/cnnExercise.m
@@ -0,0 +1,106 @@
+%% Convolution and Pooling Exercise
+
+%  Instructions
+%  ------------
+%
+%  This file contains code that helps you get started on the
+%  convolution and pooling exercise. In this exercise, you will only
+%  need to modify cnnConvolve.m and cnnPool.m. You will not need to modify
+%  this file.
+
+%%======================================================================
+%% STEP 0: Initialization and Load Data
+%  Here we initialize some parameters used for the exercise.
+
+imageDim = 28;    % image dimension
+
+filterDim = 8;    % filter dimension
+numFilters = 100; % number of feature maps
+
+numImages = 60000; % number of images
+
+poolDim = 3;      % dimension of pooling region
+
+% Here we load MNIST training images
+addpath ../common/;
+images = loadMNISTImages('../common/train-images-idx3-ubyte');
+images = reshape(images,imageDim,imageDim,numImages);
+
+W = randn(filterDim,filterDim,numFilters);
+b = rand(numFilters, 1);
+
+%%======================================================================
+%% STEP 1: Implement and test convolution
+%  In this step, you will implement the convolution and test it on
+%  a small part of the data set to ensure that you have implemented
+%  this step correctly.
+
+%% STEP 1a: Implement convolution
+%  Implement convolution in the function cnnConvolve in cnnConvolve.m
+
+%% Use only the first 8 images for testing
+convImages = images(:, :, 1:8);
+
+% NOTE: Implement cnnConvolve in cnnConvolve.m first!
+convolvedFeatures = cnnConvolve(filterDim, numFilters, convImages, W, b);
+
+%% STEP 1b: Checking your convolution
+%  To ensure that you have convolved the features correctly, we have
+%  provided some code to compare the results of your convolution with
+%  activations from the sparse autoencoder
+
+% For 1000 random points
+for i = 1:1000
+    filterNum = randi([1, numFilters]);
+    imageNum = randi([1, 8]);
+    imageRow = randi([1, imageDim - filterDim + 1]);
+    imageCol = randi([1, imageDim - filterDim + 1]);
+
+    patch = convImages(imageRow:imageRow + filterDim - 1, imageCol:imageCol + filterDim - 1, imageNum);
+
+    feature = sum(sum(patch.*W(:,:,filterNum)))+b(filterNum);
+    feature = 1./(1+exp(-feature));
+
+    if abs(feature - convolvedFeatures(imageRow, imageCol, filterNum, imageNum)) > 1e-9
+        fprintf('Convolved feature does not match test feature\n');
+        fprintf('Filter Number    : %d\n', filterNum);
+        fprintf('Image Number     : %d\n', imageNum);
+        fprintf('Image Row        : %d\n', imageRow);
+        fprintf('Image Column     : %d\n', imageCol);
+        fprintf('Convolved feature: %0.5f\n', convolvedFeatures(imageRow, imageCol, filterNum, imageNum));
+        fprintf('Test feature     : %0.5f\n', feature);
+        error('Convolved feature does not match test feature');
+    end
+end
+
+disp('Congratulations! Your convolution code passed the test.');
+
+%%======================================================================
+%% STEP 2: Implement and test pooling
+%  Implement pooling in the function cnnPool in cnnPool.m
+
+%% STEP 2a: Implement pooling
+% NOTE: Implement cnnPool in cnnPool.m first!
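+% Hint: one possible mean-pooling implementation inside cnnPool (a sketch
+% using conv2 plus subsampling; a plain double loop over poolDim x poolDim
+% regions is equally valid):
+%
+%   for imageNum = 1:numImages
+%     for featureNum = 1:numFilters
+%       pooledImage = conv2(convolvedFeatures(:,:,featureNum,imageNum), ...
+%                           ones(poolDim)/poolDim^2, 'valid');
+%       pooledFeatures(:,:,featureNum,imageNum) = ...
+%           pooledImage(1:poolDim:end, 1:poolDim:end);
+%     end
+%   end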
+pooledFeatures = cnnPool(poolDim, convolvedFeatures);
+
+%% STEP 2b: Checking your pooling
+%  To ensure that you have implemented pooling correctly, we will use your
+%  pooling function to pool over a test matrix and check the results.
+
+testMatrix = reshape(1:64, 8, 8);
+expectedMatrix = [mean(mean(testMatrix(1:4, 1:4))) mean(mean(testMatrix(1:4, 5:8))); ...
+                  mean(mean(testMatrix(5:8, 1:4))) mean(mean(testMatrix(5:8, 5:8))); ];
+
+testMatrix = reshape(testMatrix, 8, 8, 1, 1);
+
+pooledFeatures = squeeze(cnnPool(4, testMatrix));
+
+if ~isequal(pooledFeatures, expectedMatrix)
+    disp('Pooling incorrect');
+    disp('Expected');
+    disp(expectedMatrix);
+    disp('Got');
+    disp(pooledFeatures);
+else
+    disp('Congratulations! Your pooling code passed the test.');
+end
diff --git a/cnn/cnnInitParams.m b/cnn/cnnInitParams.m
new file mode 100644
index 0000000..1ae7045
--- /dev/null
+++ b/cnn/cnnInitParams.m
@@ -0,0 +1,43 @@
+function theta = cnnInitParams(imageDim,filterDim,numFilters,...
+                                poolDim,numClasses)
+% Initialize parameters for a single layer convolutional neural
+% network followed by a softmax layer.
+%
+% Parameters:
+%  imageDim   -  height/width of image
+%  filterDim  -  dimension of convolutional filter
+%  numFilters -  number of convolutional filters
+%  poolDim    -  dimension of pooling area
+%  numClasses -  number of classes to predict
+%
+%
+% Returns:
+%  theta      -  unrolled parameter vector with initialized weights
+
+%% Initialize parameters randomly based on layer sizes.
+assert(filterDim < imageDim,'filterDim must be less than imageDim');
+
+Wc = 1e-1*randn(filterDim,filterDim,numFilters);
+
+outDim = imageDim - filterDim + 1; % dimension of convolved image
+
+% assume outDim is multiple of poolDim
+assert(mod(outDim,poolDim)==0,...
+       'poolDim must divide imageDim - filterDim + 1');
+
+outDim = outDim/poolDim;
+hiddenSize = outDim^2*numFilters;
+
+r  = sqrt(6) / sqrt(numClasses+hiddenSize+1);   % we'll choose weights uniformly from the interval [-r, r]
+Wd = rand(numClasses, hiddenSize) * 2 * r - r;
+
+bc = zeros(numFilters, 1);
+bd = zeros(numClasses, 1);
+
+% Convert weights and bias gradients to the vector form.
+% This step will "unroll" (flatten and concatenate together) all
+% your parameters into a vector, which can then be used with minFunc.
+theta = [Wc(:) ; Wd(:) ; bc(:) ; bd(:)];
+
+end
+
diff --git a/cnn/cnnParamsToStack.m b/cnn/cnnParamsToStack.m
new file mode 100644
index 0000000..3a36f68
--- /dev/null
+++ b/cnn/cnnParamsToStack.m
@@ -0,0 +1,39 @@
+function [Wc, Wd, bc, bd] = cnnParamsToStack(theta,imageDim,filterDim,...
+                        numFilters,poolDim,numClasses)
+% Converts unrolled parameters for a single layer convolutional neural
+% network followed by a softmax layer into structured weight
+% tensors/matrices and corresponding biases
+%
+% Parameters:
+%  theta      -  unrolled parameter vector
+%  imageDim   -  height/width of image
+%  filterDim  -  dimension of convolutional filter
+%  numFilters -  number of convolutional filters
+%  poolDim    -  dimension of pooling area
+%  numClasses -  number of classes to predict
+%
+%
+% Returns:
+%  Wc  -  filterDim x filterDim x numFilters parameter matrix
+%  Wd  -  numClasses x hiddenSize parameter matrix, hiddenSize is
+%         calculated as numFilters*((imageDim-filterDim+1)/poolDim)^2
+%  bc  -  bias for convolution layer of size numFilters x 1
+%  bd  -  bias for dense layer of size numClasses x 1
+
+outDim = (imageDim - filterDim + 1)/poolDim;
+hiddenSize = outDim^2*numFilters;
+
+%% Reshape theta
+indS = 1;
+indE = filterDim^2*numFilters;
+Wc = reshape(theta(indS:indE),filterDim,filterDim,numFilters);
+indS = indE+1;
+indE = indE+hiddenSize*numClasses;
+Wd = reshape(theta(indS:indE),numClasses,hiddenSize);
+indS = indE+1;
+indE = indE+numFilters;
+bc = theta(indS:indE);
+bd = theta(indE+1:end);
+
+
+end
\ No newline at end of file
diff --git a/cnn/cnnPool.m b/cnn/cnnPool.m
new file mode 100644
index 0000000..9e039f7
--- /dev/null
+++ b/cnn/cnnPool.m
@@ -0,0 +1,36 @@
+function pooledFeatures = cnnPool(poolDim, convolvedFeatures)
+%cnnPool Pools the given convolved features
+%
+% Parameters:
+%  poolDim - dimension of pooling region
+%  convolvedFeatures - convolved features to pool (as given by cnnConvolve)
+%                      convolvedFeatures(imageRow, imageCol, featureNum, imageNum)
+%
+% Returns:
+%  pooledFeatures - matrix of pooled features in the form
+%                   pooledFeatures(poolRow, poolCol, featureNum, imageNum)
+%
+
+numImages = size(convolvedFeatures, 4);
+numFilters = size(convolvedFeatures, 3);
+convolvedDim = size(convolvedFeatures, 1);
+
+pooledFeatures = zeros(convolvedDim / poolDim, ...
+        convolvedDim / poolDim, numFilters, numImages);
+
+% Instructions:
+%   Now pool the convolved features in regions of poolDim x poolDim,
+%   to obtain the
+%   (convolvedDim/poolDim) x (convolvedDim/poolDim) x numFeatures x numImages
+%   matrix pooledFeatures, such that
+%   pooledFeatures(poolRow, poolCol, featureNum, imageNum) is the
+%   value of the featureNum feature for the imageNum image pooled over the
+%   corresponding (poolRow, poolCol) pooling region.
+%
+%   Use mean pooling here.
+
+%%% YOUR CODE HERE %%%
+
+
+end
+
diff --git a/cnn/cnnTrain.m b/cnn/cnnTrain.m
new file mode 100644
index 0000000..5ac045a
--- /dev/null
+++ b/cnn/cnnTrain.m
@@ -0,0 +1,104 @@
+%% Convolution Neural Network Exercise
+
+%  Instructions
+%  ------------
+%
+%  This file contains code that helps you get started in building a single
+%  layer convolutional neural network. In this exercise, you will only
+%  need to modify cnnCost.m and minFuncSGD.m. You will not need to
+%  modify this file.
+
+%%======================================================================
+%% STEP 0: Initialize Parameters and Load Data
+%  Here we initialize some parameters used for the exercise.
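+% As a quick sanity check on the parameter packing convention of
+% cnnInitParams/cnnParamsToStack (an optional aside; the numbers mirror
+% the configuration below):
+%
+%   theta = cnnInitParams(28,9,10,2,10);                % unrolled vector
+%   [Wc,Wd,bc,bd] = cnnParamsToStack(theta,28,9,10,2,10);
+%   % size(Wc) is [9 9 10], size(Wd) is [10 1000],
+%   % and bc and bd are both 10x1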
+
+% Configuration
+imageDim = 28;
+numClasses = 10;  % Number of classes (MNIST images fall into 10 classes)
+filterDim = 9;    % Filter size for conv layer (should divide imageDim)
+numFilters = 10;  % Number of filters for conv layer
+poolDim = 2;      % Pooling dimension, (should divide imageDim-filterDim+1)
+
+% Load MNIST Train
+addpath ../common/;
+images = loadMNISTImages('../common/train-images-idx3-ubyte');
+images = reshape(images,imageDim,imageDim,[]);
+labels = loadMNISTLabels('../common/train-labels-idx1-ubyte');
+labels(labels==0) = 10; % Remap 0 to 10
+
+% Initialize Parameters
+theta = cnnInitParams(imageDim,filterDim,numFilters,poolDim,numClasses);
+
+%%======================================================================
+%% STEP 1: Implement convNet Objective
+%  Implement the function cnnCost.m.
+
+%%======================================================================
+%% STEP 2: Gradient Check
+%  Use the file computeNumericalGradient.m to check the gradient
+%  calculation for your cnnCost.m function. You may need to add the
+%  appropriate path or copy the file to this directory.
+
+DEBUG=true;  % set this to true to check gradient
+if DEBUG
+    % To speed up gradient checking, we will use a reduced network and
+    % a debugging data set
+    db_numFilters = 2;
+    db_filterDim = 9;
+    db_poolDim = 5;
+    db_images = images(:,:,1:10);
+    db_labels = labels(1:10);
+    db_theta = cnnInitParams(imageDim,db_filterDim,db_numFilters,...
+                db_poolDim,numClasses);
+
+    [cost grad] = cnnCost(db_theta,db_images,db_labels,numClasses,...
+                                db_filterDim,db_numFilters,db_poolDim);
+
+
+    % Check gradients
+    numGrad = computeNumericalGradient( @(x) cnnCost(x,db_images,...
+                                db_labels,numClasses,db_filterDim,...
+                                db_numFilters,db_poolDim), db_theta);
+
+    % Use this to visually compare the gradients side by side
+    disp([numGrad grad]);
+
+    diff = norm(numGrad-grad)/norm(numGrad+grad);
+    % Should be small. In our implementation, these values are usually
+    % less than 1e-9.
+    disp(diff);
+
+    assert(diff < 1e-9,...
+        'Difference too large. Check your gradient computation again');
+
+end;
+
+%%======================================================================
+%% STEP 3: Learn Parameters
+%  Implement minFuncSGD.m, then train the model.
+
+options.epochs = 3;
+options.minibatch = 256;
+options.alpha = 1e-1;
+options.momentum = .95;
+
+opttheta = minFuncSGD(@(x,y,z) cnnCost(x,y,z,numClasses,filterDim,...
+                      numFilters,poolDim),theta,images,labels,options);
+
+%%======================================================================
+%% STEP 4: Test
+%  Test the performance of the trained model using the MNIST test set. Your
+%  accuracy should be above 97% after 3 epochs of training.
+
+testImages = loadMNISTImages('../common/t10k-images-idx3-ubyte');
+testImages = reshape(testImages,imageDim,imageDim,[]);
+testLabels = loadMNISTLabels('../common/t10k-labels-idx1-ubyte');
+testLabels(testLabels==0) = 10; % Remap 0 to 10
+
+[~,~,preds]=cnnCost(opttheta,testImages,testLabels,numClasses,...
+                filterDim,numFilters,poolDim,true);
+
+acc = sum(preds==testLabels)/length(preds);
+
+% Accuracy should be around 97.4% after 3 epochs
+fprintf('Accuracy is %f\n',acc);
diff --git a/cnn/computeNumericalGradient.m b/cnn/computeNumericalGradient.m
new file mode 100644
index 0000000..a9a5cef
--- /dev/null
+++ b/cnn/computeNumericalGradient.m
@@ -0,0 +1,41 @@
+function numgrad = computeNumericalGradient(J, theta)
+% numgrad = computeNumericalGradient(J, theta)
+% theta: a vector of parameters
+% J: a function that outputs a real number. Calling y = J(theta) will return the
+% function value at theta.
+
+% Initialize numgrad with zeros
+numgrad = zeros(size(theta));
+
+%% ---------- YOUR CODE HERE --------------------------------------
+% Instructions:
+% Implement numerical gradient checking, and return the result in numgrad.
+% (See Section 2.3 of the lecture notes.)
+% You should write code so that numgrad(i) is (the numerical approximation to) the
+% partial derivative of J with respect to the i-th input argument, evaluated at theta.
+% I.e., numgrad(i) should be (approximately) the partial derivative of J with
+% respect to theta(i).
+%
+% Hint: You will probably want to compute the elements of numgrad one at a time.
+
+epsilon = 1e-4;
+
+for i =1:length(numgrad)
+    oldT = theta(i);
+    theta(i)=oldT+epsilon;
+    pos = J(theta);
+    theta(i)=oldT-epsilon;
+    neg = J(theta);
+    numgrad(i) = (pos-neg)/(2*epsilon);
+    theta(i)=oldT;
+    if mod(i,100)==0
+        fprintf('Done with %d\n',i);
+    end;
+end;
+
+
+%% ---------------------------------------------------------------
+end
diff --git a/cnn/minFuncSGD.m b/cnn/minFuncSGD.m
new file mode 100644
index 0000000..b62d518
--- /dev/null
+++ b/cnn/minFuncSGD.m
@@ -0,0 +1,80 @@
+function [opttheta] = minFuncSGD(funObj,theta,data,labels,...
+                        options)
+% Runs stochastic gradient descent with momentum to optimize the parameters
+% for the given objective.
+%
+% Parameters:
+%  funObj     -  function handle which accepts as input theta, data, labels
+%                and returns cost and gradient w.r.t to theta.
+%  theta      -  unrolled parameter vector
+%  data       -  stores data in m x n x numExamples tensor
+%  labels     -  corresponding labels in numExamples x 1 vector
+%  options    -  struct to store specific options for optimization
+%
+% Returns:
+%  opttheta   -  optimized parameter vector
+%
+% Options (* required)
+%  epochs*     - number of epochs through data
+%  alpha*      - initial learning rate
+%  minibatch*  - size of minibatch
+%  momentum    - momentum constant, defaults to 0.9
+
+
+%%======================================================================
+%% Setup
+assert(all(isfield(options,{'epochs','alpha','minibatch'})),...
+        'Some options not defined');
+if ~isfield(options,'momentum')
+    options.momentum = 0.9;
+end;
+epochs = options.epochs;
+alpha = options.alpha;
+minibatch = options.minibatch;
+m = length(labels); % training set size
+% Setup for momentum
+mom = 0.5;
+momIncrease = 10;
+velocity = zeros(size(theta));
+
+%%======================================================================
+%% SGD loop
+it = 0;
+for e = 1:epochs
+
+    % randomly permute indices of data for quick minibatch sampling
+    rp = randperm(m);
+
+    for s=1:minibatch:(m-minibatch+1)
+        it = it + 1;
+
+        % increase momentum after momIncrease iterations
+        if it == momIncrease
+            mom = options.momentum;
+        end;
+
+        % get next randomly selected minibatch
+        mb_data = data(:,:,rp(s:s+minibatch-1));
+        mb_labels = labels(rp(s:s+minibatch-1));
+
+        % evaluate the objective function on the next minibatch
+        [cost grad] = funObj(theta,mb_data,mb_labels);
+
+        % Instructions: Add in the weighted velocity vector to the gradient
+        % evaluated above. Then update the current weights theta according
+        % to the SGD update rule with alpha as the learning rate. Finally
+        % update the velocity vector.
+
+        %%% YOUR CODE HERE %%%
+
+        fprintf('Epoch %d: Cost on iteration %d is %f\n',e,it,cost);
+    end;
+
+    % anneal learning rate by factor of two after each epoch
+    alpha = alpha/2.0;
+
+end;
+
+opttheta = theta;
+
+end
\ No newline at end of file
diff --git a/common/display_network.m b/common/display_network.m
new file mode 100644
index 0000000..b68500f
--- /dev/null
+++ b/common/display_network.m
@@ -0,0 +1,101 @@
+function [h, array] = display_network(A, opt_normalize, opt_graycolor, cols, opt_colmajor)
+% This function visualizes filters in matrix A. Each column of A is a
+% filter. We will reshape each column into a square image and visualize it
+% on a cell of the visualization panel.
+% All other parameters are optional, usually you do not need to worry
+% about them.
+% opt_normalize: whether we need to normalize the filters so that all of
+% them can have similar contrast. Default value is true.
+% opt_graycolor: whether we use gray as the heat map. Default is true.
+% cols: how many columns are there in the display. Default value is the
+% square root of the number of columns in A.
+% opt_colmajor: you can switch convention to row major for A. In that
+% case, each row of A is a filter. Default value is false.
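+% Hint (for the minFuncSGD update left blank above): the classical
+% momentum step can be written, for example, as
+%
+%   velocity = mom*velocity + alpha*grad;  % accumulate weighted gradient
+%   theta = theta - velocity;              % SGD step with momentum
+%
+% This is one common formulation; other variants scale the velocity
+% differently.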
+warning off all + +if ~exist('opt_normalize', 'var') || isempty(opt_normalize) + opt_normalize= true; +end + +if ~exist('opt_graycolor', 'var') || isempty(opt_graycolor) + opt_graycolor= true; +end + +if ~exist('opt_colmajor', 'var') || isempty(opt_colmajor) + opt_colmajor = false; +end + +% rescale +A = A - mean(A(:)); + +if opt_graycolor, colormap(gray); end + +% compute rows, cols +[L M]=size(A); +sz=sqrt(L); +buf=1; +if ~exist('cols', 'var') + if floor(sqrt(M))^2 ~= M + n=ceil(sqrt(M)); + while mod(M, n)~=0 && n<1.2*sqrt(M), n=n+1; end + m=ceil(M/n); + else + n=sqrt(M); + m=n; + end +else + n = cols; + m = ceil(M/n); +end + +array=-ones(buf+m*(sz+buf),buf+n*(sz+buf)); + +if ~opt_graycolor + array = 0.1.* array; +end + + +if ~opt_colmajor + k=1; + for i=1:m + for j=1:n + if k>M, + continue; + end + clim=max(abs(A(:,k))); + if opt_normalize + array(buf+(i-1)*(sz+buf)+(1:sz),buf+(j-1)*(sz+buf)+(1:sz))=reshape(A(:,k),sz,sz)/clim; + else + array(buf+(i-1)*(sz+buf)+(1:sz),buf+(j-1)*(sz+buf)+(1:sz))=reshape(A(:,k),sz,sz)/max(abs(A(:))); + end + k=k+1; + end + end +else + k=1; + for j=1:n + for i=1:m + if k>M, + continue; + end + clim=max(abs(A(:,k))); + if opt_normalize + array(buf+(i-1)*(sz+buf)+(1:sz),buf+(j-1)*(sz+buf)+(1:sz))=reshape(A(:,k),sz,sz)/clim; + else + array(buf+(i-1)*(sz+buf)+(1:sz),buf+(j-1)*(sz+buf)+(1:sz))=reshape(A(:,k),sz,sz); + end + k=k+1; + end + end +end + +if opt_graycolor + h=imagesc(array,'EraseMode','none',[-1 1]); +else + h=imagesc(array,'EraseMode','none',[-1 1]); +end +axis image off + +drawnow; + +warning on all diff --git a/common/loadMNISTImages.m b/common/loadMNISTImages.m new file mode 100644 index 0000000..6eb2304 --- /dev/null +++ b/common/loadMNISTImages.m @@ -0,0 +1,26 @@ +function images = loadMNISTImages(filename) +%loadMNISTImages returns a 28x28x[number of MNIST images] matrix containing +%the raw MNIST images + +fp = fopen(filename, 'rb'); +assert(fp ~= -1, ['Could not open ', filename, '']); + +magic = fread(fp, 1, 'int32', 0, 'ieee-be'); +assert(magic == 2051, ['Bad magic number in ', filename, '']); + +numImages = fread(fp, 1, 'int32', 0, 'ieee-be'); +numRows = fread(fp, 1, 'int32', 0, 'ieee-be'); +numCols = fread(fp, 1, 'int32', 0, 'ieee-be'); + +images = fread(fp, inf, 'unsigned char'); +images = reshape(images, numCols, numRows, numImages); +images = permute(images,[2 1 3]); + +fclose(fp); + +% Reshape to #pixels x #examples +images = reshape(images, size(images, 1) * size(images, 2), size(images, 3)); +% Convert to double and rescale to [0,1] +images = double(images) / 255; + +end diff --git a/common/loadMNISTLabels.m b/common/loadMNISTLabels.m new file mode 100644 index 0000000..06c07a4 --- /dev/null +++ b/common/loadMNISTLabels.m @@ -0,0 +1,19 @@ +function labels = loadMNISTLabels(filename) +%loadMNISTLabels returns a [number of MNIST images]x1 matrix containing +%the labels for the MNIST images + +fp = fopen(filename, 'rb'); +assert(fp ~= -1, ['Could not open ', filename, '']); + +magic = fread(fp, 1, 'int32', 0, 'ieee-be'); +assert(magic == 2049, ['Bad magic number in ', filename, '']); + +numLabels = fread(fp, 1, 'int32', 0, 'ieee-be'); + +labels = fread(fp, inf, 'unsigned char'); + +assert(size(labels,1) == numLabels, 'Mismatch in label count'); + +fclose(fp); + +end diff --git a/common/minFunc_2012/autoDif/autoGrad.m b/common/minFunc_2012/autoDif/autoGrad.m new file mode 100644 index 0000000..b9f5b4a --- /dev/null +++ b/common/minFunc_2012/autoDif/autoGrad.m @@ -0,0 +1 @@ +function [f,g] = autoGrad(x,type,funObj,varargin) % [f,g] = 
autoGrad(x,useComplex,funObj,varargin) % % Numerically compute gradient of objective function from function values % % type = % 1 - forward-differencing (p+1 evaluations) % 2 - central-differencing (more accurate, but requires 2p evaluations) % 3 - complex-step derivative (most accurate and only requires p evaluations, but only works for certain objectives) p = length(x); if type == 1 % Use Finite Differencing f = funObj(x,varargin{:}); mu = 2*sqrt(1e-12)*(1+norm(x)); diff = zeros(p,1); for j = 1:p e_j = zeros(p,1); e_j(j) = 1; diff(j,1) = funObj(x + mu*e_j,varargin{:}); end g = (diff-f)/mu; elseif type == 3 % Use Complex Differentials mu = 1e-150; diff = zeros(p,1); for j = 1:p e_j = zeros(p,1); e_j(j) = 1; diff(j,1) = funObj(x + mu*i*e_j,varargin{:}); end f = mean(real(diff)); g = imag(diff)/mu; else % Use Central Differencing mu = 2*sqrt(1e-12)*(1+norm(x)); diff1 = zeros(p,1); diff2 = zeros(p,1); for j = 1:p e_j = zeros(p,1); e_j(j) = 1; diff1(j,1) = funObj(x + mu*e_j,varargin{:}); diff2(j,1) = funObj(x - mu*e_j,varargin{:}); end f = mean([diff1;diff2]); g = (diff1 - diff2)/(2*mu); end if 0 % DEBUG CODE [fReal gReal] = funObj(x,varargin{:}); [fReal f] [gReal g] diff pause; end \ No newline at end of file diff --git a/common/minFunc_2012/autoDif/autoHess.m b/common/minFunc_2012/autoDif/autoHess.m new file mode 100644 index 0000000..7916fe2 --- /dev/null +++ b/common/minFunc_2012/autoDif/autoHess.m @@ -0,0 +1 @@ +function [f,g,H] = autoHess(x,type,funObj,varargin) % Numerically compute Hessian of objective function from gradient values p = length(x); if type == 1 % Use finite differencing mu = 2*sqrt(1e-12)*(1+norm(x)); [f,g] = funObj(x,varargin{:}); diff = zeros(p); for j = 1:p e_j = zeros(p,1); e_j(j) = 1; [f diff(:,j)] = funObj(x + mu*e_j,varargin{:}); end H = (diff-repmat(g,[1 p]))/mu; elseif type == 3 % Use Complex Differentials mu = 1e-150; diff = zeros(p); for j = 1:p e_j = zeros(p,1); e_j(j) = 1; [f(j) diff(:,j)] = funObj(x + mu*i*e_j,varargin{:}); end f = mean(real(f)); g = mean(real(diff),2); H = imag(diff)/mu; else % Use central differencing mu = 2*sqrt(1e-12)*(1+norm(x)); f1 = zeros(p,1); f2 = zeros(p,1); diff1 = zeros(p); diff2 = zeros(p); for j = 1:p e_j = zeros(p,1); e_j(j) = 1; [f1(j) diff1(:,j)] = funObj(x + mu*e_j,varargin{:}); [f2(j) diff2(:,j)] = funObj(x - mu*e_j,varargin{:}); end f = mean([f1;f2]); g = mean([diff1 diff2],2); H = (diff1-diff2)/(2*mu); end % Make sure H is symmetric H = (H+H')/2; if 0 % DEBUG CODE [fReal gReal HReal] = funObj(x,varargin{:}); [fReal f] [gReal g] [HReal H] pause; end \ No newline at end of file diff --git a/common/minFunc_2012/autoDif/autoHv.m b/common/minFunc_2012/autoDif/autoHv.m new file mode 100644 index 0000000..c714347 --- /dev/null +++ b/common/minFunc_2012/autoDif/autoHv.m @@ -0,0 +1,13 @@ +function [Hv] = autoHv(v,x,g,useComplex,funObj,varargin) +% [Hv] = autoHv(v,x,g,useComplex,funObj,varargin) +% +% Numerically compute Hessian-vector product H*v of funObj(x,varargin{:}) +% based on gradient values + +if useComplex + mu = 1e-150i; +else + mu = 2*sqrt(1e-12)*(1+norm(x))/norm(v); +end +[f,finDif] = funObj(x + v*mu,varargin{:}); +Hv = (finDif-g)/mu; \ No newline at end of file diff --git a/common/minFunc_2012/autoDif/autoTensor.m b/common/minFunc_2012/autoDif/autoTensor.m new file mode 100644 index 0000000..84521e0 --- /dev/null +++ b/common/minFunc_2012/autoDif/autoTensor.m @@ -0,0 +1 @@ +function [f,g,H,T] = autoTensor(x,type,funObj,varargin) % [f,g,H,T] = autoTensor(x,useComplex,funObj,varargin) % Numerically compute Tensor of 
3rd-derivatives of objective function from Hessian values p = length(x); if type == 2 mu = 2*sqrt(1e-12)*(1+norm(x)); f1 = zeros(p,1); f2 = zeros(p,1); g1 = zeros(p); g2 = zeros(p); diff = zeros(p,p,p); for j = 1:p e_j = zeros(p,1); e_j(j) = 1; [f1(j) g1(:,j) diff1(:,:,j)] = funObj(x + mu*e_j,varargin{:}); [f2(j) g2(:,j) diff2(:,:,j)] = funObj(x - mu*e_j,varargin{:}); end f = mean([f1;f2]); g = mean([g1 g2],2); H = mean(cat(3,diff1,diff2),3); T = (diff1-diff2)/(2*mu); elseif type == 3 % Use Complex Differentials mu = 1e-150; f = zeros(p,1); g = zeros(p); diff = zeros(p,p,p); for j = 1:p e_j = zeros(p,1); e_j(j) = 1; [f(j) g(:,j) diff(:,:,j)] = funObj(x + mu*i*e_j,varargin{:}); end f = mean(real(f)); g = mean(real(g),2); H = mean(real(diff),3); T = imag(diff)/mu; else % Use finite differencing mu = 2*sqrt(1e-12)*(1+norm(x)); [f,g,H] = funObj(x,varargin{:}); diff = zeros(p,p,p); for j = 1:p e_j = zeros(p,1); e_j(j) = 1; [~,~,diff(:,:,j)] = funObj(x + mu*e_j,varargin{:}); end T = (diff-repmat(H,[1 1 p]))/mu; end
\ No newline at end of file
diff --git a/common/minFunc_2012/autoDif/derivativeCheck.m b/common/minFunc_2012/autoDif/derivativeCheck.m
new file mode 100644
index 0000000..82c0365
--- /dev/null
+++ b/common/minFunc_2012/autoDif/derivativeCheck.m
@@ -0,0 +1,42 @@
+function diff = derivativeCheck(funObj,x,order,type,varargin)
+% diff = derivativeCheck(funObj,x,order,useComplex,varargin)
+%
+% type = 1 (simple forward-difference)
+% type = 2 (central differencing - default)
+% type = 3 (complex-step derivative)
+
+if nargin < 3
+    order = 1; % Only check gradient by default
+    if nargin < 4
+        type = 2; % Use central-differencing by default
+    end
+end
+
+if order == 2
+    [f,g,H] = funObj(x,varargin{:});
+
+    fprintf('Checking Hessian...\n');
+    [f2,g2,H2] = autoHess(x,type,funObj,varargin{:});
+
+    fprintf('Max difference between user and numerical Hessian: %e\n',max(abs(H(:)-H2(:))));
+    if max(abs(H(:)-H2(:))) > 1e-4
+        H
+        H2
+        diff = abs(H-H2)
+        pause;
+    end
+else
+    [f,g] = funObj(x,varargin{:});
+
+    fprintf('Checking Gradient...\n');
+    [f2,g2] = autoGrad(x,type,funObj,varargin{:});
+
+    fprintf('Max difference between user and numerical gradient: %e\n',max(abs(g-g2)));
+    if max(abs(g-g2)) > 1e-4
+        fprintf('User NumDif:\n');
+        [g g2]
+        diff = abs(g-g2)
+        pause
+    end
+end
+
diff --git a/common/minFunc_2012/autoDif/fastDerivativeCheck.m b/common/minFunc_2012/autoDif/fastDerivativeCheck.m
new file mode 100644
index 0000000..6f7c0f1
--- /dev/null
+++ b/common/minFunc_2012/autoDif/fastDerivativeCheck.m
@@ -0,0 +1 @@
+function diff = fastDerivativeCheck(funObj,x,order,type,varargin) % diff = fastDerivativeCheck(funObj,x,order,varargin) if nargin < 3 order = 1; % Only check gradient by default if nargin < 4 type = 2; % Use central-differencing by default end end p = length(x); d = sign(randn(p,1)); if order == 2 fprintf('Checking Hessian-vector product along random direction:\n'); [f,g,H] = funObj(x,varargin{:}); Hv = H*d; if type == 1 % Use Finite Differencing mu = 2*sqrt(1e-12)*(1+norm(x))/(1+norm(x)); [diff,diffa] = funObj(x+d*mu,varargin{:}); Hv2 = (diffa-g)/mu; elseif type == 3 % Use Complex Differentials mu = 1e-150; [diff,diffa] = funObj(x+d*mu*i,varargin{:}); Hv2 = imag(diffa-g)/mu; else % Use Central Differencing mu = 2*sqrt(1e-12)*(1+norm(x))/(1+norm(x)); [diff1,diffa] = funObj(x+d*mu,varargin{:}); [diff2,diffb] = funObj(x-d*mu,varargin{:}); Hv2 = (diffa-diffb)/(2*mu); end fprintf('Max difference between user and numerical Hessian-vector product: %e\n',max(abs(Hv-Hv2))); else fprintf('Checking
Gradient along random direction:\n'); [f,g] = funObj(x,varargin{:}); gtd = g'*d; if type == 1 % Use Finite Differencing mu = 2*sqrt(1e-12)*(1+norm(x))/(1+norm(x)); diff = funObj(x+d*mu,varargin{:}); gtd2 = (diff-f)/mu; elseif type == 3 % Use Complex Differentials mu = 1e-150; [diff,diffa] = funObj(x+d*mu*i,varargin{:}); gtd2 = imag(diff)/mu; else % Use Central Differencing mu = 2*sqrt(1e-12)*(1+norm(x))/(1+norm(x)); diff1 = funObj(x+d*mu,varargin{:}); diff2 = funObj(x-d*mu,varargin{:}); gtd2 = (diff1-diff2)/(2*mu); end fprintf('Max difference between user and numerical directional-derivative: %e\n',max(abs(gtd-gtd2))); end \ No newline at end of file diff --git a/common/minFunc_2012/example_derivativeCheck.m b/common/minFunc_2012/example_derivativeCheck.m new file mode 100644 index 0000000..2713c14 --- /dev/null +++ b/common/minFunc_2012/example_derivativeCheck.m @@ -0,0 +1,52 @@ +clear all + +nInst = 250; +nVars = 10; +X = randn(nInst,nVars); +w = randn(nVars,1); +y = sign(X*w + randn(nInst,1)); + +wTest = randn(nVars,1); + +fprintf('Testing gradient using forward-differencing...\n'); +order = 1; +derivativeCheck(@LogisticLoss,wTest,order,1,X,y); + +fprintf('Testing gradient using central-differencing...\n'); +derivativeCheck(@LogisticLoss,wTest,order,2,X,y); + +fprintf('Testing gradient using complex-step derivative...\n'); +derivativeCheck(@LogisticLoss,wTest,order,3,X,y); + +fprintf('\n\n\n'); +pause + +fprintf('Testing Hessian using forward-differencing\n'); +order = 2; +derivativeCheck(@LogisticLoss,wTest,order,1,X,y); + +fprintf('Testing Hessian using central-differencing\n'); +order = 2; +derivativeCheck(@LogisticLoss,wTest,order,2,X,y); + +fprintf('Testing Hessian using complex-step derivative\n'); +order = 2; +derivativeCheck(@LogisticLoss,wTest,order,3,X,y); + +fprintf('\n\n\n'); +pause + +fprintf('Testing gradient using fastDerivativeCheck...\n'); +order = 1; +fastDerivativeCheck(@LogisticLoss,wTest,order,1,X,y); +fastDerivativeCheck(@LogisticLoss,wTest,order,2,X,y); +fastDerivativeCheck(@LogisticLoss,wTest,order,3,X,y); + +fprintf('\n\n\n'); +pause + +fprintf('Testing Hessian using fastDerivativeCheck...\n'); +order = 2; +fastDerivativeCheck(@LogisticLoss,wTest,order,1,X,y); +fastDerivativeCheck(@LogisticLoss,wTest,order,2,X,y); +fastDerivativeCheck(@LogisticLoss,wTest,order,3,X,y); diff --git a/common/minFunc_2012/example_minFunc.m b/common/minFunc_2012/example_minFunc.m new file mode 100644 index 0000000..0d7e455 --- /dev/null +++ b/common/minFunc_2012/example_minFunc.m @@ -0,0 +1,75 @@ +% Runs various limited-memory solvers on 2D rosenbrock function for 25 +% function evaluations +maxFunEvals = 25; + +fprintf('Result after %d evaluations of limited-memory solvers on 2D rosenbrock:\n',maxFunEvals); + +fprintf('---------------------------------------\n'); +fprintf('x1 = %.4f, x2 = %.4f (starting point)\n',0,0); +fprintf('x1 = %.4f, x2 = %.4f (optimal solution)\n',1,1); +fprintf('---------------------------------------\n'); + +if exist('minimize') == 2 + % Minimize.m - conjugate gradient method + x = minimize([0 0]', 'rosenbrock', -maxFunEvals); + fprintf('x1 = %.4f, x2 = %.4f (minimize.m by C. 
Rasmussen)\n',x(1),x(2));
+end
+
+options = [];
+options.display = 'none';
+options.maxFunEvals = maxFunEvals;
+
+% Steepest Descent
+options.Method = 'sd';
+x = minFunc(@rosenbrock,[0 0]',options);
+fprintf('x1 = %.4f, x2 = %.4f (minFunc with steepest descent)\n',x(1),x(2));
+
+% Cyclic Steepest Descent
+options.Method = 'csd';
+x = minFunc(@rosenbrock,[0 0]',options);
+fprintf('x1 = %.4f, x2 = %.4f (minFunc with cyclic steepest descent)\n',x(1),x(2));
+
+% Barzilai & Borwein
+options.Method = 'bb';
+options.bbType = 1;
+x = minFunc(@rosenbrock,[0 0]',options);
+fprintf('x1 = %.4f, x2 = %.4f (minFunc with spectral gradient descent)\n',x(1),x(2));
+
+% Hessian-Free Newton
+options.Method = 'newton0';
+x = minFunc(@rosenbrock,[0 0]',options);
+fprintf('x1 = %.4f, x2 = %.4f (minFunc with Hessian-free Newton)\n',x(1),x(2));
+
+% Hessian-Free Newton w/ L-BFGS preconditioner
+options.Method = 'pnewton0';
+x = minFunc(@rosenbrock,[0 0]',options);
+fprintf('x1 = %.4f, x2 = %.4f (minFunc with preconditioned Hessian-free Newton)\n',x(1),x(2));
+
+% Conjugate Gradient
+options.Method = 'cg';
+x = minFunc(@rosenbrock,[0 0]',options);
+fprintf('x1 = %.4f, x2 = %.4f (minFunc with conjugate gradient)\n',x(1),x(2));
+
+% Scaled Conjugate Gradient
+options.Method = 'scg';
+x = minFunc(@rosenbrock,[0 0]',options);
+fprintf('x1 = %.4f, x2 = %.4f (minFunc with scaled conjugate gradient)\n',x(1),x(2));
+
+% Preconditioned Conjugate Gradient
+options.Method = 'pcg';
+x = minFunc(@rosenbrock,[0 0]',options);
+fprintf('x1 = %.4f, x2 = %.4f (minFunc with preconditioned conjugate gradient)\n',x(1),x(2));
+
+% L-BFGS (default)
+options.Method = 'lbfgs';
+x = minFunc(@rosenbrock,[0 0]',options);
+fprintf('x1 = %.4f, x2 = %.4f (minFunc with limited-memory BFGS - default)\n',x(1),x(2));
+
+fprintf('---------------------------------------\n');
+
+
+
+
+
+
+
diff --git a/common/minFunc_2012/logisticExample/LogisticDiagPrecond.m b/common/minFunc_2012/logisticExample/LogisticDiagPrecond.m
new file mode 100644
index 0000000..ea62557
--- /dev/null
+++ b/common/minFunc_2012/logisticExample/LogisticDiagPrecond.m
@@ -0,0 +1,20 @@
+function [m] = LogisticDiagPrecond(v,w,X,y)
+% v(feature,1) - vector that we will apply diagonal preconditioner to
+% w(feature,1)
+% X(instance,feature)
+% y(instance,1)
+
+sig = 1./(1+exp(-y.*(X*w)));
+
+% Compute diagonals of Hessian
+sig = sig.*(1-sig);
+for i = 1:length(w)
+    h(i,1) = (sig.*X(:,i))'*X(:,i);
+end
+
+% Apply preconditioner
+m = v./h;
+
+% Exact preconditioner
+%H = X'*diag(sig.*(1-sig))*X;
+%m = H\v;
diff --git a/common/minFunc_2012/logisticExample/LogisticHv.m b/common/minFunc_2012/logisticExample/LogisticHv.m
new file mode 100644
index 0000000..92cad59
--- /dev/null
+++ b/common/minFunc_2012/logisticExample/LogisticHv.m
@@ -0,0 +1,8 @@
+function [Hv] = LogisticHv(v,w,X,y)
+% v(feature,1) - vector that we will multiply Hessian by
+% w(feature,1)
+% X(instance,feature)
+% y(instance,1)
+
+sig = 1./(1+exp(-y.*(X*w)));
+Hv = X.'*(sig.*(1-sig).*(X*v));
diff --git a/common/minFunc_2012/logisticExample/LogisticLoss.m b/common/minFunc_2012/logisticExample/LogisticLoss.m
new file mode 100644
index 0000000..64344cd
--- /dev/null
+++ b/common/minFunc_2012/logisticExample/LogisticLoss.m
@@ -0,0 +1,36 @@
+function [nll,g,H,T] = LogisticLoss(w,X,y)
+% w(feature,1)
+% X(instance,feature)
+% y(instance,1)
+
+[n,p] = size(X);
+
+Xw = X*w;
+yXw = y.*Xw;
+
+nll = sum(mylogsumexp([zeros(n,1) -yXw]));
+
+if nargout > 1
+    if nargout > 2
+        sig = 1./(1+exp(-yXw));
+        g = -X.'*(y.*(1-sig));
+    else
+        %g =
-X.'*(y./(1+exp(yXw))); + g = -(X.'*(y./(1+exp(yXw)))); + end +end + +if nargout > 2 + H = X.'*diag(sparse(sig.*(1-sig)))*X; +end + +if nargout > 3 + T = zeros(p,p,p); + for j1 = 1:p + for j2 = 1:p + for j3 = 1:p + T(j1,j2,j3) = sum(y(:).^3.*X(:,j1).*X(:,j2).*X(:,j3).*sig.*(1-sig).*(1-2*sig)); + end + end + end +end \ No newline at end of file diff --git a/common/minFunc_2012/logisticExample/example_minFunc_LR.m b/common/minFunc_2012/logisticExample/example_minFunc_LR.m new file mode 100644 index 0000000..d808ab8 --- /dev/null +++ b/common/minFunc_2012/logisticExample/example_minFunc_LR.m @@ -0,0 +1,79 @@ +clear all + +nInst = 500; +nVars = 200; +X = randn(nInst,nVars); +w = randn(nVars,1); +y = sign(X*w + randn(nInst,1)); + +w_init = zeros(nVars,1); +funObj = @(w)LogisticLoss(w,X,y); + +fprintf('\nRunning Steepest Descent\n'); +options.Method = 'sd'; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; + +fprintf('\nRunning Cyclic Steepest Descent\n'); +options.Method = 'csd'; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; + +fprintf('\nRunning Conjugate Gradient\n'); +options.Method = 'cg'; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; + +fprintf('\nRunning Scaled Conjugate Gradient\n'); +options.Method = 'scg'; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; + +fprintf('\nRunning Preconditioned Conjugate Gradient (Diagonal preconditioner)\n'); +options.Method = 'pcg'; +options.precFunc = @LogisticDiagPrecond; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; + +fprintf('\nRunning Preconditioned Conjugate Gradient (L-BFGS preconditioner)\n'); +options.Method = 'pcg'; +options.precFunc = []; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; + +fprintf('\nRunning Hessian-Free Newton w/ numerical Hessian-Vector products\n'); +options.Method = 'newton0'; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; + +fprintf('\nRunning Preconditioned Hessian-Free Newton w/ numerical Hessian-Vector products (Diagonal preconditioner)\n'); +options.Method = 'pnewton0'; +options.precFunc = @LogisticDiagPrecond; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; + +fprintf('\nRunning Preconditioned Hessian-Free Newton w/ numerical Hessian-Vector products (L-BFGS preconditioner)\n'); +options.Method = 'pnewton0'; +options.precFunc = []; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; + +fprintf('\nRunning Hessian-Free Newton w/ analytic Hessian-Vector products\n'); +options.Method = 'newton0'; +options.HvFunc = @LogisticHv; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; + +fprintf('\nRunning Preconditioned Hessian-Free Newton w/ analytic Hessian-Vector products (Diagonal preconditioner)\n'); +options.Method = 'pnewton0'; +options.HvFunc = @LogisticHv; +options.precFunc = @LogisticDiagPrecond; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; + +fprintf('\nRunning Preconditioned Hessian-Free Newton w/ analytic Hessian-Vector products (L-BFGS preconditioner)\n'); +options.Method = 'pnewton0'; +options.precFunc = []; +options.HvFunc = @LogisticHv; +minFunc(@LogisticLoss,w_init,options,X,y); +pause; \ No newline at end of file diff --git a/common/minFunc_2012/logisticExample/mylogsumexp.m b/common/minFunc_2012/logisticExample/mylogsumexp.m new file mode 100644 index 0000000..3a49c20 --- /dev/null +++ b/common/minFunc_2012/logisticExample/mylogsumexp.m @@ -0,0 +1,8 @@ +function lse = mylogsumexp(b) +% does logsumexp across columns +B = max(b,[],2); +lse = log(sum(exp(b-repmat(B,[1 size(b,2)])),2))+B; + +% Old version that used repmatC +%lse = log(sum(exp(b-repmatC(B,[1 
size(b,2)])),2))+B;
+end
\ No newline at end of file
diff --git a/common/minFunc_2012/mexAll.m b/common/minFunc_2012/mexAll.m
new file mode 100644
index 0000000..985ef84
--- /dev/null
+++ b/common/minFunc_2012/mexAll.m
@@ -0,0 +1,7 @@
+% minFunc
+fprintf('Compiling minFunc files...\n');
+mex -outdir minFunc/compiled minFunc/mex/mcholC.c
+mex -outdir minFunc/compiled minFunc/mex/lbfgsC.c
+mex -outdir minFunc/compiled minFunc/mex/lbfgsAddC.c
+mex -outdir minFunc/compiled minFunc/mex/lbfgsProdC.c
+
diff --git a/common/minFunc_2012/minFunc/ArmijoBacktrack.m b/common/minFunc_2012/minFunc/ArmijoBacktrack.m
new file mode 100644
index 0000000..9b5587a
--- /dev/null
+++ b/common/minFunc_2012/minFunc/ArmijoBacktrack.m
@@ -0,0 +1,139 @@
+function [t,x_new,f_new,g_new,funEvals,H] = ArmijoBacktrack(...
+    x,t,d,f,fr,g,gtd,c1,LS_interp,LS_multi,progTol,debug,doPlot,saveHessianComp,funObj,varargin)
+% [t,x_new,f_new,g_new,funEvals,H] = ArmijoBacktrack(...
+%    x,t,d,f,fr,g,gtd,c1,LS_interp,LS_multi,progTol,debug,doPlot,saveHessianComp,funObj,varargin)
+%
+% Backtracking linesearch to satisfy Armijo condition
+%
+% Inputs:
+%   x: starting location
+%   t: initial step size
+%   d: descent direction
+%   f: function value at starting location
+%   fr: reference function value (usually funObj(x))
+%   gtd: directional derivative at starting location
+%   c1: sufficient decrease parameter
+%   debug: display debugging information
+%   LS_interp: type of interpolation
+%   progTol: minimum allowable step length
+%   doPlot: do a graphical display of interpolation
+%   funObj: objective function
+%   varargin: parameters of objective function
+%
+% Outputs:
+%   t: step length
+%   f_new: function value at x+t*d
+%   g_new: gradient value at x+t*d
+%   funEvals: number of function evaluations performed by line search
+%   H: Hessian at initial guess (only computed if requested)
+%
+% recent change: LS changed to LS_interp and LS_multi
+
+% Evaluate the Objective and Gradient at the Initial Step
+if nargout == 6
+    [f_new,g_new,H] = funObj(x + t*d,varargin{:});
+else
+    [f_new,g_new] = funObj(x+t*d,varargin{:});
+end
+funEvals = 1;
+
+while f_new > fr + c1*t*gtd || ~isLegal(f_new)
+    temp = t;
+
+    if LS_interp == 0 || ~isLegal(f_new)
+        % Ignore value of new point
+        if debug
+            fprintf('Fixed BT\n');
+        end
+        t = 0.5*t;
+    elseif LS_interp == 1 || ~isLegal(g_new)
+        % Use function value at new point, but not its derivative
+        if funEvals < 2 || LS_multi == 0 || ~isLegal(f_prev)
+            % Backtracking w/ quadratic interpolation based on two points
+            if debug
+                fprintf('Quad BT\n');
+            end
+            t = polyinterp([0 f gtd; t f_new sqrt(-1)],doPlot,0,t);
+        else
+            % Backtracking w/ cubic interpolation based on three points
+            if debug
+                fprintf('Cubic BT\n');
+            end
+            t = polyinterp([0 f gtd; t f_new sqrt(-1); t_prev f_prev sqrt(-1)],doPlot,0,t);
+        end
+    else
+        % Use function value and derivative at new point
+
+        if funEvals < 2 || LS_multi == 0 || ~isLegal(f_prev)
+            % Backtracking w/ cubic interpolation w/ derivative
+            if debug
+                fprintf('Grad-Cubic BT\n');
+            end
+            t = polyinterp([0 f gtd; t f_new g_new'*d],doPlot,0,t);
+        elseif ~isLegal(g_prev)
+            % Backtracking w/ quartic interpolation 3 points and derivative
+            % of two
+            if debug
+                fprintf('Grad-Quartic BT\n');
+            end
+            t = polyinterp([0 f gtd; t f_new g_new'*d; t_prev f_prev sqrt(-1)],doPlot,0,t);
+        else
+            % Backtracking w/ quintic interpolation of 3 points and derivative
+            % of two
+            if debug
+                fprintf('Grad-Quintic BT\n');
+            end
+            t = polyinterp([0 f gtd; t f_new g_new'*d; t_prev f_prev g_prev'*d],doPlot,0,t);
+        end
+    end
+
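+    % Note on polyinterp: each row of its points matrix is (t, f(t), f'(t)),
+    % and sqrt(-1) in the derivative slot marks that derivative as unknown.
+    % For example,
+    %
+    %   t = polyinterp([0 f gtd; t f_new sqrt(-1)],doPlot,0,t);
+    %
+    % fits the quadratic through (0,f) with slope gtd and through (t,f_new),
+    % then returns its minimizer restricted to [0,t].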
+    % Adjust if change in t is too small/large
+    if t < temp*1e-3
+        if debug
+            fprintf('Interpolated Value Too Small, Adjusting\n');
+        end
+        t = temp*1e-3;
+    elseif t > temp*0.6
+        if debug
+            fprintf('Interpolated Value Too Large, Adjusting\n');
+        end
+        t = temp*0.6;
+    end
+
+    % Store old point if doing three-point interpolation
+    if LS_multi
+        f_prev = f_new;
+        t_prev = temp;
+        if LS_interp == 2
+            g_prev = g_new;
+        end
+    end
+
+    if ~saveHessianComp && nargout == 6
+        [f_new,g_new,H] = funObj(x + t*d,varargin{:});
+    else
+        [f_new,g_new] = funObj(x + t*d,varargin{:});
+    end
+    funEvals = funEvals+1;
+
+    % Check whether step size has become too small
+    if max(abs(t*d)) <= progTol
+        if debug
+            fprintf('Backtracking Line Search Failed\n');
+        end
+        t = 0;
+        f_new = f;
+        g_new = g;
+        break;
+    end
+end
+
+% Evaluate Hessian at new point
+if nargout == 6 && funEvals > 1 && saveHessianComp
+    [f_new,g_new,H] = funObj(x + t*d,varargin{:});
+    funEvals = funEvals+1;
+end
+
+x_new = x + t*d;
+
+end
diff --git a/common/minFunc_2012/minFunc/WolfeLineSearch.m b/common/minFunc_2012/minFunc/WolfeLineSearch.m
new file mode 100644
index 0000000..30b684b
--- /dev/null
+++ b/common/minFunc_2012/minFunc/WolfeLineSearch.m
@@ -0,0 +1,359 @@
+function [t,f_new,g_new,funEvals,H] = WolfeLineSearch(...
+    x,t,d,f,g,gtd,c1,c2,LS_interp,LS_multi,maxLS,progTol,debug,doPlot,saveHessianComp,funObj,varargin)
+%
+% Bracketing Line Search to Satisfy Wolfe Conditions
+%
+% Inputs:
+% x: starting location
+% t: initial step size
+% d: descent direction
+% f: function value at starting location
+% g: gradient at starting location
+% gtd: directional derivative at starting location
+% c1: sufficient decrease parameter
+% c2: curvature parameter
+% debug: display debugging information
+% LS_interp: type of interpolation
+% maxLS: maximum number of iterations
+% progTol: minimum allowable step length
+% doPlot: do a graphical display of interpolation
+% funObj: objective function
+% varargin: parameters of objective function
+%
+% Outputs:
+% t: step length
+% f_new: function value at x + t*d
+% g_new: gradient value at x + t*d
+% funEvals: number of function evaluations performed by line search
+% H: Hessian at initial guess (only computed if requested)

+% Evaluate the Objective and Gradient at the Initial Step
+if nargout == 5
+    [f_new,g_new,H] = funObj(x + t*d,varargin{:});
+else
+    [f_new,g_new] = funObj(x + t*d,varargin{:});
+end
+funEvals = 1;
+gtd_new = g_new'*d;
+
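+% The bracketing/zoom loops below look for a step t satisfying the strong
+% Wolfe conditions (gtd = g'*d < 0 for a descent direction):
+%   f(x+t*d) <= f + c1*t*gtd       (sufficient decrease)
+%   |g(x+t*d)'*d| <= -c2*gtd       (curvature)
+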
+% Bracket an Interval containing a point satisfying the
+% Wolfe criteria
+
+LSiter = 0;
+t_prev = 0;
+f_prev = f;
+g_prev = g;
+gtd_prev = gtd;
+nrmD = max(abs(d));
+done = 0;
+
+while LSiter < maxLS
+
+    %% Bracketing Phase
+    if ~isLegal(f_new) || ~isLegal(g_new)
+        if debug
+            fprintf('Extrapolated into illegal region, switching to Armijo line-search\n');
+        end
+        t = (t + t_prev)/2;
+        % Do Armijo
+        if nargout == 5
+            [t,x_new,f_new,g_new,armijoFunEvals,H] = ArmijoBacktrack(...
+                x,t,d,f,f,g,gtd,c1,LS_interp,LS_multi,progTol,debug,doPlot,saveHessianComp,...
+                funObj,varargin{:});
+        else
+            [t,x_new,f_new,g_new,armijoFunEvals] = ArmijoBacktrack(...
+                x,t,d,f,f,g,gtd,c1,LS_interp,LS_multi,progTol,debug,doPlot,saveHessianComp,...
+                funObj,varargin{:});
+        end
+        funEvals = funEvals + armijoFunEvals;
+        return;
+    end
+
+    if f_new > f + c1*t*gtd || (LSiter > 1 && f_new >= f_prev)
+        bracket = [t_prev t];
+        bracketFval = [f_prev f_new];
+        bracketGval = [g_prev g_new];
+        break;
+    elseif abs(gtd_new) <= -c2*gtd
+        bracket = t;
+        bracketFval = f_new;
+        bracketGval = g_new;
+        done = 1;
+        break;
+    elseif gtd_new >= 0
+        bracket = [t_prev t];
+        bracketFval = [f_prev f_new];
+        bracketGval = [g_prev g_new];
+        break;
+    end
+    temp = t_prev;
+    t_prev = t;
+    minStep = t + 0.01*(t-temp);
+    maxStep = t*10;
+    if LS_interp <= 1
+        if debug
+            fprintf('Extending Bracket\n');
+        end
+        t = maxStep;
+    elseif LS_interp == 2
+        if debug
+            fprintf('Cubic Extrapolation\n');
+        end
+        t = polyinterp([temp f_prev gtd_prev; t f_new gtd_new],doPlot,minStep,maxStep);
+    elseif LS_interp == 3
+        t = mixedExtrap(temp,f_prev,gtd_prev,t,f_new,gtd_new,minStep,maxStep,debug,doPlot);
+    end
+
+    f_prev = f_new;
+    g_prev = g_new;
+    gtd_prev = gtd_new;
+    if ~saveHessianComp && nargout == 5
+        [f_new,g_new,H] = funObj(x + t*d,varargin{:});
+    else
+        [f_new,g_new] = funObj(x + t*d,varargin{:});
+    end
+    funEvals = funEvals + 1;
+    gtd_new = g_new'*d;
+    LSiter = LSiter+1;
+end
+
+if LSiter == maxLS
+    bracket = [0 t];
+    bracketFval = [f f_new];
+    bracketGval = [g g_new];
+end
+
+%% Zoom Phase
+
+% We now either have a point satisfying the criteria, or a bracket
+% surrounding a point satisfying the criteria.
+% Refine the bracket until we find a point satisfying the criteria.
+insufProgress = 0;
+Tpos = 2;
+LOposRemoved = 0;
+while ~done && LSiter < maxLS
+
+    % Find High and Low Points in bracket
+    [f_LO LOpos] = min(bracketFval);
+    HIpos = -LOpos + 3;
+
+    % Compute new trial value
+    if LS_interp <= 1 || ~isLegal(bracketFval) || ~isLegal(bracketGval)
+        if debug
+            fprintf('Bisecting\n');
+        end
+        t = mean(bracket);
+    elseif LS_interp == 2
+        if debug
+            fprintf('Grad-Cubic Interpolation\n');
+        end
+        t = polyinterp([bracket(1) bracketFval(1) bracketGval(:,1)'*d
+            bracket(2) bracketFval(2) bracketGval(:,2)'*d],doPlot);
+    else
+        % Mixed Case %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        nonTpos = -Tpos+3;
+        if LOposRemoved == 0
+            oldLOval = bracket(nonTpos);
+            oldLOFval = bracketFval(nonTpos);
+            oldLOGval = bracketGval(:,nonTpos);
+        end
+        t = mixedInterp(bracket,bracketFval,bracketGval,d,Tpos,oldLOval,oldLOFval,oldLOGval,debug,doPlot);
+    end
+
+    % Test that we are making sufficient progress
+    if min(max(bracket)-t,t-min(bracket))/(max(bracket)-min(bracket)) < 0.1
+        if debug
+            fprintf('Interpolation close to boundary');
+        end
+        if insufProgress || t >= max(bracket) || t <= min(bracket)
+            if debug
+                fprintf(', Evaluating at 0.1 away from boundary\n');
+            end
+            if abs(t-max(bracket)) < abs(t-min(bracket))
+                t = max(bracket)-0.1*(max(bracket)-min(bracket));
+            else
+                t = min(bracket)+0.1*(max(bracket)-min(bracket));
+            end
+            insufProgress = 0;
+        else
+            if debug
+                fprintf('\n');
+            end
+            insufProgress = 1;
+        end
+    else
+        insufProgress = 0;
+    end
+
+    % Evaluate new point
+    if ~saveHessianComp && nargout == 5
+        [f_new,g_new,H] = funObj(x + t*d,varargin{:});
+    else
+        [f_new,g_new] = funObj(x + t*d,varargin{:});
+    end
+    funEvals = funEvals + 1;
+    gtd_new = g_new'*d;
+    LSiter = LSiter+1;
+
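+    % Bracket update (standard zoom step): a trial point that violates
+    % sufficient decrease, or is no lower than the current best point,
+    % replaces the high end; otherwise it becomes the new low end, with the
+    % old low moved to the high end when the new directional derivative
+    % points away from it.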
+    armijo = f_new < f + c1*t*gtd;
+    if ~armijo || f_new >= f_LO
+        % Armijo condition not satisfied or not lower than lowest point
+        bracket(HIpos) = t;
+        bracketFval(HIpos) = f_new;
+        bracketGval(:,HIpos) = g_new;
+        Tpos = HIpos;
+    else
+        if abs(gtd_new) <= -c2*gtd
+            % Wolfe conditions satisfied
+            done = 1;
+        elseif gtd_new*(bracket(HIpos)-bracket(LOpos)) >= 0
+            % Old HI becomes new LO
+            bracket(HIpos) = bracket(LOpos);
+            bracketFval(HIpos) = bracketFval(LOpos);
+            bracketGval(:,HIpos) = bracketGval(:,LOpos);
+            if LS_interp == 3
+                if debug
+                    fprintf('LO Pos is being removed!\n');
+                end
+                LOposRemoved = 1;
+                oldLOval = bracket(LOpos);
+                oldLOFval = bracketFval(LOpos);
+                oldLOGval = bracketGval(:,LOpos);
+            end
+        end
+        % New point becomes new LO
+        bracket(LOpos) = t;
+        bracketFval(LOpos) = f_new;
+        bracketGval(:,LOpos) = g_new;
+        Tpos = LOpos;
+    end
+
+    if ~done && abs(bracket(1)-bracket(2))*nrmD < progTol
+        if debug
+            fprintf('Line-search bracket has been reduced below progTol\n');
+        end
+        break;
+    end
+
+end
+
+%%
+if LSiter == maxLS
+    if debug
+        fprintf('Line Search Exceeded Maximum Line Search Iterations\n');
+    end
+end
+
+[f_LO LOpos] = min(bracketFval);
+t = bracket(LOpos);
+f_new = bracketFval(LOpos);
+g_new = bracketGval(:,LOpos);
+
+% Evaluate Hessian at new point
+if nargout == 5 && funEvals > 1 && saveHessianComp
+    [f_new,g_new,H] = funObj(x + t*d,varargin{:});
+    funEvals = funEvals + 1;
+end
+
+end
+
+%%
+function [t] = mixedExtrap(x0,f0,g0,x1,f1,g1,minStep,maxStep,debug,doPlot)
+alpha_c = polyinterp([x0 f0 g0; x1 f1 g1],doPlot,minStep,maxStep);
+alpha_s = polyinterp([x0 f0 g0; x1 sqrt(-1) g1],doPlot,minStep,maxStep);
+if alpha_c > minStep && abs(alpha_c - x1) < abs(alpha_s - x1)
+    if debug
+        fprintf('Cubic Extrapolation\n');
+    end
+    t = alpha_c;
+else
+    if debug
+        fprintf('Secant Extrapolation\n');
+    end
+    t = alpha_s;
+end
+end
+
+%%
+function [t] = mixedInterp(bracket,bracketFval,bracketGval,d,Tpos,oldLOval,oldLOFval,oldLOGval,debug,doPlot)
+
+% Mixed Case %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+nonTpos = -Tpos+3;
+
+gtdT = bracketGval(:,Tpos)'*d;
+gtdNonT = bracketGval(:,nonTpos)'*d;
+oldLOgtd = oldLOGval'*d;
+if bracketFval(Tpos) > oldLOFval
+    alpha_c = polyinterp([oldLOval oldLOFval oldLOgtd
+        bracket(Tpos) bracketFval(Tpos) gtdT],doPlot);
+    alpha_q = polyinterp([oldLOval oldLOFval oldLOgtd
+        bracket(Tpos) bracketFval(Tpos) sqrt(-1)],doPlot);
+    if abs(alpha_c - oldLOval) < abs(alpha_q - oldLOval)
+        if debug
+            fprintf('Cubic Interpolation\n');
+        end
+        t = alpha_c;
+    else
+        if debug
+            fprintf('Mixed Quad/Cubic Interpolation\n');
+        end
+        t = (alpha_q + alpha_c)/2;
+    end
+elseif gtdT'*oldLOgtd < 0
+    alpha_c = polyinterp([oldLOval oldLOFval oldLOgtd
+        bracket(Tpos) bracketFval(Tpos) gtdT],doPlot);
+    alpha_s = polyinterp([oldLOval oldLOFval oldLOgtd
+        bracket(Tpos) sqrt(-1) gtdT],doPlot);
+    if abs(alpha_c - bracket(Tpos)) >= abs(alpha_s - bracket(Tpos))
+        if debug
+            fprintf('Cubic Interpolation\n');
+        end
+        t = alpha_c;
+    else
+        if debug
+            fprintf('Quad Interpolation\n');
+        end
+        t = alpha_s;
+    end
+elseif abs(gtdT) <= abs(oldLOgtd)
+    alpha_c = polyinterp([oldLOval oldLOFval oldLOgtd
+        bracket(Tpos) bracketFval(Tpos) gtdT],...
+        doPlot,min(bracket),max(bracket));
+    alpha_s = polyinterp([oldLOval sqrt(-1) oldLOgtd
+        bracket(Tpos) bracketFval(Tpos) gtdT],...
+ doPlot,min(bracket),max(bracket)); + if alpha_c > min(bracket) && alpha_c < max(bracket) + if abs(alpha_c - bracket(Tpos)) < abs(alpha_s - bracket(Tpos)) + if debug + fprintf('Bounded Cubic Extrapolation\n'); + end + t = alpha_c; + else + if debug + fprintf('Bounded Secant Extrapolation\n'); + end + t = alpha_s; + end + else + if debug + fprintf('Bounded Secant Extrapolation\n'); + end + t = alpha_s; + end + + if bracket(Tpos) > oldLOval + t = min(bracket(Tpos) + 0.66*(bracket(nonTpos) - bracket(Tpos)),t); + else + t = max(bracket(Tpos) + 0.66*(bracket(nonTpos) - bracket(Tpos)),t); + end +else + t = polyinterp([bracket(nonTpos) bracketFval(nonTpos) gtdNonT + bracket(Tpos) bracketFval(Tpos) gtdT],doPlot); +end +end \ No newline at end of file diff --git a/common/minFunc_2012/minFunc/compiled/lbfgsAddC.mexa64 b/common/minFunc_2012/minFunc/compiled/lbfgsAddC.mexa64 new file mode 100755 index 0000000000000000000000000000000000000000..3da047b32dc624987448dd242f43567aa36f7541 GIT binary patch literal 8184 zcmeHMYit}>6~62BD~)4sLP{Nn;!%`HX-F40j)OyG;*DS9$a&Nc2m~|Um%SV9!`huK zPNg&kr?i$axB?+ns?7Vtkr7P|o=kLEfb-j7&=x^VpGLZX2c)0yN?GPq4{8{+V+%Ify z`Nm6EB~aqejR+A(rw_n(TM_%SCGegSIF(Do<4nTr>YsiHc=z4tIk|t1H%}LClf~6^ruY25QY()@EfDigg+L2CM=9&PaFN=v1r;1 zr#fQ(bUK_C@$f`glU;{M&sc` z8lGw6b4^Vm77fHFyy>K5!px1EkyO|p0`8S2LE;XHSYUK4?F)rEyol>RbWj}cIxTwo zdXIJ(2fPQoEnFiTbz_`Z1I>kzLcfEdzRfhgXq>;MEUB-`pnLTxAxKAw+L03ZTn44~ zrj%)jZlcjSFC`(GJeuTq+_St`XW`V`_93=p!_`!j{&#FRGFVG0KG|9|u~gMIoaPEA zj}7NJOuBj-u7-eg(QG)^ffRdfxV=33)Cbks3oytwpxLqyqfGkL&FUMuma`9OBDW`o z|GLI@a3n`4)bgDi(w+dx)YQw1QvM3bRHfy4Dfg31RaySIl#h{2Ram|x<)b81Q!dXz zrgG>r74K})g#O&TS(aOr#hBwXG})%XLFm(WeS-s;fIiiB4YtCp(P!Fn;IcoVD6^0L zvPu(uvwMC7f_V3#>3i%w{q=vk^{Mw=`r)OtR@c}`m7I6!(=tC_Zu?D`{)VzXyp+1T zP;nKk>x}QK!#76j8ZV-VWZyP|kYWf0FdJarOy($Q?XW)E)(ute4z<(pjk!6p@qNkn zMc)vLii*|uPQRtk9L}yj$mM#cZ}!fdug-n|UZ1`J-jlr}@i)NNXWy3m0(dR^D)BSt z_hw%K^S8SF^V40oNY-atevKk!BY-c?XKGM@ACmlnW$(!vU}l=0`|#QQGv{wj{nb@> z<-s)=h}rE`dhmDpOwEnn>6+WUGc`9se@{(uc;UjkXz+Se+BfX`n$Mtm*b~)>>O!5= zT8XMpt>(-fXz$NJsB47$el_}R2F<*h%iN>RJ3}$#_&S2}$nKO=-Uuf`T0E0BwLn;l zCd}poD4J`h`$?L^G=S#*oy)a?-h$f*C?%f9u4np%Yr^B&SyNR#htaniI`TUOpK&s* z^>o+PAFJE^_3BA+q~XbZ&AWFIydMv`U4#&GfdF5v=UZhRTPjaYReig1w&FYG-*uP$ z0zpZ)kv3XjqXjlvV50^8=Pf{6KP3)qNoz9W^kPBjS&QJU%}UQZS;qlqO%F5TNV+U6|Gm4-`TS2y-&G`Ii=_Q zOJy$}QTS51#V|ghWZs8HlpVjbaen{GS>@N1J+?ntAI=xsoRm*1J=$5y+>R?sE?4@G z9D0@Q|DV)qMf3Q9>X08RdR5T{MQMfSM_bMwm+^&9HYYk?{sH(OBs5Xh=vY;!j6}H*_umPm7u< zOY?L%MW>(w!BAdGXTUTSj`_)7apN&lc%z9Z{>?CsR9>9ZqR=Hne$y|!;fOJs^2ftQ zBm`fUgg0m=Q)%RHk+VT45z`-!29bXfVesdbv8_0PG^~TkIF4#&(Z>ESz@6}+G0Wqv z)OQOGt%ezk!VB(0j5+q_dB}9HvgCQi^MM1<`ytIi_UC!ZR8s?;@+BMg=lRnPjAFAt z&wHjk&tW1H&l{F?Ff^ywpXU)%&WFp-c1(xhPji-Kp2tk7ew3W$w@|#tf}uIi{yfi_ zYRZuOsq9>T1&cHoj!hV^J4{CiMB?(ZKiBI#bawwG5|Ehkx<~%hPfq{mfl;}5o#l0! 
zDdz(dnN)Zd#W%s({U_Brz*L?E^44qTeUHLs9sc}Xk?E47!`Y8Re$V00`wG)qHdNGE z{+ED^S|~J*tD4Iu{D=KaTk;<?kX8P=6(;}BeoWtU`1AhPqzcG(9Kb0r zLqQnVzg2x7YE}MR&r<$>fr9Ga#0HA8|2_wcjwD;D0yqVuWwJ9N99HC2WZ^8Fw1yd=72OW zUAQ@LwnMAL*SA9#@G{&pczB~$fzC+#wes`)pnDY;R=;+&p76YzP;urQaY~i{qKvau z+;Q~RixT&UNe&E|(kpmeMf&kI;8gxOwZD=rrPuKkYqz(g_Fm4}FK+`c6YIyVwT^ES zD~@vh3HC0W@_1dAVe$8O$=u87s(l#$Na4R-n=tW~&kyzK^FR^%RRAMj%L zP6tcidI@|GxW+5HNoqV6Hp3zB zp_b++1SyPYXo93>VI9~)nRxsh=hJnfQ)biM?>pXQuteD~zm|(U+R0qjZpTpIusq1o zHC_}^WqI?y>7)_yCqgk??%~5a)Hv>0*MGw3{PGFk@!k$$^z@xP>gzL3c6SeS4H|>K zqkUbt8+XpHZ>Ppu{ym!o80N$UV6KS z7ZsHzDY?e8K@jze1o1=kixor!{SdLH5vx*>`qeMO1r?-VYYXlA-tNrZZmyMnRM-b^ z-~8Updo%l+IoO+-A1?lJxs?#1i4YP5Z3kUyCS(>Y%1xlX8wjB^HoQM(ySxw43kk!J zlRz5M!m24XwUSwHyFT9{b_{afF$8gdm2DFsueu?n##Anm&r-uICI%YE_jZTVx|;#FR>_&Ye^d3 zx2Qj#(Ni>|kLRzDZzSYI@+m^>;jfn1DNw4V>3AWY(>V6d?-I*5!r~C)8sTz1AqROG zEv}P6Aeho2IVQVm`1-ht#dU&td2UKmQ`uBK{%ZMnI$Uo&wjNb4QL4ilahA%YS!fua z$P)2jT#@7fQ47f6FJaM7=Bku{$5WFDe)0O#!8Jm%97W;USezpu}46fiZlL zCBq!<^?VE9F3_b5t-@jT`@c?q{NB;K?>c_|4)8@liRApggJ7{87fB){4PXu|`Y}D) zRlqREWN9-YpMk~l2q+4!D^%>?{Tm0^ASH-zb9omfCkB0kpuA7sB$~~~Q(A#e#?5qp zHj~Je!muDR30{BAahf;qn&aZ8r^AN0fHQ;rMWcY@Lp97)a?6(ReeS@9IGZhjxZ@_0 zNos@#++j9XZe9XH9$wuWH#%jQS}y#cCy*25Tb}`+0iOY%0iOY%f&V=N^60M0ZTrqz zmON_p?18Vd(Cx(P*$ssSTgH^~ukt?m33*J8jVk4DmFeHRl=7kLK3V<3 zDT``(rFz^ccd6yCtM5ByNiCnKzU!2Gt8YT#Y^ZM;f_s(9y>9?451*>efSp-3p9Hs9 z#s3WR;_QzvKQKRhs(fa^vQ|SM{W(+#Y+6t%ZI_^yD9IITMfviVCdE2Aw0%XL@9a>^ zodK{RwH#|E#WTwEiLR&Q1M<`IGcx_=E2Xld58|r$G5^~;@Q#P!^U~^>v7blm=bZgK z3eSvgZoP+O6XO#G)%28dVe#fA~-x0;f2SEP!a{Ptu=_9Z37?n z_xB~}9pq{vkhnk&NjF4kPAd%;3Xd5RF*w2pU++fnl^)T|p-fKGjZ9uQ=>3rh zRz-N#NCDv>hoi}OHg3D3wo5f0%Ek=?+AtBYmxqgb(u8zGY$D5eq-pP+z)nUBo~NRX zFpGO8h@;Hn{z^AExZDVZ4#50gL73rB*gXRoa5o%c@^kk+fJ4w8_FZf_08fDkwQZYc z9bD@HwoI_ThUeB{+(vWPFWR_v9inz_il>P|P~4pXiK6l|Rd<~PC&allO%(N1R)cE= zct}dTQwEn8_Lkb{7UEaB2&JPjeC6-Yww%thakJv~U26w%|j+ra7FCEeSj zp@R-KM0$OmgB@&bJ6i3GGg@bAZJ7~ts273-hCn0Y7)47eI&2J-sBK9R-G1KgUI=w+ z?Ksmv)R(;X{r!F4_kG`eZ*P5Tc92{`NFKVT5waVQHz4<$!7q2Zh;nh0aa^^zw9lGAuW|uB^Hm}s-oaQw;EYsntNN6MX^`9d5HxUxP zFQ2@3^2ax4bw){rFeRsu5d4#=2KTrfutdkVS~%c%4k1=Ppk*QH1cBtKo4MEpRl_I? zK1~~(-&PXB16W&uc(_A9A-niQEjB2(2K z0~aV8RTBojK)usMXxe@RCH8^GcFm}IGnm^l$s(FAehf@?rJ+>PRQrxE^oe>kNHIjb z-D7#S`*$HwZ~A6UWUqJ7DuM5oTYM>3YJQl=jx#qDNo@j3~RX7X!2=a-O$}P@W8#! 
zzw>xkr1R!^)2%OIb|Y}=9HY)buk&NC^DUoqKV^Bd5JQ1iJ4jQWnPYHz#TZy>~bl~Qo=S0$)ZnTWdr`_|3dK4!CHb?_de{JBWaZ9L=0R zAa3jH@26}(O|_wVHlldCO2yPV;ZpI`YCbk3vf-X%ww@)3fLWg{C!{B1dtg3~=b(q3 zOIaTUOTy|a`H4YUTfs;Pt?N_f>`^hBX8&a|pKg@eo-N-A&E%<(eD_{h!`T+l-D5e5 zZO>fLY6!>E@Z%6sR=gP^&E5iAkquGIxa}vp_u@8PunYnpyC||z%2e8Yw!m9=MqWdg z-c6MEHM<8=y0nQX$h;43ia5=4=Hy7KiD<82ikZ5nQ5-rTEFg51A<$yoM@g$#H>x}# zvX4Y|YUHP0*00&$g-ElX<_0P2K$#9+21qsh9%EC_SzFI39_03hr$GVytY zt2z_no=kEZ6i)G2)5XUD)PG_(4p^MQ2k3UIb|rL{Hk3f1%!BeIAnK2x zj0vat*#76ax|bc%>}R2=S)akUn@_~6+QqAyQntn67g-qfJzm-c^zfeTlr!TRDm|Y z22&n~&~w7J?MY~f?2qg-S~trLizmQm51gZX0NA2hoF=p&fmb4$61n9 z%f>sBkCm-;2nVdqxZE@})@LSBF_Vb=RlC>fxiijm#Zh7&<74a%%HBstl2)}tkN5yX z86V{ys`rV{Q}+?a7uBTF)cZTvcsDF zCcHh2+_R?uX;Y8GY6rAGn9a5O$F*7Upw4;T^cklT%4C6hBl1I$+|2nwoT%OV7xeY(_ne*% znl4|BaA~^1&!e77SuRg`jy6EM`wD2?N%^di7oL*7%j^~!b_+g-9nz>b%6j+s))3OZ zv_Q$N&GgX_t*5yMYcuyQ<*%a2qUWa|{Pv|QOKm$dANKW&A$NUK?IDpJ6LWjrw&z?k znrzRt<*!JKq_hb|broA#B-S013z@qhcb`}{AluwpFYWGK&{TI=ew)w0dU+9;jpsvT zxc-W#tD5FzhtxZf9IUs(hntcOaLx~Ty1=qAm!nsmdw!sM$TE_npYzC^R@sGp)hcWB zaf?1y>0>oU?w=P{3GHq;2bv`FHT$0?WuNo$+?N`;XVzcL;znUdHK8xo{OaVjKXZbv zJD?QN^8+{=w?RCziKazIwPp{%kG>b0eJxNA8}!zm(Clp!@K&w@?Il@ZGZ-T3dWl$j zg6_u3(7>5LNO#wo(}{H_MVn_(OqEZ?Y~J&)04w5Q>8Fyu?O631IMOQlyjtZQ`nV3G z>lUn7pzJ-$UZyGE0?I6`AmwwYXYuUp;5B&rgzWeCkLw?>K8V*`9*wCqE-|Y3K}bH! z9uw6bx_1fgxr}XRFTIGxYLAdOt`{?)h3)_!ix?=|BMNXOnx>5m&vCl ztTAD`3GXrC4ioM&;U7%cZ^Gjy{DuijP1t6_?0gsJXE6h+CjEv9kD74zzv*3Wb%}a& z8rjNkmu!o!Z{LszNb#@`OD6Omvyr$`TSJU7tM%WZrL2gQh0$B5kg~;vq^zcNbmq{E zf$5o&PD_3c$*;+&wv-o<^8Hgtxl%~VTPKt9#Ra6iX7=c;A^X5=djYY7Zx2r|~%M5rW6L^0H&ktUv0s)!rFF@Q@XLqsO! z24cY|ep&}YvO@052}I%%MUmtf*?tKRNF-!!mn8{5?kspnLio574#bo2{ir1nj3qV% zq^<}&Uu6jd8Uu+=DHLgsgaR9b@o-d<37O0JotauZ0?jW zYkNl^9*ju|ppeIdQ9^FZ3nb$R?Jh#0jg@r7^=bZ*6VSPjt_2rVk zk|sSwe~0di93~!#hn%G|c`Z>%>LhOyPb3jsAC+1+%2F^aM%K$gd2=Jukswdyu1reu zX1^r2cgV3|JR~)CBza`y=Q-Gq)=20sok7T93+TqAEK704aO4f+HF^TqFL#8bL;~!s z+_q(NLXl#v*im-^*^oF!nwwVL)Fjr{7%MPpKl|SXUU zLN?Ar?Eo&Dj?Xi|>j59aJ#s(rhXG&4eexu5{#|q$?ud7Q3xEUY7lCtl8lB?YRsjXv z4`skPY%}p8z@n82)2Uvt=Ryw{2VZFzkG^68447#Ha{V2ET{{^fYjp(l49$ln}x1eG@fXv zNXqeLiO@zV7ED|li-hEkL`S=FZKxx*ESQK@ZCX?z;53c2O9|yxN#-|NL&c)1>WbyX zg@RD8$awkk;x71{-f9gySSF-UQjRE_bw2`Hy)1nZ3>uiIBH1OEbj7vl^7 literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/compiled/lbfgsC.mexa64 b/common/minFunc_2012/minFunc/compiled/lbfgsC.mexa64 new file mode 100755 index 0000000000000000000000000000000000000000..fe3bf49d4be4f53c781672599d25dc7accb98ec8 GIT binary patch literal 8187 zcmeHMdu&_P89$EQCbS`TAdJwC)l@|U1YRKY5l~0m*vU=jlu#Pp;l{C@##Nj|en1Ke z?H0P#GA=>WwlZm=llG{#X)r0PGPXfVO2T-Ij8W*Q2Gtmwvu>jeqqJkg`+eu0&%M4$ zHZ*DfaV+2Oe82B;zVp84o*n)`Q;E$cIN8PL1aT)88AyW?bXKYiNP}1>O7Xl{ER(uQ z^{j#^cW9}>Fcrcr0V-oL{<(7{3+k2`rfnq#-l-_t5leQi3PbLyaLWwSaa9$jWrmt? zfYSdtCAZR5N{=b&QfhlA8K!)#s)v<+T51WwlHgzkCs=npM`hZn$`7r-fAH~!YN0)X2Jg1-U(`QmW_UpbHd zPk>d5lXM;hrkguh*SE&{`}9OI7*FcD(Dl$@Q16cR1$(3SMTEZb9({9U zYc!FJ#2b5qi9{qJVv)h7R9`3=?e7z@!RAP^EpAX>G#2Sg08Z#@Yiofu277z^LzIa} zg2~9HR4fvYhP?53aJ$lIibo=%H`)~&^d$NzGmz|oZWx@WmiQ3m)`;G&?yZSN4~h=1 zxmL9LZxhXdmJN;iHJ)octF3h<-i{GMW5lL@-?l$dO5=*g_xJFpM14_$=l+|7AQ>fU zKT2fxO;BoQN~vYhbI9vY4OY~;+%m`Go%v~}fm1V^=gkolUMek($1xL*0?sDopLEq! 
zk+L!qPIH8l%Y<{+lC0WcOiTU_;;Cvgha_hn}|{gL=PlZy@@YrE5XSI0!RbH#%Qk@VLOz@=zM0Lf)guOoFtTG63p*WUtB{us4W z$Hd48>3DDT-r{XXsHj+vmQIam=@+z2eeUlcA(+6jrt_zwmECQMKeLln!NT+rOGO4lSYqM2l zTIiKouhUijDxSBOzcTB*?I8NeSD#D$zS~*z{H*it=OC#pKcx*%+H7aM_3u4!g>)Q9 z?;1q^!?~pXU4Py2L?E3tW5<7iVs=CFz-5`#DB9qCX{lv~&>7`Z*{a1Ty(L}#It>kP z{j)n}e3{l!@9>PxxqBM1`_sq09~?!cV0uP*=yu-v3RMJ^*bnb}wRFesme6lovQ=+t zQ*xT4Jm;RHXhT_8`k*iCaJGiVZ6~$RFJ~K_F2^%eRmaPyYRRm#vV;pitY!RTvydDi zR2qBJ;~TQ&r7-l`PQV8$yF|9ri_+25tn;pe)H;rr*~v>3z0dZR&-SKw_)Olps}pU6 zI8osa*%FtiCTQOYXC*QcxT$nZu!o${owSPj(=$&~9~!YdN!dWA163bF)ra=BrmH4p zWgV!jlxE8>q)MVc9WPQ%UO@IX$D|B#a`+i^>z`9cF@j_@sTnFAlhUvWZIpg3TfP?N zKHH=gIwhS<;dz&|Ctq-dN2TLE4X(WPhj+Y0G5c&U*ZW7E5BJH64(&I(8{O!9{3TV_ z)JV3O&gULwj?}kqM+f$ek}~KDU$vm}_>T!-smXJa{|2 zcYF1Y38SlA)K#}XL)|s=ASQ#1dD2q9nOhMUt!cDI(ArRjrQ~!8mXhJq`Q$}dzm9`( zt^j|$3~OKp%hu_9>Le|OchcG-*X?)zA!9hZTA#SVw%mR_?S(Yx+8|yB`Vd0x1f|6L zq3yoS!Zzr#EiW%E8$nO4f{g53uo)o53RhD_^+xB?d&_o;8&h*r+f;*?`~Y)Kl4WX)k^qQ8iPJ%;h|!gmlIc*5M|y)~uW|#uN#Tk1 zMe$5V2Jyr>EectGIG79yPozihjt65Ay(bJ?=7cAd?2jiW|?xm$bI?|H?0iq4_Mc(02>=uZAJB!h_r?j5!{!JP(=HC{3PMJRjHr z?Qb*(*`DVqQ@0xER4(bTJUjqN$E!;qQl&)#+kXKLY__T>o|IJJC92&-Gl${%;Ua{cBl4QMSLrf~)3~ zHYBRVpUXC@cBz6tFoVdmJ;TKF*k_FBKGpF1)cmEh%{BOQ{kdQ0eQLh)8$JU8($6df zT3ADAfu|kO@_uMmp>LSHzZK*5!h2>h?hw2m72}Hp?=!{tV!``IF+`@Z!FmZ5=iQ+4Gh*?xQ2Y-{Kb7Ky+84me^a!3de8nGE`;~&lV-lYm{||su{KxnJ zrwk5)&ev|wOZmw>*Gs93Un5f@=8oIffm3@5%lP?&#OL<=N$K!pUURu+jvuEKZY{S2 z4QoUHTF2p1h4a3_el7uCGS5BOWx)AegLR44E`YCH0B>6W4+D3b9jkl-_gyP9WGS%Jf3E@txam$UH^+3zbTjULyuZY{& zXcC3{^{u`AUBO;mp0@O0YEa-LHP9PLM#7#otLv^7B+#SbLE`F!abOFlVzKR9j(?L+ z7SpuZ+v?YuqhdIJwiJ1^lclH|GDI7Zr#QO9i^8Q0wqTg(*L#9};a=SAMf$=J+y#_5c6? literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/compiled/lbfgsC.mexglx b/common/minFunc_2012/minFunc/compiled/lbfgsC.mexglx new file mode 100755 index 0000000000000000000000000000000000000000..60b76123053233b7991fef692587a7cdd322cabe GIT binary patch literal 7733 zcmd5>4RBP|6~1rZf~!P>8Y?ZSE*d*9=ms^Sv{)087sO^2NtU9hZ#J7HySmw=`$MC{ zYUmne=MhRPbg*S=su{~TjM9-hv`U*0YSeLBv{q@GZKE^RrJ#jrXNZbTzwf?x*GpnJ}=H8sL3rir;?jyO;1l|AW+ z88#w|5Hp3;OWAD5fh{O-6>z$vcY#)v0V{2k863SEy5(=C+IFH$g%z|1Qb)WIdXSF* z%cq`Ap5P6B6?$j}9GF7R_Dvd@w{9hu9g6yUgZiqvK+q$34J;#4W% zSP*mprvOqm0S_kvHWzu;Wc!U&7j~}(i+Z~i91~m5p2BAs2ZAm=Cq!A1r^hg)Mc($_ zwO^EC*wafGuOdH7thQBR9Vm0Z?eHXLienSkI6Rv~{k=dh82b7w^iK`!CP$wECENt0 z{pG-Q80a5iplNUSK!~RxzW{hXP=ObTr4F+gl=(5uir{6+G}}8m<7P4yOr%T`Y^XbE zu8GEjvFKf4VP3b^ToG=MCR5==eJq$vhLfTr+})Uthf>kbxajC!8cr=w*t8+q5soJz zPMTLPSO8glFc#|!F_8!dQ{n5=9pOYYRF_Bu?{q906X7rmTp3OX4&pUDKYd_o0UNShw&StHjc#WlQSK%Y2vm zE*ERBgz?9NE?gL6BLBIvbZM-?i~M^g<(i__!JQe*qq1>KGt?KMlWngh)9JMbJc zFEDw26cgMnjv-B;{@b8xOlN-o6mYuufJy-8p$C-nkF?356`<46kXfWy=#`|Hniqqf zf+ewl6jQ&36btVvQcQ-$q{vrGiV4_2ihP0Go7p3t!go+bp%N3Oa4sg?^TT4|N%~;o z5!OwZc!cXO!sMZ`3BV(4d4%di2P9ALL?}EoD0xPu2>%Z4k~}>Z@sjVCJd=^RWsvw&v^+cY*2E>xbLWT9^TV(Bd zngxpVRaW*N%r}^tZDm*6UJTL1%a)|p%-)yNhn~$crw$5*VKe%&l%ax&m+jnEW!3OzwC(FFo7W%N zj_ifM!gT%c11Q5vS6I{8n4QdH^jM86s(hiG78p7`+FVx$H7@5 zXY`lkw`$!vV<6X?LZ*?1DmRCUIWLnrH!6`87|2yr6)L54b5ZIa$dy$Urc3D-D?N~X zRx5uZ-LLeynySK~57^{wrJ`4*XuFl^&t;B`7)Kd_pqHT=)Wfe6Igrb|sLK)08T&_!U9th`-6M_TOpP?2AYn_(H(dz7S%LkQG59kI54-XQ z^R7|PXybT(<#={TgJ;-Ye}DQfXrONFvH}}NoA!<8ZyUG#gI3@f%h)3wnr|E*J#BOz zdN|^El=(8qm+jc4Wjl6zEdN2xzsJhFqGgclfMy{3iyG@5J$_)3v8%jy75ph3c%Jm8 z<-a$tS($C4Wg}K#d%kiX28+Y=7KgNa)B9+zF~%Pb|Dh_z3ICRS!?AHI1DC*8 zw;UTkbS>J3D2frVbPZzgGJx+6IShs z?D#~gJooCv0ju#u_8m9a^xs$CcIz->a!CyDN47!>8hR5Vw-7zoaOnMYElqVxyelrP z@y3Ix=-M!3+HCzRGf9C5V9yB6FAPDuYlYC(d9>MOZubTlR6<7F4m<9ZMBN#lUe}V- 
zr!C7)yU%^U{E(xE6VM1nncjxI7|Z8(m~@*%A9ma*o_pbQl$~`Kp zeFZ4Duc(oct_PV;19gFN56J$oFQLyCcvmV`9ZIIFLy1(>*ZfH`CDQ`F_trIgvBh2JTZ9qtT_Lodb*R%NXPaAl z@IR?P{@Lo+&J;7J>z>)3i#%t;HDW5iRkrVI)M)RYaXS9xp&Y)_K9#G4aC{}bJk7~ERkN!VzT}-9sbDK;Dq+)zN=1Uni14-D8Hb5YQwiZ~kEeZWonurf zW42AwnJ`U+W6W+Rx?(Bei^ik)PldbjkHcRSs?N4xDkyy6h`A;a>qNxWf!H<8&cgh&!iM2>i zwYvqfQ-QSOH-lJOFIf5Qpa+#5zcEDC%Y6JkDZ4n*>=VCTTsB0Uql!Q~LKLX%E`|-U zhYAO1M@#{g9Ty#OKTrXEOt=fE?6`)BC7+_AUwRxnwww1r#4jMNK(%iJ1T0Vb1vb1} z<9Akpvilwc%5D%g#Kp=8RN3`AcJx2*lX&09Z?Xbq_hSfH9_@I~>F3JDvxs_VFH}0+XGl3TQ|CHBi}c zui}N>TxI0g(C)PoJMIath26>en};3cY!C0xmcx$u6{z$Z5U^eaw3GW|N1$|v!K*Y? zGqGu1<=Bv@HoS*)sa*tir2?g!j)Y1xe+2g@UM3t+x{4`w8(=q6DPgOU+`}o~vxse& z=w78Pv7?A{RKXZ71P0*@F9dZ!`h{zRYqzw#e$2tGB^gVKhUH{n3?$e$-9pAXn|BIu z3g*2Rfw7CAJ0rvxW%KsJWh}CJ&Nw^f*gWsA?AT)S_9!rh*gSh-#|oQgM8PP z%J?&GPaEjV#hcKVw&xi5T1U_RP4x$_a1^k=i>L6$6n+JG#$hf24JWgk!7o(hyJm=Q zfLHTJa~+$w3%r^in(JI~KlR1_pDOymtM>$f`X2!1+V5K z>-#PEmf|!yHgOO(q`U`-o93DyG1;!kHG1AO`-xR&z&XhY+U+9w@e?EF<3gHc+ zc|b5tY~1aqBEdJJs4&BFLw~ZeT2a>0rW=;jHJLXwHZ~hAW=q|YCIe4Q;doo|aj3Rd zKEm*2#(7I}BqfhRANPKwEW7!Nq?nS&smX6ywY8;BKPP?I;$zn2M=Yn*DUW3T0`F#A A@&Et; literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/compiled/lbfgsC.mexmac b/common/minFunc_2012/minFunc/compiled/lbfgsC.mexmac new file mode 100644 index 0000000000000000000000000000000000000000..8279beb632cf0d76761e9bbfe551d4892e239a7b GIT binary patch literal 9500 zcmeHNe{2)?6@O^R)8HR>FSWAvy(tW16kB2jBTBci+wa!Y$vkM z%z<-apc%AOTcoyITKa3P6m6ojx~+dq&~07mq@bb+)_~|VX-q>RD|Dh7ohF*p`@Fle zxh6pS&;FQnU*&u6^L^j%d-vYw-C4Q&-X|Yi`y&y35uKY8y^P4w3F$x=@OMfS`!;Ux z!{}3OxiQjYDF!9z%UCIjikwy$>20aK>lEBq6R{*)VO>@`Wx-}75=xH728N?zQb`Rg z^-+02U#Urx4=o3tsuRVc4|d_ZiI10v7W;5858T|M7>y`$LW=rz#Zn(t2QdQ@G-=mY zX-DQyYoaI*iaQfhOfHNU^I0{8nO^u{SM3K2~vaqq7;y zlG|}$n?eFy%L?=+^qP)~&)yBGHIK)Ozkx{H8io#~oNYS0lSn<&m{y;=Gu=gw)oQXfolTJgc!iqQuoJjU9o29}7ugq7rLe|+dVh#W zUvK||KTFPi$H_5v{8#X0f11Z?sy1zmC$#Zcah&q6Bw_rRa8}dru1$}>a*6G}tBt=` z9B=jSrGP&`g8wC2=6@6MW@^*^9b}sfQs*uBvO9LSQ~t_) z_l`RMMH18l@JD~HKDU{u`EGIsAI-nvg`dDPv@TQoi?i%wEOa>CUkX3>AFZANC;kvQ z&*7gZ_Q$@O{+-VUv-CD{AkKl~=(~jVM?Um450OKChh=^qF?F^aAR%+OS@WA|`PnBfoh3(}6(%)oCnKGJy)>JfpKjT$9yz1=&8(fxH3#(k#$D&A8F}e>p2;DuuCYDAv&`SN zN08rF->>dxe?wPu%?}{|L$f&*HeBoZ4)EOXVrtRQKqybjav5> zYCVoxtNZUvbKc){z-QDM{kv+wsb9;YqqsMFvgB5~^!r}r`?$+K-P(csYd~ny=A6cF0_OPs=HG>h zLqZ+@9xVP1FwHw}Jx`ZEWB$}<%)efkC$z6$;p3+oKa7yD`$3Q&6qZ+Lk0N|4*5vaa zNAYO(Q|)oN;tRpC7ljvW&)c7K94>iw*)ye6&L5Zk$n|vjQx#9%#?LLz%1ME?2K%-I z+uaZPn%r?oiR>~SMfntHRdYW)znsvW`ZGB{mz%r3$~syMSPWPUSPWPUSPWPUSPWPU zSPWPUSPWPUSPcBXGQh7HgaGzF@vdMQOSdt;-Hpbu*Fu&ut0DPtr2>IeDT!X$0`Z?f_fw7P2x(qxIjQvp872pM6 z?6bPA0)JAe?QAl4wiVFd2K|44R{~c8&jYUluF|(tZ8dd*@FfF#pa+-bh}-4`UIC04 zZ1un^fpHF-4|o+YPGD;=;$(d{^r%NU>}^|szYL7AZ7c9sfKfAB4=~=6mb-z!5!N_o zOpZHr3`nbV%(b)iR%*Mt`Zox7==fFWzis4*IO*qw4@7|Lbz62r9aY!;|3t@?Gti4r zYcs$R6CO9=113CW!d}>T(0RYP9^Hq2BOUht`I`c9XDS{Nqr<~PsS(l?+V>*Zk`Lo^ z32{)4j>!D2Kx|MPNX5fZ87e8vs+gSK1Y0FC948a@$b*M5+6w^8H&V} z4T`TV;cHpn;A^_K$=~?3CVxv2-rGW9#n+1N4%dO! 
QHg@S>==ez7WmJv+3koSvK>z>% literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/compiled/lbfgsC.mexmaci b/common/minFunc_2012/minFunc/compiled/lbfgsC.mexmaci new file mode 100644 index 0000000000000000000000000000000000000000..7484e51a497a0089614905dd70f2d4ac42962c28 GIT binary patch literal 12660 zcmeHNU2IcT96xuvY()omnwelUvWG<>#z7OJ?n#Eb1(XW}D2{C5*mZZT+HSMEmkh%` zOqp^$87`5BneZ?TCYTW6VL?ntY_XwRLU58962ik0(bVt}Tnr`}t-t@hZPyK!_^jt- zr~i-h|2n^OdV81rul#Z6?@Gp4Ii?S@3bR#Z41Y|)+=36z_vp=gAe4^kyZ}o6%P`4@ z2IrR9XL-CQFZ3UOgt1>?Otj$RTB~fR;Kg|~Vwv$!^kq-8xP~!}F)D#5)m|8-@i;d- zc}F}HGriTtF;2m^2}?xF3IRYLW$ev@$a%QeibuK*YN4e1^OWkRN%k~2f6I>ba4Xanbx+`7{!fx&En^i#^8@v`$4GaIV$l`g zeB0LTuj@@)3)$C0Nx4BS7?tLQ*4@i0GQm3C=?TyzD9)Dc#H2y-Y0|#QpZKN5IQ$o^ zLnc3py#bSQmEyNC+8t^)+G2h6IEY9a%>KQPIq&qZ^MtHoq6d6b?}XOb zy>Y6EX0O#Ae*XFT7Yj}g__!yauu`7`;V|#5_ZM6q*pI4oW0wnkb|31s%vk-Vq7wlj zheSXkAQ6xVNCcKZAWQn@+}RfV_==%0K6A64P7LjKrO^c===}7?-rb4YhlO3^+O>!Z zN3F}Qd4e$~BLXT3Pm!RHi-3=$6|Qtr1gc2N)zI3hyyb030&+Sp-ynITnrW(2@s@&x zN%Hv$+IiJk;b}i5+u&qzp{CB!C$fUHc2OJ9&e&;3Kksa)%l2Ri)PKL|qF>0ikiC)4 z=r`$A2rKjpP1HxPOpY;|QS0c%!_ku?`mBS(vM;!atDG{bZ5;*GPot*cb6WFcgPyjt`LJKPVfUwsq3zGT&}%_N@Pc+mpBzyy`mdJ6Q?G4; zIZGsc4Z*a17nN?&j^05oVNaj{(LbqND%a0C`n0y1jqJ!d#)*vkx|j*LuZeB~?bv5P z0jzc=C5-WrniM58kT5f%ruM+PvBFAxpFfiEq8OYrT|r8gZ((`*~KbOnpM1OiXH?V<_86u$bL>Q+=)E`Izkg65z2!U15BtohRA$9}TY52Z& zY%MNGMxP;DVb6&E^BH}Ngw)UF%I_I^^<$aM9%4p%cxvFF(8sVdY-^;(=btLqlfaki zFTtnM!>2Nj53bHWbv-gyieP&9>fmcReFmH`W|ZJqgE6TcJBchv?Oa(R>)?mpO{rWM zY2ch&OjSI5yu0l9*^Fhn~N!OH=vSp-Mtv{s6$ zxNtYqeL|sSID-a!!-AuL#F(KPKWZho-3$LLU6Nq3cq=f3;^3Lk_n+V`rD4D)W z*Q+Vtgi}+O{f=1Uyi-+|O%tnMAcL=6gCrSz2{w^F+_IlfeY@y>RQ&SsU%6cVspUSS z_Ti}|-W$YrroN?37010-JoerozQyiP-rdEN!#<_DYMFWz2fhZ{r!jqh=5k-q;i~kQ zSm`@brqWYkg{sn5C9&*%^@DPwanH8KO|`rH8)~~kR^*_G6&mjUuGO<2`7k|={FDet z1SA3y0f~S_Kq4R!kO)WwBmxoviGW1lzl6YVOO5~1Cgolv0uljs0wovzp8+G2m) z-5uCKV_Udec{JU!5`tQlO5F#*JQ4ALs$zkiq_M3KP+g@h&VwriXq~hoim0lTis3tV z?%2C_gZc&!bEW%p&b{Zm=Y03>?9ANx)BErIdk14u6Jsohd^7UQcE-x6Dcy?Pv5hfZ zKdSZ~<*LRa+$b_31Cs%U1w^;57mZSJW9(+U!#mx+ugY#svS_*rnT@**x778*iA;Yk ztrv=Be{`#V&$YPY_6Q@Ar8xuRU?PD+LdOG>v@FD`JLl}@;k4N!R_7d@? zA?tc~;-LrBc*0x2h#28HCgev27-7`!=TAbu1^Fs#d%gVf9WOp1=N|Zt{PcsLhOP^l zMcm(a5H*eSQY6CI5QIt8q$hZ)7a?Pf+05;Xy@ZEBH`i&W zBd!hU_91(7dP8FATynt3>m$kH(BAZLf2P!l4KdvB^?zhLqtWmq+a-$)6K=7S-4)zx z7V_jDw}?(m*B#M=yIrS9EhOt zNvc>y-l^Hg$Gh#H5jDYf7k3K6@?HyJ)Tu_zL_Y@JHtBCNN*12|Dcu zXW_{t&2X@C?_Hs`IAepn)a-zkdexcd4qZn8ubXd!6=yLt`43DVw~blVx}v;yDQ>+* z94Xdm#hO>FIrXl~aqEq$Qi@=X}fA`rFtzcc|YECa2t`Qvxe38}y*v@I!*B#u|00-P*u7I)fT<96PxmTl?)aWu5+&@rr zDv>&)RhnNR2u=DU@gs=&OmrE3w|3jcI00YOrnt{%IXIS5@NkKEpy)XXeR7Zp82gUm zoKdB@hO`I)#OF5<-R~}LHr8C{OI%BP3?t_)v_F{fa zo{$zP*l`SI6aY4DNNiYI9tRupSb(oGR-4C05%$UpC|l(>%=2safqSQY9KR)}{Rg~! zl9x~MauTJRt{xEem^N`=jMT*tu0IFRqU+CI)NnVrHux{c8KT=@bbb-awF|(n4$-|e zG!fGLmTL$OngD5j$u(bgHO-`XmTUT4%{J1MNkc|nf|A{aOmzgAQUsYU5?-SFJIWHx zk3?-;32^V%g!^H#WNc(7ljxcC)!yw~v5RYA^Sy5{sT7vN%{zis^xO+Q%{TKtJVE-b zEC$MC))Ibdn@rF*2V}Y?J{_oRe@6aclT4!~_;W~jPIt!eY#?gr8&ZpS4*WnouU-~) zMCk7kb%T*F_zd_A_zd_A_zd_A_zd_A_zd_A_zd_A_zd_A{9iH9dV8dY9NHbDb65A! 
zx*zAvIJExyfp2(|5$F(g zm#F#s6X;_?ruQp*_?r?mkUoLj0@?enpi{^L;yVKc@*cbe@y^}9204WBc<;i~F_aRM z))5Cc$>W>k$xZU}o8+H!dHr*U^>b5t2SaaWZbC+iQTEPM>-!=&Aun2vR5OKdpD6j^yH`bS0AHK5a;cJR|>uS16 MtYf1?^x?yQ07w%cMF0Q* literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/compiled/lbfgsC.mexw32 b/common/minFunc_2012/minFunc/compiled/lbfgsC.mexw32 new file mode 100644 index 0000000000000000000000000000000000000000..1a87a91020b7d7610c1011038dcebc2993674075 GIT binary patch literal 7168 zcmeHMe{3699e?LsJGZ?hUR&5!WyL8e3j#&)HOOh>PVU-k^GC`8iJSev%`ZERGuw&m zyEH%!RWg$}-mFxbK>SJDKc*pQkQz;4D#9UAlXRKZZbNNaJIz=IS7Jd}Oyjo3$M?PS zCAY!~LV#A3Cw=dI-{<>%-}n7~-g`dfa`^d^s0bm%f@m5-BhZuPrqBC7PJp@ib8i%* z;SFztIeU}vP}%L4s8U4)6u#|ptpfk(Xox{7K9kxO!KE|FFZ*x&`4c$ z5}8LP(7w>MBGg1+H$q(y=M!54LIa;@{Pzu&qQ75+=`JfgATQb%@;0K{5o)cB_}av_ zLaqSNoQn+)y$9G`sktQIC-~;7W z+*EE;EF&UFwIM;IYg)xuvAq4kyqi)poBM7ZYR<-6KguGx> z(%!KgIK+g$E6Jce&h^Cx23aKTIpT@G zXw#YoS?^Jpy&)l1^gF<-QGH3%wAeTc)+3GTb~>=aG|Ud-{TxiNf)^QA4=()Fkyv<~ z#Oc-=)ulX{$8~Q~uLDyM!N^muBPMfXSJH>d!#cBq)o6%1*(}v}u>1w5eZn zh*rimL+Q6vFGPcaK>v&SHLB-8&!_^`GhKVgL@BF&5yoe%FzBkGNKSo}AnYoVQP%+y z8?QRU0f_e!(*{l4)Qy_eUDRU5dx;4^8v(cLp!z&y+A=kEirNz5Lk;SCuu;#QBC7#& zFdnC48jy~M$#@LWLdG*Qa=sG_`4iJzCv3`0D0khPnF-~wkeMp@q_@I0P6!!T2&so) zvNEQ47Z=J-xSk-;Ua?3MGRh4{;`ZZHADvyEiSNrR6NloZwFL(x{RYdKLvhzHiMt}F zH7Tvuk|cn-Gd9RGCTEev!iM!>2;B5{u@pJCvRIWS<}QA{Jw)U{yC8z#IiQl`()sVahz*ONfD8kRRb9Q^2PSct5Mi)7@50 znpS^6A*&)x>f@J;@uEJKjB&Et3jAVeI(C668Ok@SFBq2ey_S83MTHrPJUk)Lis8(z zkITc&-4^vr)We1)uD~spIfH;*)CL5cpAfPn|Al8}{+RM!nhha2fUsOOrEdQo58X&BI=SWqP-q9eB-% zzn!{}h||hlW(iAa>={q7cT%OZx;q5Q4qSt2u|F}2oW1!rBtqvbLT06TBeSPL1Qu?`3E|(0wtz2a$@!|OtI~tn+{;nuDm@C+uPI7l4;Zl0OOi909h~&zrSbEgp z6k_jWl3PlZ(y_6mOD6++v&u&$L+>iLnaj$>(cc!~!)fJeECDMmGRrCUHkKDt> zCAmjuZ>CBgF)emJ7RoZ@a+JiMgdaPD6+6vA#ok1UeS$C#i(n6Ad~JnB%FETlgq!ae ztRz3i%5EAbxxz}Jc(QXkKiCVqSBt%BlB@kkWc;cS;GdFFw?YQ?sLG#9hTc@N%%n0p z`hHPt45(NlRBVJ}l46S~p}-03HZYq^mM_gtk;U(ri23>IVJUfehlv{fz?{ckI5Xa9 z(Vnlz~%22$WEYSNYVNx+b{ zbIzpCn{?Hrr%ig+q~}aJ+P4~$GwC*yzSX2ZX3|Sdy4|FgoAinmo#egN))-i0;K5~} zY0JHTCF-^8jNZSd<0x`}hc)X1!N7Ojt7)?E`hqtS`mqVy0T{e#(69+}0S0fD|5&Sn zST2wzkfR{IAV&KZ=-&f5ZNkoiJ_2$P`27gI0@@+Jcpwn+hoga>a1 ze>93dK;S2Y`vQ@kFgS~iVk8hg7KLhl$9!E3dcwy8VIR7SS~`6_z5XzIooOL`OHZ`3 zt1avc`q3+v&W>PD6jr@z2^BK@(9#QG#7>0LmPlIwj7yfNUu>5EVTz(+q@x$|4F=jn z{gu98kbI}H-77EpBVm7V7q2^P1rI|)(Y}s|sN)rY-%pG^S?fQ24A=`P(Vl!WB;W6< zKgB4i<5kD7;}?z#jyE0G9q&7CIX3ekwU0R&V=g?I3}a6LQVW><=XXOOPoMY9 zboRuQlk=Z)C@1GPZH*;;or#Wn6OndbG!l<@qS+Q?g-W0}W0#jsRUznR4?{W}|@Og~6sYA;`whI#&>Ba$* zjEOD42A`f=m|ZtBrhvPqoUukgwwbXXlW0tO0dlRAv4Y&>3k=E_8~q&i(v;ej0=;hr zx*|NJJ6-mq=EJE<6D^Tcgs~HD5FnG~n>oBW54{qQOzOxT&WHCh+FL5zu{1oxR#cGct4C6=ExQM6g{XXM6 z)@(M-78!buZF@a|dnoA^|7PRXEEN=58!#$A zf#F6mFhO1MJ4`Xe)hr!2#iKamal@Tu$Ak-mr+51eNH^ZamJJn0x!&aOOz-e_Ht50p zpf2YrVw@zD4b=Nn6!gZwgOQ6PJe_Ymi($n%F3yIm!*nU)Ttm;B(%u`vsfZnZpCVHJ zMXl0?b7NA`!WZVz%8wM?{~`o2S%1JI>+zyiY2I#{Gv0m4)haDIqlhy*l8V0aa!5SK z#Ywr($P1O6;^MHJ>0P5=-9x<6ya0ntK0X$did+uf-RtAseLlI{aEwVCmW` zTs&?Xyk1z>N=hr@cqsbHj=L3o?PY8#>!BjK??Rp|9q5-c2Nk{kS++(m_#4u@6)k5k z5ge6sieGTZ>A)}-!685ca2RkD&;T6QPAk>NcKS&=Np<#dU3r$R7WF5Yoauw_w5Ky4Dk3@+N`Ng3%x}Tf}S9^g=2gdlt zUy;LXCEF}gqyw)j`lit$-uIR>gv24a?|dGP52w({IOGe=vPdsn;XJv5=>-_fa1fQ; zV58{^M}V3_s77)K>c|&n%pg<%*SnFr=7Y3Q`+=!&_BiAw%VphF9@1UCaF?0fc{rpO z^pQc}D_HN4VNaaHa+jbj%RGekDro~hbV&#WY-3`jJ^zB^hs1G3Y=Y6?21?1JrqRg{ zNYM<8eR!2#V>*E}Z&y}bd6YIp=#awnPz+ok#ixu(oBDA4zz}gXIfqTnNdFn2kYX?m zD2N60W?1Bw7=*gR>`hzbR*Y)jP=s}Fy!z74J99RHZ!F@vYpGK5hY*?rl7Rm*bwQ~?>8rNRp&J?*1Ji@ZO^H;q4yhm|e&1 zBc;!Nd_{Db(PWv9(nyz;Mvlw+>e+T4k@ed=lM=}!f5WW=jVm$Yvz$-q`8ak3XTEi%&s2SeognjIn;Ab 
z+M7niQRpDPkUA;m0$eEmW<}px6cPhUNiR{kqF0(pR4Ex$#B;0lniZ5HOW=efA^l#& z)6eaqL6L}XY*f;F{Ny!Cx(S&Gm8@)EF0Rf=dx(UC&ZpQ4iL(?~A?LG7$uG_Dy50&e zii`HWJDTFw@z93czZ1fPJ6ztWv69-dT>o zDhY{Gitaj$fKp1HlS}%^Z(N@hP_I+&Zsg)Oiu0%$Tf_{9%|_M_Noj#(PUT3y>$2Q8 zk|&pBLgGavZCM;zrC0s~SXk>mQh*#2m892)7{T};qG`musgrLJIeUn zc_HL{FXa3+?{4##IkG+@^}J!m6g-mbNmtXgqmdkgGMv5&wFz0*tWAA%s8zb<9{Xeg z-=_RyC0|sNL0JS0z#%}q>YyYEcu+cMXlFPsyO;z*5O?-l_13{rN9knMFU#rrm&bR! zL%ts{>TzfqabvkXYBuP_o7<(ySU_5dQ^s{B@=^aF!3Ph~>sc@yyeD{fa8vl6yZbW> zXV1d7+rp`4e;M!WnT79}{V@8#I^O+`?`ejYmyEXq2zt#CLmBUW8mHaLS4~h$py!c#tM_qTykxo|DClyCNo6!Gf* z)U{6=*e&Dj_R@BvT>96!`@pHaF_Itr1y(~?7v|mLc`4~>yo50$>}KrzcR}K!Py;FB zJ%zQ380Oe<_urEBL}8Oj_!$Txaaj>~mq2!W*CSWIlemj7+sRTNWV``T`LYICkDntV8(_sLgLUf#4d}I z8Si;;X59PHKSdjHq=Wz$ZPxo2%3_PZnY_yKZMfEiZ*`i!6>Nelm&syOTvEhq|3)&> z%|_Z=3yD3awVBJjw_~JDtvCnt`C}l%ZYg*7r97>I3wJpw)6<*sWCq0$s)w)VTUdop^N%J$r&Ux9N$ z__QonFmt9SgM(z8+R~iVelTAgy6)(L%a%RsNItYe3LS8- zE`}A+s0^D1OxXs6?aG3iOuNxH9Z#qHMH?viWic*Z=HdfbLvtG)=7|RcTo@lBD4CQ` ztH|)-^Wt}z4B1^TEW|G7$H8jl4^H2Lg+AKa`{(!tfvPXojAyiI_~goX*8?~5mrv6h zv+)8}?F0ANeBxGe+@f&E#4NWlxO=Sh(@Y$}_W+y$-)U9GUaUv7QEIHpcwHv9`}pztE&HrNL*@z?cU9LgL4eo;TZD`6AU99*Bfl!WTF zWdI}7xp0!n^D=|HyB}Hl)x^7&UC1Q;SFDq)b)Mg29n5GpF8ZDh-GxfL%#(6f^!D>8 zui?%k6}=BGqo1g>f@h}T_^r$Cm@Pfh`B+#=4bqd%`Kxq= z&2E5G-5~BLz0q*seVW7r{-09d&zIXr%Q0BEm>n#wJS=eIst!BR!ec( z5f={ONr(K#yNDff!Mi6JA9s-BA8`LQe*U7@D*E5sXfo`f5gWB#D0*8L-R2|8@L!(8Jr*ECMD?P-jA%NQFh$3}KLjJpZ|w{H@zmHZ00&uV{c{+P^b z@47Xis*K%&QC&ifq|`N9yPAkagNa0Bdw6LXto2(5XpuJa7x2B{yG>2i$J*6+GS(4K znk>gHY-578#kRC}mA16Cv89f%8js=+F5%9OSUjaB*qXd>yD26dO{Ca!j&Lj{O$%)>t&W zH4<-W!?x@$!gqEyM>|uu#@gs4e*sLiO^w9EsqLLA&e7Ttjz`+nB!-~JBW;X*&l%R@ z(D{DW18!*45jVG~%?q2@H}k?aCbVvS-P&bkrq+vH>)KSclbvMsv1DXRo7%J$R&7yY zTN07PcCuoUt#jU?sfq24YNE9x(H@CM)w&MNw9tKdux(Q;dcTQ4oH;PB(-H|ap0WhV zEsQ-r3EP^r3!SCLX|Mdpry{-^Z8-JORKc9^!8SoHZY10b{Ch z>+rmQwN&4zR?(Q?H|;Uiq&|DhzHI*|7I2wT9Beiqo?Tg?Sv)>csBC`j4fq_nJfMd4JRuO^kyk|`NA;`fwRVXec;q#D%{vD9``jt{LS zq5heMUDTGwL~I)p^u1~_N1RM<=)zLUtBSf!ZS%EJUt1hW%JFR-_p6CwpB4*7Df8DB zw?^8MYVnGi#W~qmF3!oH#PO-!YZhN|)|$mRr7at@sGVxO1qD5;OHCG&3fi`w*e7d! 
z84FTS-$zxpw)lZqyrR6=M}@YwIF!AG()D$yopo!QihbL%wqI6SR$9KKth~Ims+j&q z2PxW-2qvOiV<|P7(h?95X>Ylu5>#zVTP(U--QEPd(6zNjtF}~ZSr%EME~!#0wvb^y zc^> literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/compiled/lbfgsProdC.mexa64 b/common/minFunc_2012/minFunc/compiled/lbfgsProdC.mexa64 new file mode 100755 index 0000000000000000000000000000000000000000..26bfc89915375114d5ddde77023cc32033fca859 GIT binary patch literal 8322 zcmcgxYj9h|6<%3#;(%?*K*1!?sFX|%1=SE7;zEiQOU_M@laSbu6pSKEvaO74Wu&Xr zq$L>4OLd)uw$my7Q_~+UL(}3P873_=LB=!~To}Vp>XJ_IkcLX7(qj(W2b{`87pJXXJC)6*{+EntS+co_IE?GK;Z zbnl~2NubQbF3u4Rmd5Re?0F^R$I9T_%iz1q;2X-|)DAcP*3<={ls^xZ!B+xz<8Mtj z0Vq|E3;1PLVeoI`UnkBMtVOELMvOYr@Zwi1C;+;ngNwsQRf6HC1J4l?kUg%E@KI zc}P%My$Lt3uWl1=o`MY~oZCkAG@5V(Vj-wpr6U*G9u<9_L&Xq055#L8O=I07XtA0W}byMgI?~(j&;;V??1)luYMk-HNxrO#XR<9_` zNVgAh>Kt09K|yGl)83Bl>27U!-4tYnUagI+`w~iWPf#tRmrgEmi@@lrBOs{nQj`R) zIHgT|WY>n@v1!e-DYtW_kAjl5X_?ATP=~j0!E+FuC)3Td$3-sZj4+X;8kxV2g)J*H)* zwUOrBnNQ(q#-G(jzFCz!1!iOBdE{NW-^=`QxsSp6ANbS$1}Qb~bk!_}eQl&Yi@f8N9_MWn^UgX4di`0OxB0bu^1c!O zxOX^bb3XhSYV>DLdEYwwCuTBZw8>^j;Wp1OH7@RA5*>R`lq;1tHK z6dJwpLM>dF_reAOxYIE+@7!U7AsgE{Ilta{r(>ERvx3vL3`#qzKPB&=bVspwe<06% zwv*oB4+`n~;6auml|ElqK^`2OfYW(e2+@usQhKE8uopwn-#q)RSzqR@_ut5LW=l0_ ztW<+m{XZIHPiOtjllzC^z*znoOeZwnKc&KE>YJQw?0Lg?j|=C)EZXQ?De||V<9#FT zXk*^yYi>WXf6kYQP8kh6>wOwh0zLLZd-#>jqmJJZyJ@uA9>`oQJ8TEE&GDxWL+nd+ z7zWi**=es@JFV5}@>L3SmW}$3I(!H9oyqgpy~7hWdwPl-bRML2D8B{|e`f9&dGsrK zJVTEFjG#5+TIR-S>EO!%rGviFOD=2=pO}X+$4T~34j{)g52_Qef2ZR`GCf_Sgl73( zA`hX^3K!p8?BZ)0T=dzFdz)Xl=iWf(@44;|X?DGt`#RC#=hk9f@^*Re^ag2lYIydu z@tg0(9J4?)-vLb&2QB|DU>e!eCP&|Skk?wGw0_aLMT_zd8>T8jgdk$k4^IQff?h~1|0opJf{w$?E>KFmzp(AwCTxQ)+luO?RfjOBE=QT%^FwEh z1Z!Pwwe_2v=YFGVNZhpihAUTJei^}6;qSv=L!ENZ=dE=;SkYRuq+@vLcb1G+9&jAA z+XIesP_5Ve9wy!YCSml+#C2GC#?cg+ex@k%;}%{HH(*>&dn`)4N8s}SACDea1sD+t zk~^amc`q}hHbK0HsizJ=;yu=T1}iuw^@``cQ%>!_l$w}T<=kIO6@QVUTa{tP&)_XV ziQYYwcFCs@%M@jM9~TV&f2ia!6=ZwZE(W~hXH+@jEP0O88O85axpCv_6@Q-j6&2I>W_tCKY?oS@W{H2db(4P4k!9?^wLXq!hZm+fDVmi z9*5<=XK>YO_%SOy$i0WL$of1#nKmd%o^L!)xB^-yX>PJU&tIl;$B%NdOERp_^Qr|H z)y?|69x&zk4-uJoJ~6K$LvxMwd44hdyb@&pS&nHZ%4se$&-0rpwU3h3e*+aFf(*@l z*5~=p)U5Y1^s3^tCoE`;3_PPJE>V3tu#Vm}x)&66^%=+cN3vxA9`%hZ*-Ri~3)Vfe`|8ae^ zr6l`2KKQ#Se>b%HYt_$M^m8?CAUL@xKmMKm^N=;$|3K+~p!BWwto(1GPhHOb^Y>e! 
zI&Ow4N8-4#KKn&GeY1W3elz2gYQj2xS)b_}7Jc4N$>oGlL(dYM{rW|BB zuE5IYQ9u~Cf2;cLvQ_DGJIm?+4F%NxMix+%^{=$xDx6Y>it6xZ+h);y%JBPU5P8;T zn0TH$j1fJm0-sa!m-fiCKj-#yztBFbl>ITaaKB|SXknpUKH1?dzYon)^mUQnZ^gJ> z@O!HmcZi`9PTEDK;P*_i{1UV2 z)>9|uEdBV1#9iW$Wjy>?;*0(Nxw5+lO)#ekFs0)5vJ^U7;C8VNt@jlegw}C;0yxF< zjK$BK#25GbyRyQwglEvdggApe3FEG;wPVY+U|{3+4taCtE8;e_nu;QcU~fFp9f}9#sVx{v4+@;f z_Qa!lG~!v)wE7xB1;JQkkhs;tIQT`<{rz{b9sg#ZG}E@t+wKoCM`k$xnH70NlvYfa zVemLAPlR;87elqu;RWSXBG`vdS8?3>;ky-zac(reXo;Y2=VovF##RyB5ZJQb8whS` zYuoPc2zGea2mH7+lxJAeWuCmVGu;EyE#ac8KVa&{&~gV^bZtmicf~i3Wo`;fURA2= O$>LkhmKO6};6DM_sz#~+ literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/compiled/lbfgsProdC.mexmaci64 b/common/minFunc_2012/minFunc/compiled/lbfgsProdC.mexmaci64 new file mode 100755 index 0000000000000000000000000000000000000000..fbeb993e420d7076ef5c7df2785cda623ee8cd11 GIT binary patch literal 8992 zcmeHNTWl2989oEUdP$6D9W|BH3k*?_h!PoZnzpt|$PVlzv&uSguqj3brn~F$rCqOK zFUpcMjwQ1dqwRv9T9lWdN|mVckSMKGq)OB_25fNDl&WpOx%8ot)>2zh_X2S%rr&?& zjJ>-~Q2Nw9?2*p==ljq3|M_owJf1VZTK@B$2F9fIj4?m@CiL4I7%PBMdjLJZjxp2h z(R+J1tNIeChD>1SWKhG1QZvo0mCw3k7gy_R+vv_M$}S}=hAu;9WA4Dcnr7zMaC{_b zX0o|>aIJlxYjW4M2_eE#g#y3O=My~BOk}d@q13<%uC}kMMjv5{nb#1ass?={sm!Xw zwecGh`n>QFuJX4^8~I0$nuAvIh?T}%t-cAN?@b}+;T1J&;+IQ3Ka}b-hf@6`AEU2J zaV5Q!6TZ8B)}Yy+jtyI`T&;a)g}yFf5aG)B-OJdw#1tn~ckM`+X@<2PtzyOt*e6h` zH`43rHqE|c$-Y(jPtxb9qx$t^SEW`*rkO&`xutg`n_gpIKq&I`5w5HasyF(bdCfGF zvAgM0g`$uUAY7@B{JqCh21;==&`mQMdFE+79PtvUf=SIYOy!8F!dcQ&`TO`ha0~kF z&_jR!+ZpZX%cZU#f6)E@fd|2BLuab%haZ&Ic}-OrI|DEYN_?`FXvx5d-I=K&Z-vt)N-+G)eYCk$rSkpwj-BGlYP`P*}lZX54V1`QgDqNkCs#=H;_n} znbu&X7^{_&r4ih(TCV$8CTk71?yeH_0#v8<2-G965&^9{TKeMUe>#rV?QHucc5`J5 zbGDtwV1l=OhW(+oU;Bo(PwVM6?5ojI#&3-O$uR8PX`|Rtp20?E*!o$c_@cl3a{|Q| z0}=aT`89Xk60v8>ueyuQU`{Qc23S%anY8s;;O*rTF1~5!=7HPFd5)X)H)yp{`pOHy z-Pb$YPZ0{rmew_Hv34SN|k>alM4q(t?OL-ec#gIJy(yVlz{y^FKj1t!8qlueQ z`&RiMSP#l)BKGC-hrq#W=V`~bKY%~y%Gc>m_GP1}pEm6F^1FbAg@}D=G`OS$Hr)^H z!MWhI38R=hYn0^QQb3(4!%(v)V|mwUIU&pWfqi3?bw*^`=mL$jyC z%Ig=jUpQ#mtsBagFk}7v8c9qQ7L3x)i=;48dh7|~GJm8SeJE-#1?MJqmt+a|ISt)` z@I?gjz&c}m+D~`w^(`sQzTA5-`3!9E6&9A1!*V+@N58I>cD6+AzlQB;Wy=`p76c2M zjKnmE21B^x>7g{dnuyI@k>w9aof7uY;}IvzooY9lW*h|LV^>WbR4wr?4=(HEQ1sUgPmcAoMp73FQD{+susd zNq@(0a*O12wyCJkIY_Ojp9q^jPV)co7zF`kak!z=NA7+?9$^K@oOkk zo)>@gZepv!{{StK?=RG3^zqA>CQ^w3T}*%>1wXT`AXHohA7Rl|lZ9}Q- zmmkG28Sfv+;D44_I;+M~eX5HsjD~ZWtQxmeZzY!-bEOf-@yGMDt7H}Iv(IBh`|JQH zV(woYU3Q$oQ<%o&D_}0Z1#>3`zw2_wh(mkC#Ne>Y=_k%lIA^cRi4y0BoP!vVo=1sO z;G8eHoEG9_IOj7i=MKnXjB_v|Da!fcv&13BYJ({I0y(gS#3R@Ur7?r#v7SL|FizOcLQgTA=IQ7_A)6SkOGYj{t3)H1|Hq-#7D3NZBC|D zXfla41-`dVCe@pKGCfs~H+sf*n=w3z@@KUQeexGXp(1!Uu1AGkEzuv1D>2LAcGuX)Bhs z!XvqO(&~)iFglRzE4w_{oroo4 zoYl)&k<6ZCER(?+CXsbd=2D3)6tTd1_BKBrRN)iefcUlx+9oKyYaxC}P&z9m+$AUl zgD}0X*^GhL?#G}B*!xbQRp78Vtki%vnVS((8UQlg^RpT$auZ(L&VeJoawuRGc&q z51R2@sxN8b^8`9_=fdh9v2$76pSdy3W`6G8Q!i~?hVq=Ua+7{t(hU!tX;`<_5;vw-m8oAN$_TS>70{a8E2KX6!O>oCdevoy%{s5R3Q)jM*r`lcj7E3UJm7 zkC500RkH$?eLRUefbW;oRe+`eN}|7%b_NmB^6Z-{Q@Cd{R$~c0*D6s0TvQtIShE*2mcV z^EnqwpX33aYV+7{x%D=Gi~kn?t^V71?;AaenBe$Y!n0Blhq!n>Qwm>m(P-ylEMqhB zPF0NaRKU}md1s;&PN7n5=2fHQrjjpnz$jIo3OKGF3#1hBEn+c?%9(U&F zS+lh3xSO#KN99=NyWnl^Pn4nMN}hHNfuH$D$xBY+>5_Y?ywR|;XWGmR57%azk@xq0 zXbFgwg-;NA{bENT^^j*5WgUS(^6&^3donYiCdDXFcdIg`;55|q`_s)9@hq`;W*k9+ zi(Nd`z%s>#SQXEnayVUuXkQg0q*lp$^}-Ez@-&Bw{>;6@JiYLJK9DKkdcT@3>_fX4 z()7(l8EgZ^w$x1U)I+m4=C4aBh?cC_$ z;#tM}c4NA5Q1R}9N?{f6?YDD%w`u1aPo6;bus@yD{eQ5cZ61EQ@2`$~8nI-!P(4+u zXz|o?@jQ3@au^*QP{nIJJ?T%>O`g8`1EPNMqAH>uANRh%#ej19FN$}N54>V+5~mgK z(M9PNr(F?ex%WM)uvQU++&iFnb!FhVRk0pcy>F@BsHbXCdPD6vTo51!AI9_^RCZGB 
zOzC~Ea6>{#EAL|*m-B%W@Y#Txo|Yz|)Te>fVqkFB?o*CsKKQ0}_>|vK>Ljn!!Z*t} zlGgoMUcd@(F7u6@r#A&=%lLRsFZpm&-Jy+DTwwMTvAZ$da@qJz>B14kyN4PqFg-~X zk&)40^7yF*jukC7%zK-svgUwnkw)P7_Wd*#j6z8-u$!ac=`uNr4&%wqzaQN$K<}JB zpuiZG-?7Yo%F#A}x%Z8rR%c5AAp&EC1mkQYkjN5D2^_u*Npdz+f&^}GV%+(XvE+A^ zy8Rok#u$}!;X&?w#!$teoGd%Ye1q}i5v4a{=hhdw^@AqsbNW6phS=GeR(plV3Bd31kzt*(Rz19e9n-)@jdgR{=Tjc~@rS;CLl-y~kTDSh=` znrwm=KtoW>VUSP-Wfk_`LB&L(1eAm$ffrCTKnTovk3wDbp5or^+pNtEe7;D za_jE`_*?(Q;%^G(?z8bHheaUt0(Xb97&P(c%udbc#os-jj6a%iASDlO^Bv1n=kPhe z%?IF|7X%z6s`G+?lakK|IH~$+fRnB`4|N={>syisJ5{t}#qE~iVug}kjD;5PIQhWq zh!@*|m+upVWV}Fdc^dCNV{IXvF7sii%Q(hYO1V-#LMp^Zysg|vGK_z!By`u#e0^g?v>l^1@Qiv@z!cM?*j4G3RitJ-cqdk zDBdYbeF5>dFZYZ33;dy_907evu1fz7!gX^$55AS+)L&>o|uPd`Dk3)&mdYxWxNT zI=*w246QAmO4`H$w+*zDs~*n9c7RO*U``)t65Cbpb|9tJKnnjFPGzciK^1SR-lM8G z#sNNRu*v)SqVyaYhgI)OO8WLGIset@SPKXFPIu(*cKWx_szUK0)m&?!C-{CN4 zp$Q%>9Q+pbj3*U>kRKtpG}%vEyDNQC^z^D7FMl{n(_-W@&upnrw`4u4y4#T_STnuqk6 zui}=d&m58IGctWrrjN?>Co=t>OgA9)EybIOO)oSOdHBO#d zH$_dYbmuNB?i#&G98<-Kz5|ZFTJ%8J6tvUV@A%FP>d&r#cr#_Y4lIp}!kKiV<)YO- z>g08UiK&lg(3&6m4Q^i?TyfS*f zB8GI=_aV|<_scqLYsesFi-4r&H6m?5tv<*Us+?tsTabJIO zGZg;KP}mHTdCN*#KZaHTwE9jai@Dgz@3!3!4xc;kVUWa?sO>+H$^0{v(O_wl*zp9Z zD`Kzi>VrhLEkwG_0v^Xn-N&&ZBQD~=O8aChp~`?fpmh#gJrY~~7C^a55o6+vDqi|M z)sbq=q+EyGXzW1p(z&it^e`Z5gbAZg=#RzkSH{REERg@24 zE}HrdCnjbEe*Ivdj?U=TGefZ23hjT9+R>c0DCv=$YE8=Gqh1n*Ko^ z=JnjVtET_1x%BBXIq8Xs=yu24#Zw&j_P;$iq=xL)WYq4dV*IJ<2ajI!124*TI}?-m z>0tyz55%Kuc^X@y?$YJgEMQZ2y^m}_j3_mSb=P=a{vA?*c5PNT3I=@us8wqY^KBRz z9&l!i@NG3_bGQnqj=+eTDt~ku-E0pc_Xu*vLXy*sQC}*@SSHiUWI6?@?-p80`0U2T zex7QoF>17MibNa@K;@S<^tc4mAbK;&M@dqlrKlQA7y$hjw0r0 zzl1sFfL2fJG23e{HQS|n=%nX}ezVTJP<%jzzK%|GC! z--3u*Ou0qc*rm;I4dmvt0rNCmg+Ar+?gOtMyyiUK8sMWDJeJ4%I$@l67kDn>|9q!B zP|&&V#+artb~|zlVp=Gn1-g@~qFPfZ5sRz~US9=IR*@_emA}OO2Kj|rA`t1+y5f=U zuDGFM?OdnC7eqtxI6G-*EDeXEp%{D3C^g4eG_tC5-K_R#lvT4%ZCyhw))ZgeLO)ZI zL^DL}5^JziHWduV5^PIBFwzxCB(zv3yZc-|sIjoEL-zZWC8%|U@iTLKuq%nWAFu>N zo$=K{ZCwO4UttLb7X;%yS~$`X2?y6;jYhQ?W7CP>)6*L6N!%ETqI&$c8w`e{TBs|S zSl^?e)sF69SEy5qBMW&~C<-4H29sUz!kvsm8zbrNlFj_JASk&%wgyd3)6!cPESX(Z zC5OxYi#5nDHh0Uq*d7+psLR=Mf8o-oruDFwSs)S*!TBv~Fca-+WK}E_TTh;kv!7cR zC$-r6W-Zpy9qSBrg|!9UN$MHY4LIFNnUpd7{9L+j6p zpYw<^84=N7`+`}R<8L7#gWQhhi5^JeTPhtI>7iz`N9MhExDRwZXYjtn z?p$o@%k}`)HI-w z3Ca_F*UZzIp#m@QFFs$42MT317B&SG@1|_6S$+W+*!Vxjp8y38S?fy1mRlG*ZWyq7 zZ2Vg1s_1(6y3S}ieJ+i3CFtQ-5rT*!rh(U48=QVt({%r#vT^w z(Bg?(wHTcv=2gs|RaJ3ANwM2qpNQe`tY8QJoVC>$cCd_V;bbh5STE~wh?-BC*CRfOV7*V^towOEBa8S#f{*PK_;5sJpO ziW};$&DDLrajyP6jgPfnf9-jFt-m%mXqeYuoAu`n&HiS8AfPN-*xG7l{x@ZsRD983 Ol|Myt@%ew}f&T^xy$EUm literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/compiled/mcholC.mexa64 b/common/minFunc_2012/minFunc/compiled/mcholC.mexa64 new file mode 100755 index 0000000000000000000000000000000000000000..af237ad35d1e939accf1567e9659e3f3d1f8c100 GIT binary patch literal 12621 zcmeHNdvp}ndB4L-Alrgnxv`DGz}5$!gPnRU8$nPAwZ__k5nW+k3Z?`v%W8#0^uYTd z0tX{oR?2u-S9v%QIZ3QGO3O)3gHM_|ZkjaWDX?iG$91hH$BCPScx9YeRDYA z+)tVvq@<_k^gmslo$r3X?{UBT-N(#m?tUY(a+;#06FAla*E#DN7i$lZlDOrd?tRJ|3DR( zjvy`${!QP3>y|uv=vfI&a`B04#WGLlo`CN46XlT$CBx2 zR79ik_E@x`sWsNzv^OE5t9M7&Cw4TY(urhsb1aogq(n=iePyOKo^EPu6)o+n66v)` zhi-_+nq#USZfZ%irl69FeyF@0^6F$FmQJk6v?P*EaXp!g?NI_a2*=VfQi(M;x5Y*3 zp=27|%48xTnwz$_w1-k{vH}`fO0`|-MtG|yzMS~DBWZ~@wl!CW5MOL*g{ULi6|*{LU&J8cWhRyiN#`QK39jA-iLsG#)|x zGIN@)p_TqK)TxmCFsNF-O70tzo53Kd30{B94=!+U8YY&J-jEANLu2JN7hXDsqN4oI zK$X@Wml79FYnDsEh4WrQve_=&z2631I8Q0kU*y86Z7$_5{8|PDX1FZsuar<4poBl*3=Q!0jgC7&doQZU>t`JKd5*M~bKznysM^6)On|0VG>gu~mw zQ{1Lq{POIeFuVHG)AEBd>~1b~9V?c>K$zA!edC7AcC&NEbI=v(Qqx{>6C}B>lb7tg 
zf4ng$BH7#eK;UmCB$3;HX&$}oGds^~X5~;S=wA?~r1Wd1Rs0+H(DNTS0o7kn)0IQX zbEk@bh^n?l|Jc^!4gLj>BSg|)u?;Ro9Rf(tf%;-nN2Ig1n%NaAAKT~(U zOB2e>W}f*0&CFbbbXT;{v<7ld$ofgB_74~*Xd)L_^g9+6QY*Z$giJPpF|96|e~vD# zBzRZ4B}y0h9mxIk_fpq{ZrHk{`;QD*#z8Ya^sh99P3z~muS226Ix)t7Q5qN>)Yqj5 zoBDN_E*y-0=rP^89JY?h^qcYLYwU_2)>sv1Xrz7@ zuDp~vUxTVqIk5jF!}^(?U8Pw@uM<8Z5#|r*iU-AeF?12Tw!@4ct+76NQos)?6(>56 zX<_YXxbknsZxZW!^$0x@LvdySpU$Dpd!}SXu;~%t7hEjGwnL+QPHtn5ZS?$ zJq^z2$x*JH&RK6tYrXQ-^un4rRu&t)g@mxd;oK|WWP_c@HAA})uDqN%t9PE%Y@=5* z`i#meneK?~1YpYmWILn&2PlbwnQvm}$$eJ(*lcveQCAfO*_qcKiP^@%&YU*tzoUQD zzplT*AL@VDKaS-dx=bwxYT@TW8GUzdwJWrq+()F_by$?4GY?;r)1mLgU6x&oM$X?s z)@YOZ9Vpbj!J{jO`tPx8>e)@2^;9Fwt-5Z*dViox?ttOC z8U(5h{|0luCVsLedkZZfU+yciv5;|icTn09e?f2Mq2c0G0M23}soorOc+&hbbE4qkkS@_=s)j~RNH|Q0x*>&C8Q#A9#TBfH{j=pLI+v|YQ zi}pphQs&T~U58D^$)o?smuW4bjbeJ@RP#J@0DBhY7n@bB_3NN^TN9q1fh3@Pd)I-anSwTD> zT*{|!#`DKqF#ZML&zImC#>QjErF`aXdYHitw!2$CpWQ_*&{G-puAp=PHM?mLY%I5>P-E0Ld2mK1@ zTDn1^@N>JicfHWs1KQlu871A2Erg7o^-sT(&-a3gS%H&ymR^U_9s%K6&;7G~qLElBJ^sXgX-a0OS9Pl-;CtqXoP2=)Dz{QXU z-SREt@)Js4<(3{IE&fFEjXUt?8#3>8|NY|rbLsrQSM!&-T3jnN}&q$?Q{*vrMY;#;E8v zWzX?@^S94q&-)wGA(uUklh^*QfKg7>@*O+R!TUImlX4?tc$Jg*(bh(5Q-;o8-HK>rt~ zy_EaU<3i6_6SZG{mt#NAhiLhJ=+^KdV!po>;y%ImtwOwL>^Zd%FBW{C zDa5A>zJC-yQhz*tULQLZ{y;D&6?xsYgCjrP9zT=C|G4y1CSLQ5*QX^O z5Itn*Do>+6QT*QkPVM%p`zxCqMP;JAJ+JiNQ*lxrsnC8;e!i#hYb7egPZfTH>S$2m zv_A`si+6nA06GmPVEH*gs^Kys@lvrw`KeNZ-z)r-YFB<+1XhfM(>RWJ@^d|KiZi49 z=*rK0seko6{2*|Ob6YT|G#yf|aBut{k$!N#)Uz%Oh4X!kooxX=ZGwFx27IFPt~)2e z)05y2PlA7L68!Ka__u)bFr zwKxvRx{mCMcru+zXBrwpah%<)-gy7|XryMtMtPDqT)?fac8TiSqC1+~w#S;I@+~qN z%d`u;A?|8Uq!aa_rArpyEl3b;s&6N5v2b2Q>oYAad$=8AO;|Rwa=l(_M46*zxIW&B zd}Wobs6)DOu`7{m$)poCcqeVy(-LcEKtvPrwUrJbH!--r?Gq^x+n#D`h6w_YU2~dI zeyZD=QxMdr+MC0gr&;O*Br-7KQv&1>}9nrabU6}kUDJrcct<;o4l#^^@< zzKDUN&+<)t>=?2<+N@3{%d^OT*-m5I literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/compiled/mcholC.mexglx b/common/minFunc_2012/minFunc/compiled/mcholC.mexglx new file mode 100644 index 0000000000000000000000000000000000000000..949e3416bbcb570ecd4e879c81f110376956f43c GIT binary patch literal 13046 zcmeHOdvsJqny=g4U^_+<6de%MhG|_~pu;07*&Rm$xgeed8A4D8own26B)xXhu^$*b zB9A6Bw3kL^f#c)I$_}1g&m0^%D>|+tBd>ut%g!K2XA&p9n>{ejuLb!Oc{!!U$SPT>@!`bG&+1>Am};#3KbxJXPD z*NG{TujPqb#wvh{5EVl5yRH$U%cmd&gHga}o9_Vb1HTV^f*U*n`7RsAf!5$f8A0=w zzMBkx4{Sbg{18-1HZU^b5R$8`alwo19k!yEol^wg=%kj`S%mp{C9Vw~m0N!Q0R58zs6?BoGnZ|8|F11u_`x0lcjCG?{ubaM%f zp%_Y2kb7c?QuWKP^skUG;5^EE!9gD)rrE!HD zf`Mqj&euj-!)*zOB>XqeoC*GdcsP&@FG;nA+{h!YU)iqNyOp z!Wu`6D#jDT;3Q(`dx)`My~J3wjhS~F(q~+GSQaZc969snLRj0+77B&VQ^`?z1SpZe z1=Zy{pmW#CH;xouV0BB@3aim=)t6<<9<^p!W<}Pu(W=Od&CFCyvg+MdV_A0G zn>mMM9?FT_Rk`s}^Uf8P>9*F6>nuDnEnD>`G+ofpxGt+6X1e~71v!UONFOqcKNm{2?P;uF>5qO<{EnFczSJxks6aryZQ=++^*Z;ztdx_g)RzY|D=nQ!Y%rnPpy_~TE*v$$uCy@Q~j^g~qasgVtUEkuf)0S*c#%cij9Pm+Kcy zr1~<+VH1mO6Z2(@?w2;U6zj15VukTks4vUZxLN6|n2?#yscdj(Eq<++SbO;kA7x&e zjtmUB+%!<-&HqY@&ZMZhshIJyWK>DUw)6oS5bLh!$@lc-c5+qZcHM>F{u|L7t5b>X z&8*#*G55a$)54M(hyKH9?Ue0sZOp8F&PwgG>i54&er|MbLi(^_d~SSPy_3e*y_Y^{ zq|N>Aw7Jh+Exo<&h;m!D?4Mz|dX(tc0gN^Q=K8*Lq6-8t7qR+gQ}O)_BBX+CQk zeJ9s_+_V+WZ@HD)i#AO1=08P8bN6`NxlY+Fl+Y)+U8F4Y6>BFe$bR$i&)Dqh)GNs; zxr?ZGWJr&WV9DcBk5u}Y;avM%wRHQse@dIL7?%00VeYjWU$Gmuz9&CiX>X@Cu^c9P zvz(8)UH8$DbfZze^)Xa#b#nC5d1L8sUEXZS>%yx2B-e-K{Ltv+*X4rlMmw^}>qY)I zpP32{vZ0};)Q~ETyE&u&td%0aLXvXPAJlE|w z&&nmq3AyfVY!3s9v~z2z9eZk(Z{35f$dXebJML{c-i@zi8jo7_o6?8enfh*wcP7<) z+&sZ?IBs?+^fb}XWXl}zuxXcD=263Z+AzD*W-mh2iA-acWu8DBdCf{~GLF42bvxT3 zt8z)18+cvKh<>y&C%HXuyweZ&uD(t&PLd(}OH~f}vn<&~%36COZJscksiUaLp1=Dt 
z4|`F4S7y1_Wz}!VlzH>Vpdd?AU0lDGT%TOCkAb1O59uj+<~q58y1Tl6TGpo(@1rp&E~DbTC#{|IgJnLQb2(Vt$YS%PoP8jt zgS5=uR^yi3J%=&m7Gp;ba!uo>;=1fIfVM}z>j*y$@7f2;Siq% ztqpnX^mqg+<0FuIetVBCJ#ss_>9@d54;GRWvM(~5-G4n)Mf!bL;iNShjMQ2E+*R~! zTLIPlz=wsx-FS|r{MbLt*zn}z58T3hea-T^n)#k(v#LC8fn;QL7-hyp=U?6i%y_mL zwTFOz2)rKn05HSXZ-MFWbHHx_--ynh1&#w(V75iky_vv#3T84MS|*H#-NwW+muoZ1 z$D?c#0s2=_pk|D_(=q>2JjzSnT;Qv~n|J}gEAe7!saahxp8o zq!AB{cwoc>BOVy>z=#J%JTT&c5f6-bV8jC>9{7Lr0FIjSm6*mf?828995cm_0K_la z_<)W1&5Gr3+PKfg1Hff~&+xg2-)p8C0TXVif-wzs}h{T@( z^II^g#n&w%&f>cjU#u?$@C=O4k)WvZ! z=7k*k6{F1o;1kH;9hT&y=zpE~&0VMY=*Tia6Ce)Q2>5Hj4*~xGI1D%i;I}!kXu*QN z@JwxJY%o1_k+xLF4W26B9N%ou%I08jMwNfgjHY;?E!g6ThgV1VJ=8O+YS!%PS@(`; z{*NB=!o3`CHPh7Gn*xe+k8?vOCAStNCI!+|j16-G_8gUk_*nz6?O|4`RGCV|D--cx zWqTlaf1o*>sGQ%pxUROcnID{I%=T4RVi?=nlhMjxB2^iTCnLTF@fDO3DPEKC)HHbT zUAkHr>-loVA*Z&>_~Y~I_6UR%G)h)g7Fo)GTeXR^HnC+Btqf#L}hchjW+^>uzdn!k+w(@HxMGe zpmOsl2cv~^j4`=#@-5$)+}vDQ=@^B|T@EqI!9jGM5jM{;8s&@$4#8JK zB?oYngG24S)|fnbvIAe-4cO(NJ7)}+z?zPKY|S-LVAZDqI%z(QO$X4#*&=={(f(kw z5Wd8k)?}awI2l*CMd#XL$*`}vE#+%UMWUe@k&uv7OCZrAe4#aM&{Q}XSDe-M}UxO73@fwCN+~Qvq5448;Eg@)XO8A1wSUiE+ z75b|nm{gFq<1yK+Kr05)*BpaQ{P<)=%4kyps=-(*E`@*@3OA*i{Q(3tEEG*f+E&qu zVl_1(&S@$dX$$MT*0F8*7%+WzEM1<^gYXqF-+A&iBiC^8Jt(cJR=*IlQGj8Ta{3+< z=a%LZ6BzRG>Qe3*+`kCZFCfQvuH3^|FZVUV0P?H@S!t^RcpiZJ5#_iK61e~Aq_el7 zfO{9^xNi~o&Q`aN<%C9n-t)Ns5!e>pH!UAR8TUl$a9OX#epUK+AO^&pvU#=h`C7L|!M#F|7w^Ij%3lJ~He?If4bya($2? z46-41qK}z=0MK$=%LMseTXKi{^SCX?{tiNhAm5GKa_lS9GXUD7T=D%mkTU6h@WzOi z3!yOt7Ypp9<$eO1#q_*}mzJn+~}P`R`-!S+7o%yQio?XR$S$C%1XBn$KEK+a6mY z*A2P1wGc4aGV!aS&a>e8De5k!;tunsmB_sbxlgqau(ok?iQLI=2r;inPPfmr<=_&T zD!ghOwJ7Zo_#92&>2xLJ?jplZI!nwsuhZOVSihJA5EmwZiiwKm&Y)tSqLtSe;}op| z31gX}c{))sOVNz*DmE#akxs=RMf237VvVB5NSZN4(JI6Ep=gz1j8HU3Ma2U}^OS=% zk9ErAw4Z3Xz7@^W5!Ub!eTAg;ImIb&f?`c#y)ilEDN3EM6wR-1_8A4(DJDpo>xs!J z;GgaS9peUl&He8_HEpDPY5nWK zKT%Zw7SJIzY_K<0JO#SA{=Wmg+a7<`Z%Juh+l0{?B$*ghoP3cAXkU(}D6 z*joeoamX_{U~d~}J)hbBCqeTWLh|v%^$cj8JjndGgg#tEb2XhRqIrk<*F`k>`4W1t zguVF`$9sPu)gkbN-5w1m!y(^|H{;|{)7w6Nl;T{XuD&B5xoPfAzUFY!-yZZQTT*TJ`+^;!bxmuaL)%=8!;3%M z7Q&A_)?7%ZsYCc{?_N?U#c=9kgR<+@}5; D+41Dx literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/compiled/mcholC.mexmac b/common/minFunc_2012/minFunc/compiled/mcholC.mexmac new file mode 100644 index 0000000000000000000000000000000000000000..10d8d6531277a22c22f61c1400560a24e6f05309 GIT binary patch literal 9936 zcmeHNdu&tJ8UL;y;RZqo)LqS^ttsgi7&r;A)p>!P1Zp0m890SbXpWPZ+Tc8DJK)fJ zV<&``45oQ%rG}0|+Lef^lZ7_ARU4%|x-JjeMcNdV1hzl}1RA7JwGx_~-u=$?bz(?c z`tP(H<>dU%Ip6pDzVqGhT;JDMxBl}n5zRtng-1;$(iEfBpmO-@vDnANYWEH7)AEib6|;dFNcQ3ffa+@?s5B_UWdmN8<@y3BY>F+f?_kJA*bHD%&)+TUpY(0#Z73P%5ZePDZ}K>T*%tlN6KleTav%=64TpO?AB>ZW?1-&tiWOweKRflr1Kv_(V< zPT+9Q17HOa|0JVIJ=pQ>Hp)$L-a zB2aLEsPVwmHnBzBE~KdA{nia3jbTjQ6_YR60off>{i0jl9-lvTSvqMY{--a`mr59S zXd996rymA_L(uuxvVd@CLBLAM;^K#h6L!xHn42gqKS)<{y{);#mJdmV^R7 zLV|#`gvcCBJ{MSgnCRE|rOH`I8uMN<1UuF3rKC4)KC3qgxtfTOuZ;*aOIJwJ7dbq9 zg}$X#Iv?kfd7x8ZSUHko##&K@W3zNj*7hbi3P0D0=@*QqX3~hotd*id@nA$Kz7+xI zCyXcm-_Y4NLfs!GpOgkh!scdrH^)FETG#4Ip9IIVgOSmYzGSpbAC}Z*^wcx|XtKOY z{AgQQIQoc-%#D;1rI9d>2_Gd_*fc;Sou;tx;s}9jYw0w-iLFamcSX1PG%#->=$ks4 zh(<#O@EXEVO&P7XJkVrmQh1MmcQ50;GsY_oP}p=GxlpKL@i@uWC@%b5r^LiIdg{)i zf=7uujwOE(ot~liFPyMXP!!I-k6GKbNEx5{}hrqRw5(CxxNBfVq|S2~XbcMa)Ws7VIzUEM4g;y-Cy# zM@HMsC8Hs8SmIsdu@AYz(UdLBezE9+>^8`LAF?4U;%N;>TM*B`E^D&95R0cY4e^xy z-!c}@(J~5)Nkst`N0uju=>glI-el_nSMmACVysWxem-IwPRq4*q~(g0A4SBqsEbb< z1uN?MGe$Avj1fL$vEwVQ*%T0(76inm;mGE5$eZNhh@iR^5enz%#FkqTap4?hV=$)- z`F1L&SJ*G-$MS}2w|i}G=}k{x(VKF1AjeyVBf)7SB)%~m5#PEM!QSA5O}8SZLgJD8 
z1I(|uOj!<|>wgW$hGMSx&v9-nR2u*{L%^lEt3w!8|tFTk36I!mqPGQWB;+yzF zf}JMFcS8OO>W!;0J7=$s*;)QKNwS0}OSqwK4?fl%unl1UV_jl_Cem4mJi)q0lS|UY zx$Io9mf0D~nSx`6Ju9k<0@!07F=LUgbdlbai7Que`(Q-eek)>WrQ+Oc#FeTLqtEoF z2JnlI-P8%|`gG#DO@Z#SM4Qj+O&vJ@2!-W3aX;d{uv{k{F3S(@m(LFaEH`r263@E zcL~QDxGWr;(nFu9&~K%!@Q(zSeC{m~7u@a@o$&cn`0RuY#702@gaz6Nn-e}kA=eDK^m!56S+rWnYarV^OD8;z_Ne@OG3H8|>D_)#I^92C>gcyfJ^c>plm13&p#ODg zsQ-*K(tlaHgY!Di_&UMY3BE4yb%C!7d|lw{0$&&Sy1>^3zHab!gRdKW-Qep6UpM$d z;OPb@yY}e@uk1gsTob{6c74O<_U`9i7F))zapY^JyXOP?L^sjMlVQW)l6xVq0%1G;GXX zZm4$n-8+@rQ5c&s{HuX9Lgi!k1QZyy3!hycAnDW-Jz_fQ(6$QKIf~H zA5pO92V8TA;)QEZ!0#sDv(4l3L*rPLlz?X@U_%04oq#>*9H)*ww@~dX<~XcJTZz}e zEx=k}7w|8DlYl+I`+@O}S6d5w0GK`dz&lLUFH?aK2mDw49^-h*s|^7E0eC8~0DJ-% z?@6^-hUy*Q6yRpyKc=#`#(4G!{1L{dV|*X*$H1w;c*3gsJMawPFmO*g$339nYZyl? zwJ&4*I`D(Qt-wRT-vd4bd;|C);5Oi!il4BpR%0B;L;EJM26#5`F<>q59N^ypCjrj| zJ`OykG8S`S(mWEw@P{5!a2aIyR76erCZQ-co-%}ipDDNp*b&3i(}A}u7_p*V3PucR zkAls>;rNFpz=s}*@t**GJ7N4p0)9UMf1H3Xj$`#KCFUCBn{#}eTNksNYQXqL#Rukd zRty8-k=Tr}IB?Iz@O0+SXBd`efWI8a+_VJzKmulz_+y33Ou(iDylNa%0R#(C^H5iy z=A+_G8(C1zsP~)4w-4-l3j4OY>I%E3re;T7Ey)A&w=(z~p%R~p*j-Lft&@ExQRT9i z*Hu?~oEUOcvLQ#guf|j7capuz8CVS#zq_Uyo%$+Afb7t!k|z~VRO9iDjjpNokKv6q zyL=SqGCEz9z$SMY9|H@yVwx~rVkKCB6T%X1}IvY~Nf4M}y;;XDYdf)b z!A8Y6au;Pf9c&~b+zN!Hh86@ZRHAALg{zRfm7**~d3cc|WziCyhf-A}NxHsO z+rE{H=2xNltR#5&l;B1x5yQHkPIjdQgJz+>W0mgMr4$5+c-if`wJh}(vbx^c-P+im z(mT_c#_*~BmalNN*9arx0|gFBg}=G@676lBa{(@luU7c05LNY_rZ*>3 zyOJH8pO0U&@Ha%tAdh!B)Tdl;rtP+twjFv)TT?rR7seM{=~^}m2gHAIe930LsUzN+ zbPX2z>k|HY1!%-mj(BvIB;D-wsiizw3te50s$129k&x&IMZ18fJR!fHZ(ZNfo!TLE z(VWSj=S~XE+0I$b^>n=rIpU_uu5`yK{(`V4^868xbk7sz2gT@-uBYN(U+K585L=>8z zjA*Vxd}>qe@g2zyeOEl)d~vF!v9+rj3DMFBHBMjJE;--u zOWVcM^CmpSPNwJ=OBZK4J19OXo#`Eki!Q3Zw9xPrJHA?hd3HM!3B9vATxfO*E^>f& z`+U1C-JR)VYxULh3|)Bq+DE`gz(>GGz(>GGz(>GGz(>GGz(>GGz(?T!5&?Bfoweee z1~}EvDEcrn-kBV2QdUJJsY$6iQs1O(TXq3XHAoJ@;TdIQ zqNqvPI^3kxA8t~vn}Be5l}-KJg_GiKHLnDK(Tb&GM7?Rjqk(J?b(e}Drr=lPg%NXh;N z-5Djg=mc*ovKt-8iJ5mu%xZYT9)U1wRxvA+3r`*ZBORn;pQzEALpfv*K*Wskm{DRs zfNmI<@Wz}PGqPRq2F)kNwI}J^$<{rC^`-VL+~NS8xxkD&W`@>lUzXPiihh=HH}2JUp99`(SP>&se zBhUv2Az4~ZlBYaLow08_4C)N)W;FIapF7Tsay`dI?fX11qf}&@$>H5M$T+y;QNRgT zb|z+&HMtpO6Ut5K2f3BK0WOexTtKevE4WY6x<02`4UQIe*tj-G5sYg?&~S`~hmD5) zG}l^in(-OS#}_p|)s)BIVTQE}L*v;fYO*|D*;MW2pEIfj-cuv5_e8I@5 z(oYB6z#A#|y3sITYJ(B&VMBXN={Kv_ePaK`8w z%3h%jflQjTXM$0Ea-G3IE4jy9wj?>g7+r)$hU zh$Uer-(uk@XT*_MUrtsdFK0faMSKz$O3>SnQfJR0x`W)H-l#UHH>vvMC~iosZ(P=se4~HkivzCmTAjduSds-rF#;cs+ zXyBi?Pr^uzyijMzMOZcL;L75dQCX7tBReQG53*;y9_=0DneBwP@mt!l(Y#vZb0yo24&?=DpL(p%`f2FV^MMxf?iB4UZ2)OGg~c|sdyP?qkG;${KdxG`Bul?pQ-=t4Y}tnmGa=P@=0us#aI>nVM2 zoZv2KN51HctIn_*`6RP%`c`aoJfKQ#yz2m#?h|V|25_9!GWM&px`fxIyk5fVGG3SS zI*3|Ouh*&g$G7(U^uFC!2tMbwA6&)yt{9>RURuHLkq6PE-y{9B6Z#I7PG9%2<1}|; z7=s5OoPGf@3^f11m5|}|(_Hd>S8^9g9^sNJT}g^0_i+i{u#lh4B*}6Ky?}vaElE1L z1Ok$*BuShkBp{+^>-i`ckS;_aqLz3;q#|7|%86C3lz4)p3~zid^t6{rf!<|O@e#D4 zxu;h)|EonHU1 z7^xEav|ya(;&3Tt1#`M{C*I+S))RZ}EY^(ZZP8ANHYeI&i}rre{#dlGm0$Y^ z_z3t2_z3t2_z3t2_z3t2_z3t2_z3t2_z3(jAW-qO&^C%=c`g0#+WpgdBmQTOKdo=t z9UPE$hbn6I)~pR00R7+Z!h5gwSYg#`TS=b{Qur%kkNM{#!avfR3@F2 zbbMgZicYL8o$u#6o7->e%rooSQ=L+XzRL)?-(_H^Fq!7^oO-9vaE$QWM(bs@=+4h) RSBOk@J0geYUnqST@?XQ8-+ll9 literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/compiled/mcholC.mexw32 b/common/minFunc_2012/minFunc/compiled/mcholC.mexw32 new file mode 100644 index 0000000000000000000000000000000000000000..7507a40eed6aa86fe7e378336dd817ae1a40ee93 GIT binary patch literal 8192 zcmeHM4{#LK8GoB>@Q{-n8f)#GdN^UqSk2@Xr2M&S4`GpMQKLzKf=d#Ti@A7riFdmY 
zH0xlmIP5%5sB~J&h!veu+OfhY*tDfI654>27CRKla0|3XyEHd8!qpsblk4~1?#pdJ z+Zo4pY$rSO-uvF~egD7r-fk}UE!%{O5kfq)d>)}L7|9c${lD(k!t%u1elZd48Gm$g z7q{r>qiwQhWzZ8-qMlIHGyj2VPo%9WSaIET*OptVt3-qr zapTa}`|H2Pter&@Cr{wG5;PGIr-?3{1fvUDC7rt{U4#&4E>r(0*$zDk1|AX@6(Vv( zfR5Hsk5CPP0|>nidOr54LTJ;+E`IWb3OU#zgLwOO(1BlcwESjTJqXoT#F`r92Fq6f zsL$6k4s!%JP+_v6MFgsZoRYDObcH8?PZxA51Rz&c>u}3BIC@R_yNUlz+7vRtiU%)s;C5$XI4^tV$mQko7n0LgVdrb0)PWZ| z6P>T|NO?P7^O|Get)`77L@II6S+`<&Pp$SYD6efrt*B)SsLm@6qaMz@1dCc)>d65^ zBJChNSaM>~F%+OaDiKZ0V$r3ExulrY=H+nQiJx!~4^}3fs}h|Xc_hzXT$_Bz zODjNne78Kc!wX#1#=3kypE$_F>f&nSRyuKj80I(Pc}@^;02i50-#GBHw|n4bqNg=q zZCn^8a?aKXMlm=E8knDE90}FQDJ913BtF%Nfcz>5d-XtrQK+uEuIM zKiyA^7y)2N9CReaERSn*N%svSzPyNQv%UO{go`}%Ad$-UkbKaYWENQ$T`9?N=Iq-3 zk|&A=N_5dNPz;zv_WpxaFx923E@u1Arlc(4n#;HdYv0+!CLcM1suL@YQ~Z7qz^Vf) zPAs{w`v;o)XIOQ?5@^|ML}80=*4+E4FVw_{sLl`DMRop+?j9Nu)!oBPCXsg0Qsqcw z0}H9)TQj2j|mh?+sH0-~0|9F7)u8P~eFOFDl-cb}#jZS3M8s7{DE4y$Fe)LiH8 zk2doFXoQ%qHiMNiEIDv3ES=CUa&Lhtpt!en{<7{qLZtv;&RuEk$IRMO>D>?wO-30j z?{BW5r&tsBB~%xuOwdc_!#2{T^#eD7v-#D$vCHJprM0@a78VF>?Tt=z3H#`U273!! z(bP?}{|+4AaY=R4lGY-llJY@;oYppjd@>#LnF9tD+tMpOM{Z(Xwf(#sSKk*#F(8L%~M5LKDRQa@8UqNZ0b@zK)kW<ZfmiQF1qw>9q(x@hX) zxo58|d%95e?bO^z@F!C=3+xVtn-umn>|(kJZDOrVtl$d^6di(w1k*}vz&#!HA~Gr`Fle4 za!`PQcACa4Tn0JoGANu}X285chyl>5eNejVh?eCJC7lOKcS#wYCx87Q?lcw+ogd`R zq@0&dpVQqG9c{L&klW7nLsIxFoVtF%>= z1e?i=kgcL6J){Izv|WM&0RY3+RJuD8iMG+A~8-v_1p zfEyyO(QQ>k-29r+*O5soNqVfpCinxX3H{Kq<>KYS9ag$w8C-q`3eCwTe| z#o46ZA)jn_Le1gRIj{5^lTDJoqEUW|5kRg@o5-HXPf6S8K^vX1(ODavv(ad7L66f$ zyKHosjrQ2+avMF>M&D_pD{b_28$HuT`)qVzgeI|#J!2X8tYo0(2KKAiXpg4EN6)XE ztwu2m8vOO7zuooJ_rLcTJ?vNVjm@jN5;Cgcymibv?&}!n~Rzo!)10vF zis9X&c2FZXBa|+RHH2XCLQy;@uT%iJR1|+KCc}4Iaa@iywy!}b99kLasAvj@$?xf| zk86(xW6@xEnqV@P0Vivb_^QU3Y~oV^|96d5b!_}JGT^b4(9s1G^7}pPRpf&<_WXZm z0P0QtXXCBhP#N|9$h*h;bMJ4xN4%%JfA)Upyf7&o(|5x67vKB7OTKab>;1R-@9@v?-{ZgEzs%q4Cwm8e9XIz-z#OWC8eysMuuv~F z2`$3o!jr-`g=dBD3R{Keg%^b#!cT-(gjWS3H1_&qI>kjt)tnm zv-jS**`(~fwV|~w9A|3k zsY6FEJo>$V-I#tOM<;J=LpyS`T!)IXcVj^Be|2L-$A5EUlMbC5m+SEBI-J+$Yo$4^ z<+#3#{uh-X@2vFV3n2icNX~@=~0bwnD zSMNr{7?cK|Z5x>SI~Y?yeW;wVdO)_6u~%qnG={02e!3XTP1pY`u#7Rg&iLmKDh>O) z!{}-niy|K{?H&8bd>r7aG}zn}Zer}kEc9SbX2*LA-i)UP=22>p(KZZYp}l;K8dshg z#^R-22H`NF>+up@F*U9{H8fu^6!Zb5%Va4k#M?LUW;``Ioxi;kgu0Bn&`I-^W)PuT z!`RHLF*jOYB!O6iGk?Cj!M(u!0FSNm@YQ#P+bk}qr9y{dJ`9M$Us z7N)aqDc>%~m~1)dwe+G@kj+IF$Z8pn)iSSTw`>^#Z59_d0h`TZCfRaMw(JH@k}VhI z=owSw7h)opushJ_%{8F5eS!TE1twI3}&9 zopRAHtMe}L#Cv9}xJ^-W6I|?|D%a4mCKPr2_jP`Z{WCe5aF7gyJ1Ixw4w8cVBuR!R zIk=ExHA(ah6JAadzGfJ>Vc;(4wA7Q&F94^vmt^sYTO8%$tVg=7=6MwSDHI#JHBAG2z7;U}dLCpH^Eiv67fq2- zpv=#U!7UAq=HgrS`oqoA2UeAZZ3w2(2?u|l0zV9R93`2;v^Rrygg^6rz_8Fc6t${Ej$XEMp*R;}v?u1=vg)xqHMbK6+bN5~vSqJAI!nB5gF#^d#aZkX zJ9x=%SzU6;E8gUZAv4#Y>FGbDIi-oLPFP_(1jZ?rZF0^bv9ts=H7;`KS|m@_0t%nYIFOYD|^LmVS`-b3zU z#9w%0<6(i=f;C$vF0tPGWGxfsk7o>)TkQg@_*fA;T56@@-WIo1d`_3`Pp*-zIHjmw zJBj$J`RUYHoh?%FzO&Neffi|MB4kA?0qdI@f80h!7&mnChP3`q;152s)hiCe<+aMt z15_fbk;@RvZub_7U0(5fxYJ%)XwpOvw&>axzLgfSe9juu)ZxczwP9IxYlpyUNtCdaj)gAw2gRO+oknu|Ey`4!L}Bu3(5J7v$fK~K@$9+ zA^1VJ_#JSI_QOX!@ik$bG7U-!#Lr3Z#*-V6@|~3)9;P9$8$(`)6--T_4r54=F~pP0 zLFEyDrZ}gFkI8ixLng_d6VlUh7-5U_zzJDh4VSm*49<%zw$rd)@q4*s2TZkA9O2Om zR-7_veMCRx?M{cRdQGx=zX|fdfw0zbHGG)y#2;_mYcb%>}6bS-_B&!Q;5Ug z;B5q%BY>?8IK0gMJaM__&XVST%TWDisuTv3#X*R66F*{5zX3AZ@z6$n%L zIhj-$qsgX6X8p`)^&!oxa&rA%NZ<-##srfx@t>JMoWOVr<=L5J8N>h5`X-FGAktag zFhX~<*UByDx#a_H*~cwMyskcJ{adgP(oDt2Fc(EMI~}VV5zUaO=*|Pek0WmPU5AIi zLV~bMH(BW}tg-6Xq$qe^#mPN_+}uuh=MUZDGLLO?K83@LwTztP>`bY^x^t$mes8A8 z{g;_BE8g7i)r!4taYPn-l97)!4K2))LiC*{U%1LyHiG41Uvf?+86kd{BCEWL?CaN( 
ztpJ%@ZPdh{)mf(4i+cEhl}hW$^XkjcPB@%Y0C`3~U@81o0FJ3Yb&QNqvd=5FgI!*- zPm9ADnn(WEJ|Nf;Y&U3_u@g`F~vf_uT1 zI%e2gGJqpV0fScO(#y!xaDur7d1)?UX|JaQ!6!~0R%gpjOI`pIND<-BF#S72HhqYq z&M|pidzF~tgady|QV;3Jc@>4H9qMd4%y4drIqLX*SeRsXn}TF-DOacT6GxT2q@Pme zZjN}r2F&RGoeL{6L32es`ia#eh81xkonMEYWwTYK0|5bvmUS9(7z3q;!DjrRyRiKGMcX0g&bZ>Wj>fDn&9#AP49y z*)j~zR*T#^nSn;~4c#odNwz>#DPoFIx_%ZxbOWTBbXVjB1$n_zisIcVT^UM%Jv%(E zb5G9E)h3g@AFM5T1JkRGJ>07 z>$)hZ6QE~e&;dh|#eTUYK^|RNzYR+mIXQBhZDdX$;3eCzbt_4oYw#~)*6Qg(roX<* z<6Y|*u`e0k{fbBZJc9hcL@@Sx1jyALq#miK6Ja_Po9SwC^)|rCbve~jemWp;KnZai zo9%k&U16j6w@vrA-2546$;yZMH!HuTwb_4H<_2mUN- zSQ9ui>(?e}(N1i|3r`Jef|oHdhETj*Z6B zbt)eR23?th$*FlQ#|%Y04n^n&2%D2#zgu00?R`WyFL8K|YSqIPx17UfdwI!0+7m9- zf5NJC3`{&42fSu!{Rkr2J?_@}z2ZaiTE(@lPd&JF*8CR2WYYS%2m(VkT~Ww*pTHdr zw-U|z3fFcZg!u&>dvcjBqlNaNeD3NEe`QM&t7(O&M$>Y`T>9q;Nl->k4Q09i9iuVXAllIj z=CHH{v$Es^HpWi>7+qZC?M0VX_yNW$xaMKSb^oBxp0`Qwcmo|?aY@0=5ZBS@8C;g$ z4?f6etY+a4wZd1>%V*TG@E^GFVw#tzO)`6qD)J|#Y>R$sj#O}A z+Y0gUo_N@<4T_`mJ)q}A_-}AuH4???mo+Vome#2lJknKw%@|!#%@p}&PdqXWeH91j zTz`b!jWmWYk@uM}7YlhY`KWY``~BPzAROF6Wf?K1ABgsIUA-!m|$fH4;QmOcRSh&&QDT%5cQ^5GC}{QOZ38B||! z?;ibop)L-tPI!$LRjW4x(1$JynT1(H7K}W{^=9ylAr}&f@trX}?m1)JhQDTW(d)QJD@8v#Tg@9TSUps?-I*H_N9R;>qX3QNcJsUsQyGQZ4tMGA65eF1Y!Y#d8 zGp1I0bzxSWs<5%OFs56?++4+VAebZEcFQ)!H5ioK8WN}kqS5Z)+q8m6kn&Rx$w3kL zx|tU|hU7%-OfH8|u+JiINkSHbgn5d@1e7FdoQdSb^MlcorXi%zhQzeVbg$PYKcknM z^zv7F`3g#!pA0IWN2?p55f;hP3a6sOINKvKA8W`?*e>D+5*}@KmNEWX`A^=w{Rbsz zlT+g72ch71&^@#Q_jPBPR`?nWN!Ni^_;09s#8J88gjTpYqyB3m0d-}%b0ZLx9#<+A zC-`RU3=c4qj`GbFspS;cn~LN)s>CJ)u$p}SAZqQzmVS_Qb??z8AIC)6RR#n>eo!iNq<==*IS-fWy$gKDJ1Fy3dt=I0E* zG9C+6>|9%k*cO+bO$huM^F_}+bKAZf}2wCXDMhXa6tZ{4VxOFRaA^$DG z`_eSVZk=RjlMXR9F`UhEn(fTifFEEp`c0CJN#;?**(Yy`+mL~pq%00e{xOt-Kv4dB?Seug4r& zW|mcME;CKYWfS^Fu?gWEHlZP#O_*+H6UxU8-+Z!gFhx^8uF%d3(O=k{%?i<9IIkdn z2*39tIIzx1jLmg2Hu!Y|wxRt1+UehUbzYlqV$yW*Tt2F+Ahj-yr|d`rt$#9OkOcLBHJtpP1TQ>xz>W7o$FR~Yk9`6Bc@0C}}$;C=;M z4RFO7xJt5r;J)a&aZU`$gv=e?_Xhob#y&B1bkAyP*ME%{z~1K%d+1*W?m&H3(BBmH z&xv&SgKa){Fxa%R@y;?x_vsW}TZcaoYU>Pybo71$JKxvT-V|iJjaGe-wYM$n=q_z; zZ)Z2#LXQQ*NLS}Cm^E)knU1^D)aVcR@b4szU7c-#us_J=m>WBEh8le!e%;jA7HA8j zql4|cQg8ILP1Y8D2bJj=ni~TVta7`lvDtssjBm5XrjF1fjsEU7tiFgfHqL4cb@_d5 zEp5KW)}}ynJC}Qt7NC47%l=XlbyX*|;%T${ib?iIl#uR3%b$+|w-^KPaPg|&IS-ZcX74mIX+Li^I zf-6Y}AvWJKH{uVjtoH|7I)fce0iStV9*~38xzUR zjBU!m)(1O%{!j?$`80O^%23$f(Ey#fL)f36YHW7xyg9W>MY*nuYYt8Ne{KE_XNt~u z`t3%072Y*?m*W3!HUKi#;cM+|pH5b?Av;^|6t?D0E^r{GqK@2% zy^Hq}>PbL1;=Z*U_aMNPi0Q?s)Bn|N!P|`b0AM%bd>D0te~&kg`Ub!Oyw4I1ApIX1 z)z<(f@csz(lYkcF7esFbd=>8i>XdVw!|Onu@(s!%s7|mvRi|8`DOCs6jpuVDVAXLZ zhHEChYtkfXJmq-^;Ge-~gMg!N;ZnRHddC>Rs+vL}f5)=+m5%O?_CTn*C=v|Z74o(E zJDNhbceMF}ouST_@a?|Nj=P#d9i=O#6*&;K+FJaf@B)93a)aukX{BXFcjx6e997|9 zBovkrMgPj#DvWU$8S?ug!M5;9eI5s%s-XX|2v+eo*9Y5HAe=qo4~=wYcGq@eD8(Vg zzrx?{Xs5Ees3{}|R&+k<4;DEhZEhdM$?Bq(ruLA(=ZcBf>0Wu3J2Wef;o`wq%Os*4_P3sjUBIVcsWF7l?e zP`Y3ia;jPL8j2h%(z2gXT2@*do>;6DKw(mBom literal 0 HcmV?d00001 diff --git a/common/minFunc_2012/minFunc/conjGrad.m b/common/minFunc_2012/minFunc/conjGrad.m new file mode 100644 index 0000000..56a147e --- /dev/null +++ b/common/minFunc_2012/minFunc/conjGrad.m @@ -0,0 +1,86 @@ +function [x,k,res,negCurv] = cg(A,b,optTol,maxIter,verbose,precFunc,precArgs,matrixVectFunc,matrixVectArgs) +% [x,k,res,negCurv] = +% cg(A,b,optTol,maxIter,verbose,precFunc,precArgs,matrixVectFunc,matrixVect +% Args) +% Linear Conjugate Gradient, where optionally we use +% - preconditioner on vector v with precFunc(v,precArgs{:}) +% - matrix multipled by vector with matrixVectFunc(v,matrixVectArgs{:}) + +if nargin <= 4 + verbose = 0; +end + +x = 
+
+if nargin <= 4
+    verbose = 0;
+end
+
+x = zeros(size(b));
+r = -b;
+
+% Apply preconditioner (if supplied)
+if nargin >= 7 && ~isempty(precFunc)
+    y = precFunc(r,precArgs{:});
+else
+    y = r;
+end
+
+ry = r'*y;
+p = -y;
+k = 0;
+
+res = norm(r);
+done = 0;
+negCurv = [];
+while res > optTol && k < maxIter && ~done
+    % Compute matrix-vector product
+    if nargin >= 9
+        Ap = matrixVectFunc(p,matrixVectArgs{:});
+    else
+        Ap = A*p;
+    end
+    pAp = p'*Ap;
+
+    % Check for negative curvature
+    if pAp <= 1e-16
+        if verbose
+            fprintf('Negative Curvature Detected!\n');
+        end
+
+        if nargout == 4
+            if pAp < 0
+                negCurv = p;
+                return
+            end
+        end
+
+        if k == 0
+            if verbose
+                fprintf('First-Iter, Proceeding...\n');
+            end
+            done = 1;
+        else
+            if verbose
+                fprintf('Stopping\n');
+            end
+            break;
+        end
+    end
+
+    % Conjugate Gradient
+    alpha = ry/(pAp);
+    x = x + alpha*p;
+    r = r + alpha*Ap;
+
+    % If supplied, apply preconditioner
+    if nargin >= 7 && ~isempty(precFunc)
+        y = precFunc(r,precArgs{:});
+    else
+        y = r;
+    end
+
+    ry_new = r'*y;
+    beta = ry_new/ry;
+    p = -y + beta*p;
+    k = k + 1;
+
+    % Update variables
+    ry = ry_new;
+    res = norm(r);
+end
+end
diff --git a/common/minFunc_2012/minFunc/dampedUpdate.m b/common/minFunc_2012/minFunc/dampedUpdate.m
new file mode 100644
index 0000000..9c3998f
--- /dev/null
+++ b/common/minFunc_2012/minFunc/dampedUpdate.m
@@ -0,0 +1,43 @@
+function [old_dirs,old_stps,Hdiag] = dampedUpdate(y,s,corrections,debug,old_dirs,old_stps,Hdiag)
+% Damped L-BFGS update: if the curvature y'*s is too small relative to
+% s'*B*s, y is replaced by a convex combination of y and B*s so that the
+% update remains well-defined.
+
+%B0 = eye(length(y))/Hdiag;
+S = old_dirs(:,2:end);
+Y = old_stps(:,2:end);
+k = size(Y,2);
+L = zeros(k);
+for j = 1:k
+    for i = j+1:k
+        L(i,j) = S(:,i)'*Y(:,j);
+    end
+end
+D = diag(diag(S'*Y));
+N = [S/Hdiag Y];
+M = [S'*S/Hdiag L;L' -D];
+
+ys = y'*s;
+Bs = s/Hdiag - N*(M\(N'*s)); % Product B*s
+sBs = s'*Bs;
+
+eta = .02;
+if ys < eta*sBs
+    if debug
+        fprintf('Damped Update\n');
+    end
+    theta = min(max(0,((1-eta)*sBs)/(sBs - ys)),1);
+    y = theta*y + (1-theta)*Bs;
+end
+
+numCorrections = size(old_dirs,2);
+if numCorrections < corrections
+    % Full Update
+    old_dirs(:,numCorrections+1) = s;
+    old_stps(:,numCorrections+1) = y;
+else
+    % Limited-Memory Update
+    old_dirs = [old_dirs(:,2:corrections) s];
+    old_stps = [old_stps(:,2:corrections) y];
+end
+
+% Update scale of initial Hessian approximation
+Hdiag = (y'*s)/(y'*y);
\ No newline at end of file
diff --git a/common/minFunc_2012/minFunc/isLegal.m b/common/minFunc_2012/minFunc/isLegal.m
new file mode 100644
index 0000000..f808c97
--- /dev/null
+++ b/common/minFunc_2012/minFunc/isLegal.m
@@ -0,0 +1,2 @@
+function [legal] = isLegal(v)
+legal = sum(any(imag(v(:))))==0 && sum(isnan(v(:)))==0 && sum(isinf(v(:)))==0;
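+% Example of expected behavior (a sketch): isLegal([1 2 3]) returns true,
+% while isLegal([1 NaN]), isLegal([1 Inf]) and isLegal([1 2i]) return false.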
diff --git a/common/minFunc_2012/minFunc/lbfgs.m b/common/minFunc_2012/minFunc/lbfgs.m
new file mode 100644
index 0000000..458b955
--- /dev/null
+++ b/common/minFunc_2012/minFunc/lbfgs.m
@@ -0,0 +1,40 @@
+function [d] = lbfgs(g,s,y,Hdiag)
+% BFGS Search Direction
+%
+% This function returns the (L-BFGS) approximate inverse Hessian,
+% multiplied by the gradient
+%
+% If you pass in all previous directions/sizes, it will be the same as full BFGS
+% If you truncate to the k most recent directions/sizes, it will be L-BFGS
+%
+% s - previous step differences x_{t+1}-x_t (p by k)
+% y - previous gradient differences g_{t+1}-g_t (p by k)
+% g - gradient (p by 1)
+% Hdiag - value of initial Hessian diagonal elements (scalar)
+
+[p,k] = size(s);
+
+ro = zeros(k,1);
+for i = 1:k
+    ro(i,1) = 1/(y(:,i)'*s(:,i));
+end
+
+q = zeros(p,k+1);
+r = zeros(p,k+1);
+al = zeros(k,1);
+be = zeros(k,1);
+
+q(:,k+1) = g;
+
+% First loop of the two-loop recursion (newest correction first)
+for i = k:-1:1
+    al(i) = ro(i)*s(:,i)'*q(:,i+1);
+    q(:,i) = q(:,i+1)-al(i)*y(:,i);
+end
+
+% Multiply by Initial Hessian
+r(:,1) = Hdiag*q(:,1);
+
+% Second loop of the two-loop recursion
+for i = 1:k
+    be(i) = ro(i)*y(:,i)'*r(:,i);
+    r(:,i+1) = r(:,i) + s(:,i)*(al(i)-be(i));
+end
+d = r(:,k+1);
\ No newline at end of file
diff --git a/common/minFunc_2012/minFunc/lbfgsAdd.m b/common/minFunc_2012/minFunc/lbfgsAdd.m
new file mode 100644
index 0000000..41bdb95
--- /dev/null
+++ b/common/minFunc_2012/minFunc/lbfgsAdd.m
@@ -0,0 +1,32 @@
+function [S,Y,YS,lbfgs_start,lbfgs_end,Hdiag,skipped] = lbfgsAdd(y,s,S,Y,YS,lbfgs_start,lbfgs_end,Hdiag,useMex)
+ys = y'*s;
+skipped = 0;
+corrections = size(S,2);
+if ys > 1e-10
+    if lbfgs_end < corrections
+        lbfgs_end = lbfgs_end+1;
+        if lbfgs_start ~= 1
+            if lbfgs_start == corrections
+                lbfgs_start = 1;
+            else
+                lbfgs_start = lbfgs_start+1;
+            end
+        end
+    else
+        lbfgs_start = min(2,corrections);
+        lbfgs_end = 1;
+    end
+
+    if useMex
+        lbfgsAddC(y,s,Y,S,ys,int32(lbfgs_end));
+    else
+        S(:,lbfgs_end) = s;
+        Y(:,lbfgs_end) = y;
+    end
+    YS(lbfgs_end) = ys;
+
+    % Update scale of initial Hessian approximation
+    Hdiag = ys/(y'*y);
+else
+    skipped = 1;
+end
\ No newline at end of file
diff --git a/common/minFunc_2012/minFunc/lbfgsProd.m b/common/minFunc_2012/minFunc/lbfgsProd.m
new file mode 100644
index 0000000..9280769
--- /dev/null
+++ b/common/minFunc_2012/minFunc/lbfgsProd.m
@@ -0,0 +1,32 @@
+function [d] = lbfgsProd(g,S,Y,YS,lbfgs_start,lbfgs_end,Hdiag)
+% BFGS Search Direction
+%
+% This function returns the (L-BFGS) approximate inverse Hessian,
+% multiplied by the negative gradient
+
+% Set up indexing
+[nVars,maxCorrections] = size(S);
+if lbfgs_start == 1
+    ind = 1:lbfgs_end;
+    nCor = lbfgs_end-lbfgs_start+1;
+else
+    ind = [lbfgs_start:maxCorrections 1:lbfgs_end];
+    nCor = maxCorrections;
+end
+al = zeros(nCor,1);
+be = zeros(nCor,1);
+
+d = -g;
+for j = 1:length(ind)
+    i = ind(end-j+1);
+    al(i) = (S(:,i)'*d)/YS(i);
+    d = d-al(i)*Y(:,i);
+end
+
+% Multiply by Initial Hessian
+d = Hdiag*d;
+
+for i = ind
+    be(i) = (Y(:,i)'*d)/YS(i);
+    d = d + S(:,i)*(al(i)-be(i));
+end
diff --git a/common/minFunc_2012/minFunc/lbfgsUpdate.m b/common/minFunc_2012/minFunc/lbfgsUpdate.m
new file mode 100644
index 0000000..5e48228
--- /dev/null
+++ b/common/minFunc_2012/minFunc/lbfgsUpdate.m
@@ -0,0 +1,21 @@
+function [old_dirs,old_stps,Hdiag] = lbfgsUpdate(y,s,corrections,debug,old_dirs,old_stps,Hdiag)
+ys = y'*s;
+if ys > 1e-10
+    numCorrections = size(old_dirs,2);
+    if numCorrections < corrections
+        % Full Update
+        old_dirs(:,numCorrections+1) = s;
+        old_stps(:,numCorrections+1) = y;
+    else
+        % Limited-Memory Update
+        old_dirs = [old_dirs(:,2:corrections) s];
+        old_stps = [old_stps(:,2:corrections) y];
+    end
+
+    % Update scale of initial Hessian approximation
+    Hdiag = ys/(y'*y);
+else
+    if debug
+        fprintf('Skipping Update\n');
+    end
+end
\ No newline at end of file
diff --git a/common/minFunc_2012/minFunc/mchol.m b/common/minFunc_2012/minFunc/mchol.m
new file mode 100644
index 0000000..ad3dbde
--- /dev/null
+++ b/common/minFunc_2012/minFunc/mchol.m
@@ -0,0 +1,60 @@
+function [l,d,perm] = mchol(A,mu)
+% [l,d,perm] = mchol(A,mu)
+% Compute the Gill-Murray modified LDL factorization of A
+
+if nargin < 2
+    mu = 1e-12;
+end
+
+n = size(A,1);
+l = eye(n);
+d = zeros(n,1);
+perm = 1:n;
+
+c = zeros(n);
+for i = 1:n
+    c(i,i) = A(i,i);
+end
+
+% Compute modification parameters
+gamma = max(abs(diag(A)));
+xi = max(max(abs(A - diag(diag(A))))); % largest off-diagonal magnitude
+delta = mu*max(gamma+xi,1);
+if n > 1
+    beta = sqrt(max([gamma xi/sqrt(n^2-1) mu]));
+else
+    beta = sqrt(max([gamma mu]));
+end
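+
+% The bounds above follow Gill & Murray: delta is the smallest pivot
+% allowed and beta caps off-diagonal growth, so every d(j) computed in
+% the loop below is positive. For example (illustrative values), with
+% A = [1 2; 2 1] (eigenvalues 3 and -1) the result satisfies
+% l*diag(d)*l' = positive-definite modification of A(perm,perm).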
+
+for j = 1:n
+
+    % Find q that results in Best Permutation with j
+    [maxVal maxPos] = max(abs(diag(c(j:end,j:end))));
+    q = maxPos+j-1;
+
+    % Permute d,c,l,a
+    d([j q]) = d([q j]);
+    perm([j q]) = perm([q j]);
+    c([j q],:) = c([q j],:);
+    c(:,[j q]) = c(:,[q j]);
+    l([j q],:) = l([q j],:);
+    l(:,[j q]) = l(:,[q j]);
+    A([j q],:) = A([q j],:);
+    A(:,[j q]) = A(:,[q j]);
+
+    for s = 1:j-1
+        l(j,s) = c(j,s)/d(s);
+    end
+    for i = j+1:n
+        c(i,j) = A(i,j) - sum(l(j,1:j-1).*c(i,1:j-1));
+    end
+    theta = 0;
+    if j < n
+        theta = max(abs(c(j+1:n,j)));
+    end
+    d(j) = max([abs(c(j,j)) (theta/beta)^2 delta]);
+    if j < n
+        for i = j+1:n
+            c(i,i) = c(i,i) - (c(i,j)^2)/d(j);
+        end
+    end
+end
\ No newline at end of file
diff --git a/common/minFunc_2012/minFunc/mcholinc.m b/common/minFunc_2012/minFunc/mcholinc.m
new file mode 100644
index 0000000..fbe2550
--- /dev/null
+++ b/common/minFunc_2012/minFunc/mcholinc.m
@@ -0,0 +1,25 @@
+function [R,tau] = mcholinc(H,verbose)
+% Computes Cholesky of H+tau*I, for a suitably large tau that the matrix is pd
+
+p = size(H,1);
+
+beta = norm(H,'fro');
+if min(diag(H)) > 1e-12
+    tau = 0;
+else
+    if verbose
+        fprintf('Small Value on Diagonal, Adjusting Hessian\n');
+    end
+    tau = max(beta/2,1e-12);
+end
+while 1
+    [R,posDef] = chol(H+tau*eye(p));
+    if posDef == 0
+        break;
+    else
+        if verbose
+            fprintf('Cholesky Failed, Adjusting Hessian\n');
+        end
+        tau = max(2*tau,beta/2);
+    end
+end
diff --git a/common/minFunc_2012/minFunc/mex/lbfgsAddC.c b/common/minFunc_2012/minFunc/mex/lbfgsAddC.c
new file mode 100644
index 0000000..aa31bc2
--- /dev/null
+++ b/common/minFunc_2012/minFunc/mex/lbfgsAddC.c
@@ -0,0 +1,34 @@
+#include <math.h>
+#include "mex.h"
+
+/* See lbfgsAdd.m for details */
+/* This function will not exit gracefully on bad input! */
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{
+    /* Variable Declarations */
+    double *s,*y,*S, *Y, ys;
+    int i,j,nVars,lbfgs_end;
+
+    /* Get Input Pointers */
+    y = mxGetPr(prhs[0]);
+    s = mxGetPr(prhs[1]);
+    Y = mxGetPr(prhs[2]);
+    S = mxGetPr(prhs[3]);
+    ys= mxGetScalar(prhs[4]);
+    lbfgs_end = (int)mxGetScalar(prhs[5]);
+
+    if (!mxIsClass(prhs[5],"int32"))
+        mexErrMsgTxt("lbfgs_end must be int32");
+
+    /* Compute number of variables, maximum number of corrections */
+    nVars = mxGetDimensions(prhs[2])[0];
+
+    /* Copy the new pair (y,s) into column lbfgs_end of Y and S */
+    for(j=0;j<nVars;j++)
+    {
+        Y[(lbfgs_end-1)*nVars+j] = y[j];
+        S[(lbfgs_end-1)*nVars+j] = s[j];
+    }
+}
diff --git a/common/minFunc_2012/minFunc/mex/lbfgsC.c b/common/minFunc_2012/minFunc/mex/lbfgsC.c
new file mode 100644
--- /dev/null
+++ b/common/minFunc_2012/minFunc/mex/lbfgsC.c
@@ -0,0 +1,115 @@
+#include <math.h>
+#include "mex.h"
+
+/* See lbfgs.m for details! */
+/* This function may not exit gracefully on bad input! */
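+
+/* Explanatory note (a sketch of the convention used below): MATLAB stores
+   matrices column-major, so entry (j,i) of the nVars-by-nSteps matrix s
+   is s[i*nVars+j]; the loops in this file mirror lbfgs.m that way. */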
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{
+    /* Variable Declarations */
+    double *s, *y, *g, *H, *d, *ro, *alpha, *beta, *q, *r;
+    int nVars,nSteps,lhs_dims[2];
+    double temp;
+    int i,j;
+
+    /* Get Input Pointers */
+    g = mxGetPr(prhs[0]);
+    s = mxGetPr(prhs[1]);
+    y = mxGetPr(prhs[2]);
+    H = mxGetPr(prhs[3]);
+
+    /* Compute number of variables (p), rank of update (d) */
+    nVars = mxGetDimensions(prhs[1])[0];
+    nSteps = mxGetDimensions(prhs[1])[1];
+
+    /* Allocated Memory for Function Variables */
+    ro = mxCalloc(nSteps,sizeof(double));
+    alpha = mxCalloc(nSteps,sizeof(double));
+    beta = mxCalloc(nSteps,sizeof(double));
+    q = mxCalloc(nVars*(nSteps+1),sizeof(double));
+    r = mxCalloc(nVars*(nSteps+1),sizeof(double));
+
+    /* Set-up Output Vector */
+    lhs_dims[0] = nVars;
+    lhs_dims[1] = 1;
+    plhs[0] = mxCreateNumericArray(2,lhs_dims,mxDOUBLE_CLASS,mxREAL);
+    d = mxGetPr(plhs[0]);
+
+    /* ro = 1/(y(:,i)'*s(:,i)) */
+    for(i=0;i<nSteps;i++)
+    {
+        temp = 0;
+        for(j=0;j<nVars;j++)
+            temp += y[i*nVars+j]*s[i*nVars+j];
+        ro[i] = 1/temp;
+    }
+
+    /* q(:,nSteps+1) = g */
+    for(j=0;j<nVars;j++)
+        q[nSteps*nVars+j] = g[j];
+
+    /* First loop of the two-loop recursion */
+    for(i = nSteps-1;i >= 0;i--)
+    {
+        /* alpha(i) = ro(i)*s(:,i)'*q(:,i+1) */
+        alpha[i] = 0;
+        for(j=0;j<nVars;j++)
+            alpha[i] += s[i*nVars+j]*q[(i+1)*nVars+j];
+        alpha[i] *= ro[i];
+
+        /* q(:,i) = q(:,i+1)-alpha(i)*y(:,i) */
+        for(j=0;j<nVars;j++)
+            q[i*nVars+j] = q[(i+1)*nVars+j] - alpha[i]*y[i*nVars+j];
+    }
+
+    /* Multiply by Initial Hessian: r(:,1) = Hdiag*q(:,1) */
+    for(j=0;j<nVars;j++)
+        r[j] = H[0]*q[j];
+
+    /* Second loop of the two-loop recursion */
+    for(i=0;i<nSteps;i++)
+    {
+        /* beta(i) = ro(i)*y(:,i)'*r(:,i) */
+        beta[i] = 0;
+        for(j=0;j<nVars;j++)
+            beta[i] += y[i*nVars+j]*r[i*nVars+j];
+        beta[i] *= ro[i];
+
+        /* r(:,i+1) = r(:,i) + s(:,i)*(alpha(i)-beta(i)) */
+        for(j=0;j<nVars;j++)
+            r[(i+1)*nVars+j] = r[i*nVars+j] + s[i*nVars+j]*(alpha[i]-beta[i]);
+    }
+
+    /* d = r(:,nSteps+1) */
+    for(j=0;j<nVars;j++)
+        d[j] = r[nSteps*nVars+j];
+
+    /* Free Memory */
+    mxFree(ro);
+    mxFree(alpha);
+    mxFree(beta);
+    mxFree(q);
+    mxFree(r);
+}
diff --git a/common/minFunc_2012/minFunc/mex/lbfgsProdC.c b/common/minFunc_2012/minFunc/mex/lbfgsProdC.c
new file mode 100644
--- /dev/null
+++ b/common/minFunc_2012/minFunc/mex/lbfgsProdC.c
@@ -0,0 +1,93 @@
+#include <math.h>
+#include "mex.h"
+
+/* See lbfgsProd.m for details */
+/* This function will not exit gracefully on bad input! */
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{
+    /* Variable Declarations */
+    double *S, *Y, *YS, *g, Hdiag, *d, *alpha, *beta;
+    int i,j,nVars,nCor,maxCor,lbfgs_start,lbfgs_end;
+
+    /* Get Input Pointers */
+    g = mxGetPr(prhs[0]);
+    S = mxGetPr(prhs[1]);
+    Y = mxGetPr(prhs[2]);
+    YS= mxGetPr(prhs[3]);
+    lbfgs_start = (int)mxGetScalar(prhs[4]);
+    lbfgs_end = (int)mxGetScalar(prhs[5]);
+    Hdiag = mxGetScalar(prhs[6]);
+
+    if (!mxIsClass(prhs[4],"int32")||!mxIsClass(prhs[5],"int32"))
+        mexErrMsgTxt("lbfgs_start and lbfgs_end must be int32");
+
+    /* Compute number of variables, maximum number of corrections */
+    nVars = mxGetDimensions(prhs[1])[0];
+    maxCor = mxGetDimensions(prhs[1])[1];
+
+    /* Compute number of corrections available */
+    if (lbfgs_start == 1)
+        nCor = lbfgs_end-lbfgs_start+1;
+    else
+        nCor = maxCor;
+
+    /* Allocate Memory for Local Variables */
+    alpha = mxCalloc(nCor,sizeof(double));
+    beta = mxCalloc(nCor,sizeof(double));
+
+    /* Set-up Output Vector */
+    plhs[0] = mxCreateDoubleMatrix(nVars,1,mxREAL);
+    d = mxGetPr(plhs[0]);
+
+    /* d = -g */
+    for(j=0;j<nVars;j++)
+        d[j] = -g[j];
+
+    /* Backward pass over the stored corrections, newest first;
+       when memory is full the oldest corrections wrap to columns
+       lbfgs_start-1..maxCor-1 */
+    for(i = lbfgs_end-1;i >= 0;i--) {
+        alpha[i] = 0;
+        for(j=0;j<nVars;j++)
+            alpha[i] += S[i*nVars+j]*d[j];
+        alpha[i] /= YS[i];
+        for(j=0;j<nVars;j++)
+            d[j] -= alpha[i]*Y[i*nVars+j];
+    }
+    if (lbfgs_start != 1) {
+        for(i = maxCor-1;i >= lbfgs_start-1;i--) {
+            alpha[i] = 0;
+            for(j=0;j<nVars;j++)
+                alpha[i] += S[i*nVars+j]*d[j];
+            alpha[i] /= YS[i];
+            for(j=0;j<nVars;j++)
+                d[j] -= alpha[i]*Y[i*nVars+j];
+        }
+    }
+
+    /* Multiply by Initial Hessian */
+    for(j=0;j<nVars;j++)
+        d[j] *= Hdiag;
+
+    /* Forward pass, oldest correction first */
+    if (lbfgs_start != 1) {
+        for(i = lbfgs_start-1;i < maxCor;i++) {
+            beta[i] = 0;
+            for(j=0;j<nVars;j++)
+                beta[i] += Y[i*nVars+j]*d[j];
+            beta[i] /= YS[i];
+            for(j=0;j<nVars;j++)
+                d[j] += (alpha[i]-beta[i])*S[i*nVars+j];
+        }
+    }
+    for(i = 0;i < lbfgs_end;i++) {
+        beta[i] = 0;
+        for(j=0;j<nVars;j++)
+            beta[i] += Y[i*nVars+j]*d[j];
+        beta[i] /= YS[i];
+        for(j=0;j<nVars;j++)
+            d[j] += (alpha[i]-beta[i])*S[i*nVars+j];
+    }
+
+    /* Free Memory */
+    mxFree(alpha);
+    mxFree(beta);
+}
diff --git a/common/minFunc_2012/minFunc/mex/mcholC.c b/common/minFunc_2012/minFunc/mex/mcholC.c
new file mode 100644
--- /dev/null
+++ b/common/minFunc_2012/minFunc/mex/mcholC.c
@@ -0,0 +1,199 @@
+#include <math.h>
+#include "mex.h"
+
+double mymax(double x, double y)
+{
+    if (x > y)
+        return x;
+    else
+        return y;
+}
+
+double absolute(double x)
+{
+    if (x >= -x)
+        return x;
+    else
+        return -x;
+}
+
+void permuteInt(int *x, int p, int q)
+{
+    int temp;
+    temp = x[p];
+    x[p] = x[q];
+    x[q] = temp;
+}
+
+void permute(double *x, int p, int q)
+{
+    double temp;
+    temp = x[p];
+    x[p] = x[q];
+    x[q] = temp;
+}
+
+void permuteRows(double *x, int p, int q,int n)
+{
+    int i;
+    double temp;
+    for(i = 0; i < n; i++)
+    {
+        temp = x[p+i*n];
+        x[p+i*n] = x[q+i*n];
+        x[q+i*n] = temp;
+    }
+}
+
+void permuteCols(double *x, int p, int q,int n)
+{
+    int i;
+    double temp;
+    for(i = 0; i < n; i++)
+    {
+        temp = x[i+p*n];
+        x[i+p*n] = x[i+q*n];
+        x[i+q*n] = temp;
+    }
+}
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{
+    int n,sizL[2],sizD[2],i,j,q,s,
+        *P;
+
+    double mu,gamma,xi,delta,beta,maxVal,theta,
+        *c, *H, *L, *D, *A;
+
+    /* Input */
+    H = mxGetPr(prhs[0]);
+    if (nrhs == 1)
+    {
+        mu = 1e-12;
+    }
+    else
+    {
+        mu = mxGetScalar(prhs[1]);
+    }
+
+    /* Compute Sizes */
+    n = mxGetDimensions(prhs[0])[0];
+
+    /* Form Output */
+    sizL[0] = n;
+ 
sizL[1] = n; + plhs[0] = mxCreateNumericArray(2,sizL,mxDOUBLE_CLASS,mxREAL); + L = mxGetPr(plhs[0]); + sizD[0] = n; + sizD[1] = 1; + plhs[1] = mxCreateNumericArray(2,sizD,mxDOUBLE_CLASS,mxREAL); + D = mxGetPr(plhs[1]); + plhs[2] = mxCreateNumericArray(2,sizD,mxINT32_CLASS,mxREAL); + P = (int*)mxGetData(plhs[2]); + + /* Initialize */ + c = mxCalloc(n*n,sizeof(double)); + A = mxCalloc(n*n,sizeof(double)); + + for (i = 0; i < n; i++) + { + P[i] = i; + for (j = 0;j < n; j++) + { + A[i+n*j] = H[i+n*j]; + } + } + + gamma = 0; + for (i = 0; i < n; i++) + { + L[i+n*i] = 1; + c[i+n*i] = A[i+n*i]; + } + + /* Compute modification parameters */ + gamma = -1; + xi = -1; + for (i = 0; i < n; i++) + { + gamma = mymax(gamma,absolute(A[i+n*i])); + for (j = 0;j < n; j++) + { + /*printf("A(%d,%d) = %f, %f\n",i,j,A[i+n*j],absolute(A[i+n*j]));*/ + if (i != j) + xi = mymax(xi,absolute(A[i+n*j])); + } + } + delta = mu*mymax(gamma+xi,1); + + if (n > 1) + { + beta = sqrt(mymax(gamma,mymax(mu,xi/sqrt(n*n-1)))); + } + else + { + beta = sqrt(mymax(gamma,mu)); + } + + for (j = 0; j < n; j++) + { + + /* Find q that results in Best Permutation with j */ + maxVal = -1; + q = 0; + for(i = j; i < n; i++) + { + if (absolute(c[i+n*i]) > maxVal) + { + maxVal = mymax(maxVal,absolute(c[i+n*i])); + q = i; + } + } + + /* Permute D,c,L,A,P */ + permute(D,j,q); + permuteInt(P,j,q); + permuteRows(c,j,q,n); + permuteCols(c,j,q,n); + permuteRows(L,j,q,n); + permuteCols(L,j,q,n); + permuteRows(A,j,q,n); + permuteCols(A,j,q,n); + + for(s = 0; s <= j-1; s++) + L[j+n*s] = c[j+n*s]/D[s]; + + for(i = j+1; i < n; i++) + { + c[i+j*n] = A[i+j*n]; + for(s = 0; s <= j-1; s++) + { + c[i+j*n] -= L[j+n*s]*c[i+n*s]; + } + } + + theta = 0; + if (j < n-1) + { + for(i = j+1;i < n; i++) + theta = mymax(theta,absolute(c[i+n*j])); + } + + D[j] = mymax(absolute(c[j+n*j]),mymax(delta,theta*theta/(beta*beta))); + + if (j < n-1) + { + for(i = j+1; i < n; i++) + { + c[i+n*i] = c[i+n*i] - c[i+n*j]*c[i+n*j]/D[j]; + } + } + + } + + for(i = 0; i < n; i++) + P[i]++; + + mxFree(c); + mxFree(A); +} \ No newline at end of file diff --git a/common/minFunc_2012/minFunc/minFunc.m b/common/minFunc_2012/minFunc/minFunc.m new file mode 100644 index 0000000..a731da3 --- /dev/null +++ b/common/minFunc_2012/minFunc/minFunc.m @@ -0,0 +1,1170 @@ +function [x,f,exitflag,output] = minFunc(funObj,x0,options,varargin) +% [x,f,exitflag,output] = minFunc(funObj,x0,options,varargin) +% +% Unconstrained optimizer using a line search strategy +% +% Uses an interface very similar to fminunc +% (it doesn't support all of the optimization toolbox options, +% but supports many other options). 
+%
+% It computes descent directions using one of ('Method'):
+% - 'sd': Steepest Descent
+%     (no previous information used, not recommended)
+% - 'csd': Cyclic Steepest Descent
+%     (uses previous step length for a fixed length cycle)
+% - 'bb': Barzilai and Borwein Gradient
+%     (uses only previous step)
+% - 'cg': Non-Linear Conjugate Gradient
+%     (uses only previous step and a vector beta)
+% - 'scg': Scaled Non-Linear Conjugate Gradient
+%     (uses previous step and a vector beta,
+%     and Hessian-vector products to initialize line search)
+% - 'pcg': Preconditioned Non-Linear Conjugate Gradient
+%     (uses only previous step and a vector beta, preconditioned version)
+% - 'lbfgs': Quasi-Newton with Limited-Memory BFGS Updating
+%     (default: uses a predetermined number of previous steps to form a
+%     low-rank Hessian approximation)
+% - 'newton0': Hessian-Free Newton
+%     (numerically computes Hessian-Vector products)
+% - 'pnewton0': Preconditioned Hessian-Free Newton
+%     (numerically computes Hessian-Vector products, preconditioned
+%     version)
+% - 'qnewton': Quasi-Newton Hessian approximation
+%     (uses dense Hessian approximation)
+% - 'mnewton': Newton's method with Hessian calculation after every
+%     user-specified number of iterations
+%     (needs user-supplied Hessian matrix)
+% - 'newton': Newton's method with Hessian calculation every iteration
+%     (needs user-supplied Hessian matrix)
+% - 'tensor': Tensor
+%     (needs user-supplied Hessian matrix and Tensor of 3rd partial derivatives)
+%
+% Several line search strategies are available for finding a step length satisfying
+% the termination criteria ('LS_type')
+% - 0 : A backtracking line-search based on the Armijo condition (default for 'bb')
+% - 1 : A bracketing line-search based on the strong Wolfe conditions (default for all other methods)
+% - 2 : The line-search from the Matlab Optimization Toolbox (requires Matlab's linesearch.m to be added to the path)
+%
+% For the Armijo line-search, several interpolation strategies are available ('LS_interp'):
+% - 0 : Step size halving
+% - 1 : Polynomial interpolation using new function values
+% - 2 : Polynomial interpolation using new function and gradient values (default)
+%
+% When (LS_interp = 1), the default setting of (LS_multi = 0) uses quadratic interpolation,
+% while if (LS_multi = 1) it uses cubic interpolation when more than one point is available.
+%
+% When (LS_interp = 2), the default setting of (LS_multi = 0) uses cubic interpolation,
+% while if (LS_multi = 1) it uses quartic or quintic interpolation when more than one point is available
+%
+% To use the non-monotonic Armijo condition, set the 'Fref' value to the number of previous function values to store
+%
+% For the Wolfe line-search, these interpolation strategies are available ('LS_interp'):
+% - 0 : Step Size Doubling and Bisection
+% - 1 : Cubic interpolation/extrapolation using new function and gradient values (default)
+% - 2 : Mixed quadratic/cubic interpolation/extrapolation
+%
+% Several strategies for choosing the initial step size are available ('LS_init'):
+% - 0: Always try an initial step length of 1 (default for all except 'sd' and 'cg')
+%     (t = 1)
+% - 1: Use a step similar to the previous step
+%     (t = t_old*min(2,g'd/g_old'd_old))
+% - 2: Quadratic Initialization using previous function value and new
+%     function value/gradient (use this if steps tend to be very long, default for 'sd' and 'cg')
+%     (t = min(1,2*(f-f_old)/g))
+% - 3: The minimum between 1 and twice the previous step length
+%     (t = min(1,2*t))
+% - 4: The scaled conjugate gradient step length (may accelerate
+%     conjugate gradient methods, but requires a Hessian-vector product, default for 'scg')
+%     (t = g'd/d'Hd)
+%
+% Inputs:
+% funObj - is a function handle
+% x0 - is a starting vector
+% options - is a struct containing parameters (defaults are used for non-existent or blank fields)
+% varargin{:} - all other arguments are passed as additional arguments to funObj
+%
+% Outputs:
+% x is the minimum value found
+% f is the function value at the minimum found
+% exitflag returns an exit condition
+% output returns a structure with other information
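+%
+% A minimal usage sketch (hypothetical objective; any funObj that
+% returns [f,g] works the same way):
+%   funObj = @(w) deal(0.5*(w'*w), w);   % f = ||w||^2/2, g = w
+%   options = struct('Method','lbfgs','MaxIter',100,'Display','final');
+%   w = minFunc(funObj,randn(5,1),options);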
It +% should have the following interface: +% outputFcn(x,iterationType,i,funEvals,f,t,gtd,g,d,optCond,varargin{:}); +% useMex - where applicable, use mex files to speed things up (default: 1) +% +% Method-specific input options: +% newton: +% HessianModify - type of Hessian modification for direct solvers to +% use if the Hessian is not positive definite (default: 0) +% 0: Minimum Euclidean norm s.t. eigenvalues sufficiently large +% (requires eigenvalues on iterations where matrix is not pd) +% 1: Start with (1/2)*||A||_F and increment until Cholesky succeeds +% (an approximation to method 0, does not require eigenvalues) +% 2: Modified LDL factorization +% (only 1 generalized Cholesky factorization done and no eigenvalues required) +% 3: Modified Spectral Decomposition +% (requires eigenvalues) +% 4: Modified Symmetric Indefinite Factorization +% 5: Uses the eigenvector of the smallest eigenvalue as negative +% curvature direction +% cgSolve - use conjugate gradient instead of direct solver (default: 0) +% 0: Direct Solver +% 1: Conjugate Gradient +% 2: Conjugate Gradient with Diagonal Preconditioner +% 3: Conjugate Gradient with LBFGS Preconditioner +% x: Conjugate Graident with Symmetric Successive Over Relaxation +% Preconditioner with parameter x +% (where x is a real number in the range [0,2]) +% x: Conjugate Gradient with Incomplete Cholesky Preconditioner +% with drop tolerance -x +% (where x is a real negative number) +% numDiff - compute Hessian numerically +% (default: 0, done with complex differentials if useComplex = 1) +% LS_saveHessiancomp - when on, only computes the Hessian at the +% first and last iteration of the line search (default: 1) +% mnewton: +% HessianIter - number of iterations to use same Hessian (default: 5) +% qnewton: +% initialHessType - scale initial Hessian approximation (default: 1) +% qnUpdate - type of quasi-Newton update (default: 3): +% 0: BFGS +% 1: SR1 (when it is positive-definite, otherwise BFGS) +% 2: Hoshino +% 3: Self-Scaling BFGS +% 4: Oren's Self-Scaling Variable Metric method +% 5: McCormick-Huang asymmetric update +% Damped - use damped BFGS update (default: 1) +% newton0/pnewton0: +% HvFunc - user-supplied function that returns Hessian-vector products +% (by default, these are computed numerically using autoHv) +% HvFunc should have the following interface: HvFunc(v,x,varargin{:}) +% useComplex - use a complex perturbation to get high accuracy +% Hessian-vector products (default: 0) +% (the increased accuracy can make the method much more efficient, +% but gradient code must properly support complex inputs) +% useNegCurv - a negative curvature direction is used as the descent +% direction if one is encountered during the cg iterations +% (default: 1) +% precFunc (for pnewton0 only) - user-supplied preconditioner +% (by default, an L-BFGS preconditioner is used) +% precFunc should have the following interfact: +% precFunc(v,x,varargin{:}) +% lbfgs: +% Corr - number of corrections to store in memory (default: 100) +% (higher numbers converge faster but use more memory) +% Damped - use damped update (default: 0) +% cg/scg/pcg: +% cgUpdate - type of update (default for cg/scg: 2, default for pcg: 1) +% 0: Fletcher Reeves +% 1: Polak-Ribiere +% 2: Hestenes-Stiefel (not supported for pcg) +% 3: Gilbert-Nocedal +% HvFunc (for scg only)- user-supplied function that returns Hessian-vector +% products +% (by default, these are computed numerically using autoHv) +% HvFunc should have the following interface: +% HvFunc(v,x,varargin{:}) +% precFunc 
+%   precFunc (for pcg only) - user-supplied preconditioner
+%       (by default, an L-BFGS preconditioner is used)
+%       precFunc should have the following interface:
+%       precFunc(v,x,varargin{:})
+% bb:
+%   bbType - type of bb step (default: 0)
+%       0: min_alpha ||delta_x - alpha delta_g||_2
+%       1: min_alpha ||alpha delta_x - delta_g||_2
+%       2: Conic BB
+%       3: Gradient method with retards
+% csd:
+%   cycle - length of cycle (default: 3)
+%
+% Supported Output Options
+% iterations - number of iterations taken
+% funcCount - number of function evaluations
+% algorithm - algorithm used
+% firstorderopt - first-order optimality
+% message - exit message
+% trace.funccount - function evaluations after each iteration
+% trace.fval - function value after each iteration
+%
+% Author: Mark Schmidt (2005)
+% Web: http://www.di.ens.fr/~mschmidt/Software/minFunc.html
+%
+% Sources (in order of how much the source material contributes):
+% J. Nocedal and S.J. Wright. 1999. "Numerical Optimization". Springer Verlag.
+% R. Fletcher. 1987. "Practical Methods of Optimization". Wiley.
+% J. Demmel. 1997. "Applied Numerical Linear Algebra". SIAM.
+% R. Barrett, M. Berry, T. Chan, J. Demmel, J. Dongarra, V. Eijkhout, R.
+% Pozo, C. Romine, and H. van der Vorst. 1994. "Templates for the Solution of
+% Linear Systems: Building Blocks for Iterative Methods". SIAM.
+% J. More and D. Thuente. "Line search algorithms with guaranteed
+% sufficient decrease". ACM Trans. Math. Softw. vol 20, 286-307, 1994.
+% M. Raydan. "The Barzilai and Borwein gradient method for the large
+% scale unconstrained minimization problem". SIAM J. Optim., 7, 26-33,
+% (1997).
+% "Mathematical Optimization". The Computational Science Education
+% Project. 1995.
+% C. Kelley. 1999. "Iterative Methods for Optimization". Frontiers in
+% Applied Mathematics. SIAM.
+
+if nargin < 3
+    options = [];
+end
+
+% Get Parameters
+[verbose,verboseI,debug,doPlot,maxFunEvals,maxIter,optTol,progTol,method,...
+    corrections,c1,c2,LS_init,cgSolve,qnUpdate,cgUpdate,initialHessType,...
+    HessianModify,Fref,useComplex,numDiff,LS_saveHessianComp,...
+    Damped,HvFunc,bbType,cycle,...
+    HessianIter,outputFcn,useMex,useNegCurv,precFunc,...
+    LS_type,LS_interp,LS_multi,checkGrad] = ...
+ minFunc_processInputOptions(options); + +% Constants +SD = 0; +CSD = 1; +BB = 2; +CG = 3; +PCG = 4; +LBFGS = 5; +QNEWTON = 6; +NEWTON0 = 7; +NEWTON = 8; +TENSOR = 9; + +% Initialize +p = length(x0); +d = zeros(p,1); +x = x0; +t = 1; + +% If necessary, form numerical differentiation functions +funEvalMultiplier = 1; +if useComplex + numDiffType = 3; +else + numDiffType = numDiff; +end +if numDiff && method ~= TENSOR + varargin(3:end+2) = varargin(1:end); + varargin{1} = numDiffType; + varargin{2} = funObj; + if method ~= NEWTON + if debug + if useComplex + fprintf('Using complex differentials for gradient computation\n'); + else + fprintf('Using finite differences for gradient computation\n'); + end + end + funObj = @autoGrad; + else + if debug + if useComplex + fprintf('Using complex differentials for Hessian computation\n'); + else + fprintf('Using finite differences for Hessian computation\n'); + end + end + funObj = @autoHess; + end + + if method == NEWTON0 && useComplex == 1 + if debug + fprintf('Turning off the use of complex differentials for Hessian-vector products\n'); + end + useComplex = 0; + end + + if useComplex + funEvalMultiplier = p; + elseif numDiff == 2 + funEvalMultiplier = 2*p; + else + funEvalMultiplier = p+1; + end +end + +% Evaluate Initial Point +if method < NEWTON + [f,g] = funObj(x,varargin{:}); + computeHessian = 0; +else + [f,g,H] = funObj(x,varargin{:}); + computeHessian = 1; +end +funEvals = 1; + +% Derivative Check +if checkGrad + if numDiff + fprintf('Can not do derivative checking when numDiff is 1\n'); + pause + end + derivativeCheck(funObj,x,1,numDiffType,varargin{:}); % Checks gradient + if computeHessian + derivativeCheck(funObj,x,2,numDiffType,varargin{:}); + end +end + +% Output Log +if verboseI + fprintf('%10s %10s %15s %15s %15s\n','Iteration','FunEvals','Step Length','Function Val','Opt Cond'); +end + +% Compute optimality of initial point +optCond = max(abs(g)); + +if nargout > 3 + % Initialize Trace + trace.fval = f; + trace.funcCount = funEvals; + trace.optCond = optCond; +end + +% Exit if initial point is optimal +if optCond <= optTol + exitflag=1; + msg = 'Optimality Condition below optTol'; + if verbose + fprintf('%s\n',msg); + end + if nargout > 3 + output = struct('iterations',0,'funcCount',1,... + 'algorithm',method,'firstorderopt',max(abs(g)),'message',msg,'trace',trace); + end + return; +end + +% Output Function +if ~isempty(outputFcn) + stop = outputFcn(x,'init',0,funEvals,f,[],[],g,[],max(abs(g)),varargin{:}); + if stop + exitflag=-1; + msg = 'Stopped by output function'; + if verbose + fprintf('%s\n',msg); + end + if nargout > 3 + output = struct('iterations',0,'funcCount',1,... 
+ 'algorithm',method,'firstorderopt',max(abs(g)),'message',msg,'trace',trace); + end + return; + end +end + +% Perform up to a maximum of 'maxIter' descent steps: +for i = 1:maxIter + + % ****************** COMPUTE DESCENT DIRECTION ***************** + + switch method + case SD % Steepest Descent + d = -g; + + case CSD % Cyclic Steepest Descent + + if mod(i,cycle) == 1 % Use Steepest Descent + alpha = 1; + LS_init = 2; + LS_type = 1; % Wolfe line search + elseif mod(i,cycle) == mod(1+1,cycle) % Use Previous Step + alpha = t; + LS_init = 0; + LS_type = 0; % Armijo line search + end + d = -alpha*g; + + case BB % Steepest Descent with Barzilai and Borwein Step Length + + if i == 1 + d = -g; + else + y = g-g_old; + s = t*d; + if bbType == 0 + yy = y'*y; + alpha = (s'*y)/(yy); + if alpha <= 1e-10 || alpha > 1e10 + alpha = 1; + end + elseif bbType == 1 + sy = s'*y; + alpha = (s'*s)/sy; + if alpha <= 1e-10 || alpha > 1e10 + alpha = 1; + end + elseif bbType == 2 % Conic Interpolation ('Modified BB') + sy = s'*y; + ss = s'*s; + alpha = ss/sy; + if alpha <= 1e-10 || alpha > 1e10 + alpha = 1; + end + alphaConic = ss/(6*(myF_old - f) + 4*g'*s + 2*g_old'*s); + if alphaConic > .001*alpha && alphaConic < 1000*alpha + alpha = alphaConic; + end + elseif bbType == 3 % Gradient Method with retards (bb type 1, random selection of previous step) + sy = s'*y; + alpha = (s'*s)/sy; + if alpha <= 1e-10 || alpha > 1e10 + alpha = 1; + end + v(1+mod(i-2,5)) = alpha; + alpha = v(ceil(rand*length(v))); + end + d = -alpha*g; + end + g_old = g; + myF_old = f; + + + case CG % Non-Linear Conjugate Gradient + + if i == 1 + d = -g; % Initially use steepest descent direction + else + gotgo = g_old'*g_old; + + if cgUpdate == 0 + % Fletcher-Reeves + beta = (g'*g)/(gotgo); + elseif cgUpdate == 1 + % Polak-Ribiere + beta = (g'*(g-g_old)) /(gotgo); + elseif cgUpdate == 2 + % Hestenes-Stiefel + beta = (g'*(g-g_old))/((g-g_old)'*d); + else + % Gilbert-Nocedal + beta_FR = (g'*(g-g_old)) /(gotgo); + beta_PR = (g'*g-g'*g_old)/(gotgo); + beta = max(-beta_FR,min(beta_PR,beta_FR)); + end + + d = -g + beta*d; + + % Restart if not a direction of sufficient descent + if g'*d > -progTol + if debug + fprintf('Restarting CG\n'); + end + beta = 0; + d = -g; + end + + % Old restart rule: + %if beta < 0 || abs(gtgo)/(gotgo) >= 0.1 || g'*d >= 0 + + end + g_old = g; + + case PCG % Preconditioned Non-Linear Conjugate Gradient + + % Apply preconditioner to negative gradient + if isempty(precFunc) + % Use L-BFGS Preconditioner + if i == 1 + S = zeros(p,corrections); + Y = zeros(p,corrections); + YS = zeros(corrections,1); + lbfgs_start = 1; + lbfgs_end = 0; + Hdiag = 1; + s = -g; + else + [S,Y,YS,lbfgs_start,lbfgs_end,Hdiag,skipped] = lbfgsAdd(g-g_old,t*d,S,Y,YS,lbfgs_start,lbfgs_end,Hdiag,useMex); + if debug && skipped + fprintf('Skipped L-BFGS updated\n'); + end + if useMex + s = lbfgsProdC(g,S,Y,YS,int32(lbfgs_start),int32(lbfgs_end),Hdiag); + else + s = lbfgsProd(g,S,Y,YS,lbfgs_start,lbfgs_end,Hdiag); + end + end + else % User-supplied preconditioner + s = precFunc(-g,x,varargin{:}); + end + + if i == 1 + d = s; + else + + if cgUpdate == 0 + % Preconditioned Fletcher-Reeves + beta = (g'*s)/(g_old'*s_old); + elseif cgUpdate < 3 + % Preconditioned Polak-Ribiere + beta = (g'*(s-s_old))/(g_old'*s_old); + else + % Preconditioned Gilbert-Nocedal + beta_FR = (g'*s)/(g_old'*s_old); + beta_PR = (g'*(s-s_old))/(g_old'*s_old); + beta = max(-beta_FR,min(beta_PR,beta_FR)); + end + d = s + beta*d; + + if g'*d > -progTol + if debug + fprintf('Restarting CG\n'); + 
end + beta = 0; + d = s; + end + + end + g_old = g; + s_old = s; + case LBFGS % L-BFGS + + % Update the direction and step sizes + if Damped + if i == 1 + d = -g; % Initially use steepest descent direction + old_dirs = zeros(length(g),0); + old_stps = zeros(length(d),0); + Hdiag = 1; + else + [old_dirs,old_stps,Hdiag] = dampedUpdate(g-g_old,t*d,corrections,debug,old_dirs,old_stps,Hdiag); + if useMex + d = lbfgsC(-g,old_dirs,old_stps,Hdiag); + else + d = lbfgs(-g,old_dirs,old_stps,Hdiag); + end + end + else + if i == 1 + d = -g; % Initially use steepest descent direction + S = zeros(p,corrections); + Y = zeros(p,corrections); + YS = zeros(corrections,1); + lbfgs_start = 1; + lbfgs_end = 0; + Hdiag = 1; + else + [S,Y,YS,lbfgs_start,lbfgs_end,Hdiag,skipped] = lbfgsAdd(g-g_old,t*d,S,Y,YS,lbfgs_start,lbfgs_end,Hdiag,useMex); + if debug && skipped + fprintf('Skipped L-BFGS updated\n'); + end + if useMex + d = lbfgsProdC(g,S,Y,YS,int32(lbfgs_start),int32(lbfgs_end),Hdiag); + else + d = lbfgsProd(g,S,Y,YS,lbfgs_start,lbfgs_end,Hdiag); + end + end + end + g_old = g; + + case QNEWTON % Use quasi-Newton Hessian approximation + + if i == 1 + d = -g; + else + % Compute difference vectors + y = g-g_old; + s = t*d; + + if i == 2 + % Make initial Hessian approximation + if initialHessType == 0 + % Identity + if qnUpdate <= 1 + R = eye(length(g)); + else + H = eye(length(g)); + end + else + % Scaled Identity + if debug + fprintf('Scaling Initial Hessian Approximation\n'); + end + if qnUpdate <= 1 + % Use Cholesky of Hessian approximation + R = sqrt((y'*y)/(y'*s))*eye(length(g)); + else + % Use Inverse of Hessian approximation + H = eye(length(g))*(y'*s)/(y'*y); + end + end + end + + if qnUpdate == 0 % Use BFGS updates + Bs = R'*(R*s); + if Damped + eta = .02; + if y'*s < eta*s'*Bs + if debug + fprintf('Damped Update\n'); + end + theta = min(max(0,((1-eta)*s'*Bs)/(s'*Bs - y'*s)),1); + y = theta*y + (1-theta)*Bs; + end + R = cholupdate(cholupdate(R,y/sqrt(y'*s)),Bs/sqrt(s'*Bs),'-'); + else + if y'*s > 1e-10 + R = cholupdate(cholupdate(R,y/sqrt(y'*s)),Bs/sqrt(s'*Bs),'-'); + else + if debug + fprintf('Skipping Update\n'); + end + end + end + elseif qnUpdate == 1 % Perform SR1 Update if it maintains positive-definiteness + + Bs = R'*(R*s); + ymBs = y-Bs; + if abs(s'*ymBs) >= norm(s)*norm(ymBs)*1e-8 && (s-((R\(R'\y))))'*y > 1e-10 + R = cholupdate(R,-ymBs/sqrt(ymBs'*s),'-'); + else + if debug + fprintf('SR1 not positive-definite, doing BFGS Update\n'); + end + if Damped + eta = .02; + if y'*s < eta*s'*Bs + if debug + fprintf('Damped Update\n'); + end + theta = min(max(0,((1-eta)*s'*Bs)/(s'*Bs - y'*s)),1); + y = theta*y + (1-theta)*Bs; + end + R = cholupdate(cholupdate(R,y/sqrt(y'*s)),Bs/sqrt(s'*Bs),'-'); + else + if y'*s > 1e-10 + R = cholupdate(cholupdate(R,y/sqrt(y'*s)),Bs/sqrt(s'*Bs),'-'); + else + if debug + fprintf('Skipping Update\n'); + end + end + end + end + elseif qnUpdate == 2 % Use Hoshino update + v = sqrt(y'*H*y)*(s/(s'*y) - (H*y)/(y'*H*y)); + phi = 1/(1 + (y'*H*y)/(s'*y)); + H = H + (s*s')/(s'*y) - (H*y*y'*H)/(y'*H*y) + phi*v*v'; + + elseif qnUpdate == 3 % Self-Scaling BFGS update + ys = y'*s; + Hy = H*y; + yHy = y'*Hy; + gamma = ys/yHy; + v = sqrt(yHy)*(s/ys - Hy/yHy); + H = gamma*(H - Hy*Hy'/yHy + v*v') + (s*s')/ys; + elseif qnUpdate == 4 % Oren's Self-Scaling Variable Metric update + + % Oren's method + if (s'*y)/(y'*H*y) > 1 + phi = 1; % BFGS + omega = 0; + elseif (s'*(H\s))/(s'*y) < 1 + phi = 0; % DFP + omega = 1; + else + phi = (s'*y)*(y'*H*y-s'*y)/((s'*(H\s))*(y'*H*y)-(s'*y)^2); + omega = 
phi; + end + + gamma = (1-omega)*(s'*y)/(y'*H*y) + omega*(s'*(H\s))/(s'*y); + v = sqrt(y'*H*y)*(s/(s'*y) - (H*y)/(y'*H*y)); + H = gamma*(H - (H*y*y'*H)/(y'*H*y) + phi*v*v') + (s*s')/(s'*y); + + elseif qnUpdate == 5 % McCormick-Huang asymmetric update + theta = 1; + phi = 0; + psi = 1; + omega = 0; + t1 = s*(theta*s + phi*H'*y)'; + t2 = (theta*s + phi*H'*y)'*y; + t3 = H*y*(psi*s + omega*H'*y)'; + t4 = (psi*s + omega*H'*y)'*y; + H = H + t1/t2 - t3/t4; + end + + if qnUpdate <= 1 + d = -R\(R'\g); + else + d = -H*g; + end + + end + g_old = g; + + case NEWTON0 % Hessian-Free Newton + + cgMaxIter = min(p,maxFunEvals-funEvals); + cgForce = min(0.5,sqrt(norm(g)))*norm(g); + + % Set-up preconditioner + precondFunc = []; + precondArgs = []; + if cgSolve == 1 + if isempty(precFunc) % Apply L-BFGS preconditioner + if i == 1 + S = zeros(p,corrections); + Y = zeros(p,corrections); + YS = zeros(corrections,1); + lbfgs_start = 1; + lbfgs_end = 0; + Hdiag = 1; + else + [S,Y,YS,lbfgs_start,lbfgs_end,Hdiag,skipped] = lbfgsAdd(g-g_old,t*d,S,Y,YS,lbfgs_start,lbfgs_end,Hdiag,useMex); + if debug && skipped + fprintf('Skipped L-BFGS updated\n'); + end + if useMex + precondFunc = @lbfgsProdC; + else + precondFunc = @lbfgsProd; + end + precondArgs = {S,Y,YS,int32(lbfgs_start),int32(lbfgs_end),Hdiag}; + end + g_old = g; + else + % Apply user-defined preconditioner + precondFunc = precFunc; + precondArgs = {x,varargin{:}}; + end + end + + % Solve Newton system using cg and hessian-vector products + if isempty(HvFunc) + % No user-supplied Hessian-vector function, + % use automatic differentiation + HvFun = @autoHv; + HvArgs = {x,g,useComplex,funObj,varargin{:}}; + else + % Use user-supplid Hessian-vector function + HvFun = HvFunc; + HvArgs = {x,varargin{:}}; + end + + if useNegCurv + [d,cgIter,cgRes,negCurv] = conjGrad([],-g,cgForce,cgMaxIter,debug,precondFunc,precondArgs,HvFun,HvArgs); + else + [d,cgIter,cgRes] = conjGrad([],-g,cgForce,cgMaxIter,debug,precondFunc,precondArgs,HvFun,HvArgs); + end + + funEvals = funEvals+cgIter; + if debug + fprintf('newtonCG stopped on iteration %d w/ residual %.5e\n',cgIter,cgRes); + + end + + if useNegCurv + if ~isempty(negCurv) + %if debug + fprintf('Using negative curvature direction\n'); + %end + d = negCurv/norm(negCurv); + d = d/sum(abs(g)); + end + end + + case NEWTON % Newton search direction + + if cgSolve == 0 + if HessianModify == 0 + % Attempt to perform a Cholesky factorization of the Hessian + [R,posDef] = chol(H); + + % If the Cholesky factorization was successful, then the Hessian is + % positive definite, solve the system + if posDef == 0 + d = -R\(R'\g); + + else + % otherwise, adjust the Hessian to be positive definite based on the + % minimum eigenvalue, and solve with QR + % (expensive, we don't want to do this very much) + if debug + fprintf('Adjusting Hessian\n'); + end + H = H + eye(length(g)) * max(0,1e-12 - min(real(eig(H)))); + d = -H\g; + end + elseif HessianModify == 1 + % Modified Incomplete Cholesky + R = mcholinc(H,debug); + d = -R\(R'\g); + elseif HessianModify == 2 + % Modified Generalized Cholesky + if useMex + [L D perm] = mcholC(H); + else + [L D perm] = mchol(H); + end + d(perm) = -L' \ ((D.^-1).*(L \ g(perm))); + + elseif HessianModify == 3 + % Modified Spectral Decomposition + [V,D] = eig((H+H')/2); + D = diag(D); + D = max(abs(D),max(max(abs(D)),1)*1e-12); + d = -V*((V'*g)./D); + elseif HessianModify == 4 + % Modified Symmetric Indefinite Factorization + [L,D,perm] = ldl(H,'vector'); + [blockPos junk] = find(triu(D,1)); + for diagInd = 
setdiff(setdiff(1:p,blockPos),blockPos+1) + if D(diagInd,diagInd) < 1e-12 + D(diagInd,diagInd) = 1e-12; + end + end + for blockInd = blockPos' + block = D(blockInd:blockInd+1,blockInd:blockInd+1); + block_a = block(1); + block_b = block(2); + block_d = block(4); + lambda = (block_a+block_d)/2 - sqrt(4*block_b^2 + (block_a - block_d)^2)/2; + D(blockInd:blockInd+1,blockInd:blockInd+1) = block+eye(2)*(lambda+1e-12); + end + d(perm) = -L' \ (D \ (L \ g(perm))); + else + % Take Newton step if Hessian is pd, + % otherwise take a step with negative curvature + [R,posDef] = chol(H); + if posDef == 0 + d = -R\(R'\g); + else + if debug + fprintf('Taking Direction of Negative Curvature\n'); + end + [V,D] = eig(H); + u = V(:,1); + d = -sign(u'*g)*u; + end + end + + else + % Solve with Conjugate Gradient + cgMaxIter = p; + cgForce = min(0.5,sqrt(norm(g)))*norm(g); + + % Select Preconditioner + if cgSolve == 1 + % No preconditioner + precondFunc = []; + precondArgs = []; + elseif cgSolve == 2 + % Diagonal preconditioner + precDiag = diag(H); + precDiag(precDiag < 1e-12) = 1e-12 - min(precDiag); + precondFunc = @precondDiag; + precondArgs = {precDiag.^-1}; + elseif cgSolve == 3 + % L-BFGS preconditioner + if i == 1 + old_dirs = zeros(length(g),0); + old_stps = zeros(length(g),0); + Hdiag = 1; + else + [old_dirs,old_stps,Hdiag] = lbfgsUpdate(g-g_old,t*d,corrections,debug,old_dirs,old_stps,Hdiag); + end + g_old = g; + if useMex + precondFunc = @lbfgsC; + else + precondFunc = @lbfgs; + end + precondArgs = {old_dirs,old_stps,Hdiag}; + elseif cgSolve > 0 + % Symmetric Successive Overelaxation Preconditioner + omega = cgSolve; + D = diag(H); + D(D < 1e-12) = 1e-12 - min(D); + precDiag = (omega/(2-omega))*D.^-1; + precTriu = diag(D/omega) + triu(H,1); + precondFunc = @precondTriuDiag; + precondArgs = {precTriu,precDiag.^-1}; + else + % Incomplete Cholesky Preconditioner + opts.droptol = -cgSolve; + opts.rdiag = 1; + R = cholinc(sparse(H),opts); + if min(diag(R)) < 1e-12 + R = cholinc(sparse(H + eye*(1e-12 - min(diag(R)))),opts); + end + precondFunc = @precondTriu; + precondArgs = {R}; + end + + % Run cg with the appropriate preconditioner + if isempty(HvFunc) + % No user-supplied Hessian-vector function + [d,cgIter,cgRes] = conjGrad(H,-g,cgForce,cgMaxIter,debug,precondFunc,precondArgs); + else + % Use user-supplied Hessian-vector function + [d,cgIter,cgRes] = conjGrad(H,-g,cgForce,cgMaxIter,debug,precondFunc,precondArgs,HvFunc,{x,varargin{:}}); + end + if debug + fprintf('CG stopped after %d iterations w/ residual %.5e\n',cgIter,cgRes); + %funEvals = funEvals + cgIter; + end + end + + case TENSOR % Tensor Method + + if numDiff + % Compute 3rd-order Tensor Numerically + [junk1 junk2 junk3 T] = autoTensor(x,numDiffType,funObj,varargin{:}); + else + % Use user-supplied 3rd-derivative Tensor + [junk1 junk2 junk3 T] = funObj(x,varargin{:}); + end + options_sub.Method = 'newton'; + options_sub.Display = 'none'; + options_sub.progTol = progTol; + options_sub.optTol = optTol; + d = minFunc(@taylorModel,zeros(p,1),options_sub,f,g,H,T); + + if any(abs(d) > 1e5) || all(abs(d) < 1e-5) || g'*d > -progTol + if debug + fprintf('Using 2nd-Order Step\n'); + end + [V,D] = eig((H+H')/2); + D = diag(D); + D = max(abs(D),max(max(abs(D)),1)*1e-12); + d = -V*((V'*g)./D); + else + if debug + fprintf('Using 3rd-Order Step\n'); + end + end + end + + if ~isLegal(d) + fprintf('Step direction is illegal!\n'); + pause; + return + end + + % ****************** COMPUTE STEP LENGTH ************************ + + % Directional Derivative + gtd = 
g'*d; + + % Check that progress can be made along direction + if gtd > -progTol + exitflag=2; + msg = 'Directional Derivative below progTol'; + break; + end + + % Select Initial Guess + if i == 1 + if method < NEWTON0 + t = min(1,1/sum(abs(g))); + else + t = 1; + end + else + if LS_init == 0 + % Newton step + t = 1; + elseif LS_init == 1 + % Close to previous step length + t = t*min(2,(gtd_old)/(gtd)); + elseif LS_init == 2 + % Quadratic Initialization based on {f,g} and previous f + t = min(1,2*(f-f_old)/(gtd)); + elseif LS_init == 3 + % Double previous step length + t = min(1,t*2); + elseif LS_init == 4 + % Scaled step length if possible + if isempty(HvFunc) + % No user-supplied Hessian-vector function, + % use automatic differentiation + dHd = d'*autoHv(d,x,g,0,funObj,varargin{:}); + else + % Use user-supplid Hessian-vector function + dHd = d'*HvFunc(d,x,varargin{:}); + end + + funEvals = funEvals + 1; + if dHd > 0 + t = -gtd/(dHd); + else + t = min(1,2*(f-f_old)/(gtd)); + end + end + + if t <= 0 + t = 1; + end + end + f_old = f; + gtd_old = gtd; + + % Compute reference fr if using non-monotone objective + if Fref == 1 + fr = f; + else + if i == 1 + old_fvals = repmat(-inf,[Fref 1]); + end + + if i <= Fref + old_fvals(i) = f; + else + old_fvals = [old_fvals(2:end);f]; + end + fr = max(old_fvals); + end + + computeHessian = 0; + if method >= NEWTON + if HessianIter == 1 + computeHessian = 1; + elseif i > 1 && mod(i-1,HessianIter) == 0 + computeHessian = 1; + end + end + + % Line Search + f_old = f; + if LS_type == 0 % Use Armijo Bactracking + % Perform Backtracking line search + if computeHessian + [t,x,f,g,LSfunEvals,H] = ArmijoBacktrack(x,t,d,f,fr,g,gtd,c1,LS_interp,LS_multi,progTol,debug,doPlot,LS_saveHessianComp,funObj,varargin{:}); + else + [t,x,f,g,LSfunEvals] = ArmijoBacktrack(x,t,d,f,fr,g,gtd,c1,LS_interp,LS_multi,progTol,debug,doPlot,1,funObj,varargin{:}); + end + funEvals = funEvals + LSfunEvals; + + elseif LS_type == 1 % Find Point satisfying Wolfe conditions + + if computeHessian + [t,f,g,LSfunEvals,H] = WolfeLineSearch(x,t,d,f,g,gtd,c1,c2,LS_interp,LS_multi,25,progTol,debug,doPlot,LS_saveHessianComp,funObj,varargin{:}); + else + [t,f,g,LSfunEvals] = WolfeLineSearch(x,t,d,f,g,gtd,c1,c2,LS_interp,LS_multi,25,progTol,debug,doPlot,1,funObj,varargin{:}); + end + funEvals = funEvals + LSfunEvals; + x = x + t*d; + + else + % Use Matlab optim toolbox line search + [t,f_new,fPrime_new,g_new,LSexitFlag,LSiter]=... + lineSearch({'fungrad',[],funObj},x,p,1,p,d,f,gtd,t,c1,c2,-inf,maxFunEvals-funEvals,... 
+ progTol,[],[],[],varargin{:}); + funEvals = funEvals + LSiter; + if isempty(t) + exitflag = -2; + msg = 'Matlab LineSearch failed'; + break; + end + + if method >= NEWTON + [f_new,g_new,H] = funObj(x + t*d,varargin{:}); + funEvals = funEvals + 1; + end + x = x + t*d; + f = f_new; + g = g_new; + end + + % Compute Optimality Condition + optCond = max(abs(g)); + + % Output iteration information + if verboseI + fprintf('%10d %10d %15.5e %15.5e %15.5e\n',i,funEvals*funEvalMultiplier,t,f,optCond); + end + + if nargout > 3 + % Update Trace + trace.fval(end+1,1) = f; + trace.funcCount(end+1,1) = funEvals; + trace.optCond(end+1,1) = optCond; + end + + % Output Function + if ~isempty(outputFcn) + stop = outputFcn(x,'iter',i,funEvals,f,t,gtd,g,d,optCond,varargin{:}); + if stop + exitflag=-1; + msg = 'Stopped by output function'; + break; + end + end + + % Check Optimality Condition + if optCond <= optTol + exitflag=1; + msg = 'Optimality Condition below optTol'; + break; + end + + % ******************* Check for lack of progress ******************* + + if max(abs(t*d)) <= progTol + exitflag=2; + msg = 'Step Size below progTol'; + break; + end + + + if abs(f-f_old) < progTol + exitflag=2; + msg = 'Function Value changing by less than progTol'; + break; + end + + % ******** Check for going over iteration/evaluation limit ******************* + + if funEvals*funEvalMultiplier >= maxFunEvals + exitflag = 0; + msg = 'Reached Maximum Number of Function Evaluations'; + break; + end + + if i == maxIter + exitflag = 0; + msg='Reached Maximum Number of Iterations'; + break; + end + +end + +if verbose + fprintf('%s\n',msg); +end +if nargout > 3 + output = struct('iterations',i,'funcCount',funEvals*funEvalMultiplier,... + 'algorithm',method,'firstorderopt',max(abs(g)),'message',msg,'trace',trace); +end + +% Output Function +if ~isempty(outputFcn) + outputFcn(x,'done',i,funEvals,f,t,gtd,g,d,max(abs(g)),varargin{:}); + end + +end + diff --git a/common/minFunc_2012/minFunc/minFunc_processInputOptions.m b/common/minFunc_2012/minFunc/minFunc_processInputOptions.m new file mode 100644 index 0000000..57299e4 --- /dev/null +++ b/common/minFunc_2012/minFunc/minFunc_processInputOptions.m @@ -0,0 +1,168 @@ + +function [verbose,verboseI,debug,doPlot,maxFunEvals,maxIter,optTol,progTol,method,... + corrections,c1,c2,LS_init,cgSolve,qnUpdate,cgUpdate,initialHessType,... + HessianModify,Fref,useComplex,numDiff,LS_saveHessianComp,... + Damped,HvFunc,bbType,cycle,... + HessianIter,outputFcn,useMex,useNegCurv,precFunc,... + LS_type,LS_interp,LS_multi,DerivativeCheck] = ... 
+ minFunc_processInputOptions(o) + +% Constants +SD = 0; +CSD = 1; +BB = 2; +CG = 3; +PCG = 4; +LBFGS = 5; +QNEWTON = 6; +NEWTON0 = 7; +NEWTON = 8; +TENSOR = 9; + +verbose = 1; +verboseI= 1; +debug = 0; +doPlot = 0; +method = LBFGS; +cgSolve = 0; + +o = toUpper(o); + +if isfield(o,'DISPLAY') + switch(upper(o.DISPLAY)) + case 0 + verbose = 0; + verboseI = 0; + case 'FINAL' + verboseI = 0; + case 'OFF' + verbose = 0; + verboseI = 0; + case 'NONE' + verbose = 0; + verboseI = 0; + case 'FULL' + debug = 1; + case 'EXCESSIVE' + debug = 1; + doPlot = 1; + end +end + +DerivativeCheck = 0; +if isfield(o,'DERIVATIVECHECK') + switch(upper(o.DERIVATIVECHECK)) + case 1 + DerivativeCheck = 1; + case 'ON' + DerivativeCheck = 1; + end +end + +LS_init = 0; +LS_type = 1; +LS_interp = 2; +LS_multi = 0; +Fref = 1; +Damped = 0; +HessianIter = 1; +c2 = 0.9; +if isfield(o,'METHOD') + m = upper(o.METHOD); + switch(m) + case 'TENSOR' + method = TENSOR; + case 'NEWTON' + method = NEWTON; + case 'MNEWTON' + method = NEWTON; + HessianIter = 5; + case 'PNEWTON0' + method = NEWTON0; + cgSolve = 1; + case 'NEWTON0' + method = NEWTON0; + case 'QNEWTON' + method = QNEWTON; + Damped = 1; + case 'LBFGS' + method = LBFGS; + case 'BB' + method = BB; + LS_type = 0; + Fref = 20; + case 'PCG' + method = PCG; + c2 = 0.2; + LS_init = 2; + case 'SCG' + method = CG; + c2 = 0.2; + LS_init = 4; + case 'CG' + method = CG; + c2 = 0.2; + LS_init = 2; + case 'CSD' + method = CSD; + c2 = 0.2; + Fref = 10; + LS_init = 2; + case 'SD' + method = SD; + LS_init = 2; + end +end + +maxFunEvals = getOpt(o,'MAXFUNEVALS',1000); +maxIter = getOpt(o,'MAXITER',500); +optTol = getOpt(o,'OPTTOL',1e-5); +progTol = getOpt(o,'PROGTOL',1e-9); +corrections = getOpt(o,'CORRECTIONS',100); +corrections = getOpt(o,'CORR',corrections); +c1 = getOpt(o,'C1',1e-4); +c2 = getOpt(o,'C2',c2); +LS_init = getOpt(o,'LS_INIT',LS_init); +cgSolve = getOpt(o,'CGSOLVE',cgSolve); +qnUpdate = getOpt(o,'QNUPDATE',3); +cgUpdate = getOpt(o,'CGUPDATE',2); +initialHessType = getOpt(o,'INITIALHESSTYPE',1); +HessianModify = getOpt(o,'HESSIANMODIFY',0); +Fref = getOpt(o,'FREF',Fref); +useComplex = getOpt(o,'USECOMPLEX',0); +numDiff = getOpt(o,'NUMDIFF',0); +LS_saveHessianComp = getOpt(o,'LS_SAVEHESSIANCOMP',1); +Damped = getOpt(o,'DAMPED',Damped); +HvFunc = getOpt(o,'HVFUNC',[]); +bbType = getOpt(o,'BBTYPE',0); +cycle = getOpt(o,'CYCLE',3); +HessianIter = getOpt(o,'HESSIANITER',HessianIter); +outputFcn = getOpt(o,'OUTPUTFCN',[]); +useMex = getOpt(o,'USEMEX',1); +useNegCurv = getOpt(o,'USENEGCURV',1); +precFunc = getOpt(o,'PRECFUNC',[]); +LS_type = getOpt(o,'LS_type',LS_type); +LS_interp = getOpt(o,'LS_interp',LS_interp); +LS_multi = getOpt(o,'LS_multi',LS_multi); +end + +function [v] = getOpt(options,opt,default) +if isfield(options,opt) + if ~isempty(getfield(options,opt)) + v = getfield(options,opt); + else + v = default; + end +else + v = default; +end +end + +function [o] = toUpper(o) +if ~isempty(o) + fn = fieldnames(o); + for i = 1:length(fn) + o = setfield(o,upper(fn{i}),getfield(o,fn{i})); + end +end +end \ No newline at end of file diff --git a/common/minFunc_2012/minFunc/polyinterp.m b/common/minFunc_2012/minFunc/polyinterp.m new file mode 100644 index 0000000..b29cd33 --- /dev/null +++ b/common/minFunc_2012/minFunc/polyinterp.m @@ -0,0 +1,144 @@ +function [minPos,fmin] = polyinterp(points,doPlot,xminBound,xmaxBound) +% function [minPos] = polyinterp(points,doPlot,xminBound,xmaxBound) +% +% Minimum of interpolating polynomial based on function and derivative +% values +% +% It 
can also be used for extrapolation if {xmin,xmax} are outside +% the domain of the points. +% +% Input: +% points(pointNum,[x f g]) +% doPlot: set to 1 to plot, default: 0 +% xmin: min value that brackets minimum (default: min of points) +% xmax: max value that brackets maximum (default: max of points) +% +% set f or g to sqrt(-1) if they are not known +% the order of the polynomial is the number of known f and g values minus 1 + +if nargin < 2 + doPlot = 0; +end + +nPoints = size(points,1); +order = sum(sum((imag(points(:,2:3))==0)))-1; + +xmin = min(points(:,1)); +xmax = max(points(:,1)); + +% Compute Bounds of Interpolation Area +if nargin < 3 + xminBound = xmin; +end +if nargin < 4 + xmaxBound = xmax; +end + +% Code for most common case: +% - cubic interpolation of 2 points +% w/ function and derivative values for both + +if nPoints == 2 && order ==3 && doPlot == 0 + % Solution in this case (where x2 is the farthest point): + % d1 = g1 + g2 - 3*(f1-f2)/(x1-x2); + % d2 = sqrt(d1^2 - g1*g2); + % minPos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2)); + % t_new = min(max(minPos,x1),x2); + [minVal minPos] = min(points(:,1)); + notMinPos = -minPos+3; + d1 = points(minPos,3) + points(notMinPos,3) - 3*(points(minPos,2)-points(notMinPos,2))/(points(minPos,1)-points(notMinPos,1)); + d2 = sqrt(d1^2 - points(minPos,3)*points(notMinPos,3)); + if isreal(d2) + t = points(notMinPos,1) - (points(notMinPos,1) - points(minPos,1))*((points(notMinPos,3) + d2 - d1)/(points(notMinPos,3) - points(minPos,3) + 2*d2)); + minPos = min(max(t,xminBound),xmaxBound); + else + minPos = (xmaxBound+xminBound)/2; + end + return; +end + +% Constraints Based on available Function Values +A = zeros(0,order+1); +b = zeros(0,1); +for i = 1:nPoints + if imag(points(i,2))==0 + constraint = zeros(1,order+1); + for j = order:-1:0 + constraint(order-j+1) = points(i,1)^j; + end + A = [A;constraint]; + b = [b;points(i,2)]; + end +end + +% Constraints based on available Derivatives +for i = 1:nPoints + if isreal(points(i,3)) + constraint = zeros(1,order+1); + for j = 1:order + constraint(j) = (order-j+1)*points(i,1)^(order-j); + end + A = [A;constraint]; + b = [b;points(i,3)]; + end +end + +% Find interpolating polynomial +[params,ignore] = linsolve(A,b); + +% Compute Critical Points +dParams = zeros(order,1); +for i = 1:length(params)-1 + dParams(i) = params(i)*(order-i+1); +end + +if any(isinf(dParams)) + cp = [xminBound;xmaxBound;points(:,1)].'; +else + cp = [xminBound;xmaxBound;points(:,1);roots(dParams)].'; +end + +% Test Critical Points +fmin = inf; +minPos = (xminBound+xmaxBound)/2; % Default to Bisection if no critical points valid +for xCP = cp + if imag(xCP)==0 && xCP >= xminBound && xCP <= xmaxBound + fCP = polyval(params,xCP); + if imag(fCP)==0 && fCP < fmin + minPos = real(xCP); + fmin = real(fCP); + end + end +end + +% Plot Situation +if doPlot + clf; hold on; + + % Plot Points + plot(points(:,1),points(:,2),'b*'); + + % Plot Derivatives + for i = 1:nPoints + if isreal(points(i,3)) + m = points(i,3); + b = points(i,2) - m*points(i,1); + plot([points(i,1)-.05 points(i,1)+.05],... 
+ [(points(i,1)-.05)*m+b (points(i,1)+.05)*m+b],'c.-'); + end + end + + % Plot Function + x = min(xmin,xminBound)-.1:(max(xmax,xmaxBound)+.1-min(xmin,xminBound)+.1)/100:max(xmax,xmaxBound)+.1; + for i = 1:length(x) + f(i) = polyval(params,x(i)); + end + plot(x,f,'y'); + axis([x(1)-.1 x(end)+.1 min(f)-.1 max(f)+.1]); + + % Plot Minimum + plot(minPos,fmin,'g+'); + if doPlot == 1 + pause(1); + end +end \ No newline at end of file diff --git a/common/minFunc_2012/minFunc/precondDiag.m b/common/minFunc_2012/minFunc/precondDiag.m new file mode 100644 index 0000000..b112b19 --- /dev/null +++ b/common/minFunc_2012/minFunc/precondDiag.m @@ -0,0 +1,2 @@ +function [y] = precondDiag(r,D) +y = D.*r; \ No newline at end of file diff --git a/common/minFunc_2012/minFunc/precondTriu.m b/common/minFunc_2012/minFunc/precondTriu.m new file mode 100644 index 0000000..8479777 --- /dev/null +++ b/common/minFunc_2012/minFunc/precondTriu.m @@ -0,0 +1,2 @@ +function [y] = precondUpper(r,U) +y = U \ (U' \ r); \ No newline at end of file diff --git a/common/minFunc_2012/minFunc/precondTriuDiag.m b/common/minFunc_2012/minFunc/precondTriuDiag.m new file mode 100644 index 0000000..89f2134 --- /dev/null +++ b/common/minFunc_2012/minFunc/precondTriuDiag.m @@ -0,0 +1,2 @@ +function [y] = precondUpper(r,U,D) +y = U \ (D .* (U' \ r)); \ No newline at end of file diff --git a/common/minFunc_2012/minFunc/taylorModel.m b/common/minFunc_2012/minFunc/taylorModel.m new file mode 100644 index 0000000..9f66726 --- /dev/null +++ b/common/minFunc_2012/minFunc/taylorModel.m @@ -0,0 +1,38 @@ +function [f,g,H] = taylorModel(d,f,g,H,T) + +p = length(d); + +fd3 = 0; +gd2 = zeros(p,1); +Hd = zeros(p); +for t1 = 1:p + for t2 = 1:p + for t3 = 1:p + fd3 = fd3 + T(t1,t2,t3)*d(t1)*d(t2)*d(t3); + + if nargout > 1 + gd2(t3) = gd2(t3) + T(t1,t2,t3)*d(t1)*d(t2); + end + + if nargout > 2 + Hd(t2,t3) = Hd(t2,t3) + T(t1,t2,t3)*d(t1); + end + end + + end +end + +f = f + g'*d + (1/2)*d'*H*d + (1/6)*fd3; + +if nargout > 1 + g = g + H*d + (1/2)*gd2; +end + +if nargout > 2 + H = H + Hd; +end + +if any(abs(d) > 1e5) + % We want the optimizer to stop if the solution is unbounded + g = zeros(p,1); +end \ No newline at end of file diff --git a/common/minFunc_2012/rosenbrock.m b/common/minFunc_2012/rosenbrock.m new file mode 100644 index 0000000..abcad59 --- /dev/null +++ b/common/minFunc_2012/rosenbrock.m @@ -0,0 +1,4 @@ +function fxy = rosenbrock(x,y) +fxy=((1-x).^2)+(100*((y-(x.^2)).^2)); +return; +end \ No newline at end of file diff --git a/common/samplePatches.m b/common/samplePatches.m new file mode 100644 index 0000000..89d7ec5 --- /dev/null +++ b/common/samplePatches.m @@ -0,0 +1,25 @@ +function patches = samplePatches(rawImages, patchSize, numPatches) +% rawImages is of size (image width)*(image height) by number_of_images +% We assume that image width = image height +imWidth = sqrt(size(rawImages,1)); +imHeight = imWidth; +numImages = size(rawImages,2); +rawImages = reshape(rawImages,imWidth,imHeight,numImages); + +% Initialize patches with zeros. +patches = zeros(patchSize*patchSize, numPatches); + +% Maximum possible start coordinate +maxWidth = imWidth - patchSize + 1; +maxHeight = imHeight - patchSize + 1; + +% Sample! 
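+% Each iteration draws a random top-left corner (x,y) and a random image,
+% then takes the patchSize-by-patchSize block below and to the right of
+% that corner; x indexes rows (height) and y indexes columns (width), so
+% the bounds maxHeight/maxWidth keep every patch fully inside the image.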
+for num = 1:numPatches
+ x = randi(maxHeight);
+ y = randi(maxWidth);
+ img = randi(numImages);
+ p = rawImages(x:x+patchSize-1,y:y+patchSize-1, img);
+ patches(:,num) = p(:);
+end
+
+
diff --git a/ex1/binary_classifier_accuracy.m b/ex1/binary_classifier_accuracy.m
new file mode 100644
index 0000000..3596aaa
--- /dev/null
+++ b/ex1/binary_classifier_accuracy.m
@@ -0,0 +1,3 @@
+function accuracy=binary_classifier_accuracy(theta, X,y)
+ correct=sum(y == (sigmoid(theta'*X) > 0.5));
+ accuracy = correct / length(y);
diff --git a/ex1/ex1_load_mnist.m b/ex1/ex1_load_mnist.m
new file mode 100644
index 0000000..3b2f1c5
--- /dev/null
+++ b/ex1/ex1_load_mnist.m
@@ -0,0 +1,50 @@
+function [train, test] = ex1_load_mnist(binary_digits)
+
+ % Load the training data
+ X=loadMNISTImages('train-images-idx3-ubyte');
+ y=loadMNISTLabels('train-labels-idx1-ubyte')';
+
+ if (binary_digits)
+ % Take only the 0 and 1 digits
+ X = [ X(:,y==0), X(:,y==1) ];
+ y = [ y(y==0), y(y==1) ];
+ end
+
+ % Randomly shuffle the data
+ I = randperm(length(y));
+ y=y(I); % labels in range 0 to 9 (0 or 1 in the binary case)
+ X=X(:,I);
+
+ % We standardize the data so that each pixel will have roughly zero mean and unit variance.
+ s=std(X,[],2);
+ m=mean(X,2);
+ X=bsxfun(@minus, X, m);
+ X=bsxfun(@rdivide, X, s+.1);
+
+ % Place these in the training set
+ train.X = X;
+ train.y = y;
+
+ % Load the testing data
+ X=loadMNISTImages('t10k-images-idx3-ubyte');
+ y=loadMNISTLabels('t10k-labels-idx1-ubyte')';
+
+ if (binary_digits)
+ % Take only the 0 and 1 digits
+ X = [ X(:,y==0), X(:,y==1) ];
+ y = [ y(y==0), y(y==1) ];
+ end
+
+ % Randomly shuffle the data
+ I = randperm(length(y));
+ y=y(I); % labels in range 0 to 9 (0 or 1 in the binary case)
+ X=X(:,I);
+
+ % Standardize using the same mean and scale as the training data.
+ X=bsxfun(@minus, X, m);
+ X=bsxfun(@rdivide, X, s+.1);
+
+ % Place these in the testing set
+ test.X=X;
+ test.y=y;
+
diff --git a/ex1/ex1a_linreg.m b/ex1/ex1a_linreg.m
new file mode 100644
index 0000000..65ce91f
--- /dev/null
+++ b/ex1/ex1a_linreg.m
@@ -0,0 +1,90 @@
+%
+%This exercise uses data from the UCI repository:
+% Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
+% http://archive.ics.uci.edu/ml
+% Irvine, CA: University of California, School of Information and Computer Science.
+%
+%Data created by:
+% Harrison, D. and Rubinfeld, D.L.
+% ''Hedonic prices and the demand for clean air''
+% J. Environ. Economics & Management, vol.5, 81-102, 1978.
+%
+addpath ../common
+addpath ../common/minFunc_2012/minFunc
+addpath ../common/minFunc_2012/minFunc/compiled
+
+% Load housing data from file.
+data = load('housing.data');
+data=data'; % put examples in columns
+
+% Include a row of 1s as an additional intercept feature.
+data = [ ones(1,size(data,2)); data ];
+
+% Shuffle examples.
+data = data(:, randperm(size(data,2)));
+
+% Split into train and test sets
+% The last row of 'data' is the median home price.
+train.X = data(1:end-1,1:400);
+train.y = data(end,1:400);
+
+test.X = data(1:end-1,401:end);
+test.y = data(end,401:end);
+
+m=size(train.X,2);
+n=size(train.X,1);
+
+% Initialize the coefficient vector theta to random values.
+theta = rand(n,1);
+
+% Run the minFunc optimizer with linear_regression.m as the objective.
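+% minFunc calls the objective repeatedly to obtain a function value and
+% gradient at each candidate theta, and returns the optimized parameters.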
+% +% TODO: Implement the linear regression objective and gradient computations +% in linear_regression.m +% +tic; +options = struct('MaxIter', 200); +theta = minFunc(@linear_regression, theta, options, train.X, train.y); +fprintf('Optimization took %f seconds.\n', toc); + +% Run minFunc with linear_regression_vec.m as the objective. +% +% TODO: Implement linear regression in linear_regression_vec.m +% using MATLAB's vectorization features to speed up your code. +% Compare the running time for your linear_regression.m and +% linear_regression_vec.m implementations. +% +% Uncomment the lines below to run your vectorized code. +%Re-initialize parameters +%theta = rand(n,1); +%tic; +%theta = minFunc(@linear_regression_vec, theta, options, train.X, train.y); +%fprintf('Optimization took %f seconds.\n', toc); + +% Plot predicted prices and actual prices from training set. +actual_prices = train.y; +predicted_prices = theta'*train.X; + +% Print out root-mean-squared (RMS) training error. +train_rms=sqrt(mean((predicted_prices - actual_prices).^2)); +fprintf('RMS training error: %f\n', train_rms); + +% Print out test RMS error +actual_prices = test.y; +predicted_prices = theta'*test.X; +test_rms=sqrt(mean((predicted_prices - actual_prices).^2)); +fprintf('RMS testing error: %f\n', test_rms); + + +% Plot predictions on test data. +plot_prices=true; +if (plot_prices) + [actual_prices,I] = sort(actual_prices); + predicted_prices=predicted_prices(I); + plot(actual_prices, 'rx'); + hold on; + plot(predicted_prices,'bx'); + legend('Actual Price', 'Predicted Price'); + xlabel('House #'); + ylabel('House price ($1000s)'); +end \ No newline at end of file diff --git a/ex1/ex1b_logreg.m b/ex1/ex1b_logreg.m new file mode 100644 index 0000000..321b41e --- /dev/null +++ b/ex1/ex1b_logreg.m @@ -0,0 +1,56 @@ +addpath ../common +addpath ../common/minFunc_2012/minFunc +addpath ../common/minFunc_2012/minFunc/compiled + +% Load the MNIST data for this exercise. +% train.X and test.X will contain the training and testing images. +% Each matrix has size [n,m] where: +% m is the number of examples. +% n is the number of pixels in each image. +% train.y and test.y will contain the corresponding labels (0 or 1). +binary_digits = true; +[train,test] = ex1_load_mnist(binary_digits); + +% Add row of 1s to the dataset to act as an intercept term. +train.X = [ones(1,size(train.X,2)); train.X]; +test.X = [ones(1,size(test.X,2)); test.X]; + +% Training set dimensions +m=size(train.X,2); +n=size(train.X,1); + +% Train logistic regression classifier using minFunc +options = struct('MaxIter', 100); + +% First, we initialize theta to some small random values. +theta = rand(n,1)*0.001; + +% Call minFunc with the logistic_regression.m file as the objective function. +% +% TODO: Implement batch logistic regression in the logistic_regression.m file! +% +tic; +theta=minFunc(@logistic_regression, theta, options, train.X, train.y); +fprintf('Optimization took %f seconds.\n', toc); + +% Now, call minFunc again with logistic_regression_vec.m as objective. +% +% TODO: Implement batch logistic regression in logistic_regression_vec.m using +% MATLAB's vectorization features to speed up your code. Compare the running +% time for your logistic_regression.m and logistic_regression_vec.m implementations. +% +% Uncomment the lines below to run your vectorized code. 
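+% (Re-initializing theta below gives the vectorized run a fresh random start,
+% so its timing is comparable to the loop-based run above.)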
+%theta = rand(n,1)*0.001; +%tic; +%theta=minFunc(@logistic_regression_vec, theta, options, train.X, train.y); +%fprintf('Optimization took %f seconds.\n', toc); + +% Print out training accuracy. +tic; +accuracy = binary_classifier_accuracy(theta,train.X,train.y); +fprintf('Training accuracy: %2.1f%%\n', 100*accuracy); + +% Print out accuracy on the test set. +accuracy = binary_classifier_accuracy(theta,test.X,test.y); +fprintf('Test accuracy: %2.1f%%\n', 100*accuracy); + diff --git a/ex1/ex1c_softmax.m b/ex1/ex1c_softmax.m new file mode 100644 index 0000000..7334021 --- /dev/null +++ b/ex1/ex1c_softmax.m @@ -0,0 +1,58 @@ +addpath ../common +addpath ../common/minFunc_2012/minFunc +addpath ../common/minFunc_2012/minFunc/compiled + +% Load the MNIST data for this exercise. +% train.X and test.X will contain the training and testing images. +% Each matrix has size [n,m] where: +% m is the number of examples. +% n is the number of pixels in each image. +% train.y and test.y will contain the corresponding labels (0 to 9). +binary_digits = false; +num_classes = 10; +[train,test] = ex1_load_mnist(binary_digits); + +% Add row of 1s to the dataset to act as an intercept term. +train.X = [ones(1,size(train.X,2)); train.X]; +test.X = [ones(1,size(test.X,2)); test.X]; +train.y = train.y+1; % make labels 1-based. +test.y = test.y+1; % make labels 1-based. + +% Training set info +m=size(train.X,2); +n=size(train.X,1); + +% Train softmax classifier using minFunc +options = struct('MaxIter', 200); + +% Initialize theta. We use a matrix where each column corresponds to a class, +% and each row is a classifier coefficient for that class. +% Inside minFunc, theta will be stretched out into a long vector (theta(:)). +% We only use num_classes-1 columns, since the last column is always assumed 0. +theta = rand(n,num_classes-1)*0.001; + +% Call minFunc with the softmax_regression_vec.m file as objective. +% +% TODO: Implement batch softmax regression in the softmax_regression_vec.m +% file using a vectorized implementation. +% +tic; +theta(:)=minFunc(@softmax_regression_vec, theta(:), options, train.X, train.y); +fprintf('Optimization took %f seconds.\n', toc); +theta=[theta, zeros(n,1)]; % expand theta to include the last class. + +% Print out training accuracy. +tic; +accuracy = multi_classifier_accuracy(theta,train.X,train.y); +fprintf('Training accuracy: %2.1f%%\n', 100*accuracy); + +% Print out test accuracy. +accuracy = multi_classifier_accuracy(theta,test.X,test.y); +fprintf('Test accuracy: %2.1f%%\n', 100*accuracy); + + +% % for learning curves +% global test +% global train +% test.err{end+1} = multi_classifier_accuracy(theta,test.X,test.y); +% train.err{end+1} = multi_classifier_accuracy(theta,train.X,train.y); diff --git a/ex1/grad_check.m b/ex1/grad_check.m new file mode 100644 index 0000000..cb18e46 --- /dev/null +++ b/ex1/grad_check.m @@ -0,0 +1,28 @@ +function average_error = grad_check(fun, theta0, num_checks, varargin) + + delta=1e-3; + sum_error=0; + + fprintf(' Iter i err'); + fprintf(' g_est g f\n') + + for i=1:num_checks + T = theta0; + j = randsample(numel(T),1); + T0=T; T0(j) = T0(j)-delta; + T1=T; T1(j) = T1(j)+delta; + + [f,g] = fun(T, varargin{:}); + f0 = fun(T0, varargin{:}); + f1 = fun(T1, varargin{:}); + + g_est = (f1-f0) / (2*delta); + error = abs(g(j) - g_est); + + fprintf('% 5d % 6d % 15g % 15f % 15f % 15f\n', ... 
+ i,j,error,g(j),g_est,f);
+
+ sum_error = sum_error + error;
+ end
+
+ average_error = sum_error/num_checks;
diff --git a/ex1/housing.data b/ex1/housing.data
new file mode 100644
index 0000000..f83ac56
--- /dev/null
+++ b/ex1/housing.data
@@ -0,0 +1,506 @@
+ 0.00632 18.00 2.310 0 0.5380 6.5750 65.20 4.0900 1 296.0 15.30 396.90 4.98 24.00
+ 0.02731 0.00 7.070 0 0.4690 6.4210 78.90 4.9671 2 242.0 17.80 396.90 9.14 21.60
+ 0.02729 0.00 7.070 0 0.4690 7.1850 61.10 4.9671 2 242.0 17.80 392.83 4.03 34.70
+ 0.03237 0.00 2.180 0 0.4580 6.9980 45.80 6.0622 3 222.0 18.70 394.63 2.94 33.40
+ 0.06905 0.00 2.180 0 0.4580 7.1470 54.20 6.0622 3 222.0 18.70 396.90 5.33 36.20
+ 0.02985 0.00 2.180 0 0.4580 6.4300 58.70 6.0622 3 222.0 18.70 394.12 5.21 28.70
+ 0.08829 12.50 7.870 0 0.5240 6.0120 66.60 5.5605 5 311.0 15.20 395.60 12.43 22.90
+ 0.14455 12.50 7.870 0 0.5240 6.1720 96.10 5.9505 5 311.0 15.20 396.90 19.15 27.10
+ 0.21124 12.50 7.870 0 0.5240 5.6310 100.00 6.0821 5 311.0 15.20 386.63 29.93 16.50
+ 0.17004 12.50 7.870 0 0.5240 6.0040 85.90 6.5921 5 311.0 15.20 386.71 17.10 18.90
+ 0.22489 12.50 7.870 0 0.5240 6.3770 94.30 6.3467 5 311.0 15.20 392.52 20.45 15.00
+ 0.11747 12.50 7.870 0 0.5240 6.0090 82.90 6.2267 5 311.0 15.20 396.90 13.27 18.90
+ 0.09378 12.50 7.870 0 0.5240 5.8890 39.00 5.4509 5 311.0 15.20 390.50 15.71 21.70
+ 0.62976 0.00 8.140 0 0.5380 5.9490 61.80 4.7075 4 307.0 21.00 396.90 8.26 20.40
+ 0.63796 0.00 8.140 0 0.5380 6.0960 84.50 4.4619 4 307.0 21.00 380.02 10.26 18.20
+ 0.62739 0.00 8.140 0 0.5380 5.8340 56.50 4.4986 4 307.0 21.00 395.62 8.47 19.90
+ 1.05393 0.00 8.140 0 0.5380 5.9350 29.30 4.4986 4 307.0 21.00 386.85 6.58 23.10
+ 0.78420 0.00 8.140 0 0.5380 5.9900 81.70 4.2579 4 307.0 21.00 386.75 14.67 17.50
+ 0.80271 0.00 8.140 0 0.5380 5.4560 36.60 3.7965 4 307.0 21.00 288.99 11.69 20.20
+ 0.72580 0.00 8.140 0 0.5380 5.7270 69.50 3.7965 4 307.0 21.00 390.95 11.28 18.20
+ 1.25179 0.00 8.140 0 0.5380 5.5700 98.10 3.7979 4 307.0 21.00 376.57 21.02 13.60
+ 0.85204 0.00 8.140 0 0.5380 5.9650 89.20 4.0123 4 307.0 21.00 392.53 13.83 19.60
+ 1.23247 0.00 8.140 0 0.5380 6.1420 91.70 3.9769 4 307.0 21.00 396.90 18.72 15.20
+ 0.98843 0.00 8.140 0 0.5380 5.8130 100.00 4.0952 4 307.0 21.00 394.54 19.88 14.50
+ 0.75026 0.00 8.140 0 0.5380 5.9240 94.10 4.3996 4 307.0 21.00 394.33 16.30 15.60
+ 0.84054 0.00 8.140 0 0.5380 5.5990 85.70 4.4546 4 307.0 21.00 303.42 16.51 13.90
+ 0.67191 0.00 8.140 0 0.5380 5.8130 90.30 4.6820 4 307.0 21.00 376.88 14.81 16.60
+ 0.95577 0.00 8.140 0 0.5380 6.0470 88.80 4.4534 4 307.0 21.00 306.38 17.28 14.80
+ 0.77299 0.00 8.140 0 0.5380 6.4950 94.40 4.4547 4 307.0 21.00 387.94 12.80 18.40
+ 1.00245 0.00 8.140 0 0.5380 6.6740 87.30 4.2390 4 307.0 21.00 380.23 11.98 21.00
+ 1.13081 0.00 8.140 0 0.5380 5.7130 94.10 4.2330 4 307.0 21.00 360.17 22.60 12.70
+ 1.35472 0.00 8.140 0 0.5380 6.0720 100.00 4.1750 4 307.0 21.00 376.73 13.04 14.50
+ 1.38799 0.00 8.140 0 0.5380 5.9500 82.00 3.9900 4 307.0 21.00 232.60 27.71 13.20
+ 1.15172 0.00 8.140 0 0.5380 5.7010 95.00 3.7872 4 307.0 21.00 358.77 18.35 13.10
+ 1.61282 0.00 8.140 0 0.5380 6.0960 96.90 3.7598 4 307.0 21.00 248.31 20.34 13.50
+ 0.06417 0.00 5.960 0 0.4990 5.9330 68.20 3.3603 5 279.0 19.20 396.90 9.68 18.90
+ 0.09744 0.00 5.960 0 0.4990 5.8410 61.40 3.3779 5 279.0 19.20 377.56 11.41 20.00
+ 0.08014 0.00 5.960 0 0.4990 5.8500 41.50 3.9342 5 279.0 19.20 396.90 8.77 21.00
+ 0.17505 0.00 5.960 0 0.4990 5.9660 30.20 3.8473 5 279.0 19.20 393.43 10.13 24.70
+ 0.02763 75.00 2.950 0 0.4280 6.5950 21.80 5.4011 3 252.0 18.30
395.63 4.32 30.80 + 0.03359 75.00 2.950 0 0.4280 7.0240 15.80 5.4011 3 252.0 18.30 395.62 1.98 34.90 + 0.12744 0.00 6.910 0 0.4480 6.7700 2.90 5.7209 3 233.0 17.90 385.41 4.84 26.60 + 0.14150 0.00 6.910 0 0.4480 6.1690 6.60 5.7209 3 233.0 17.90 383.37 5.81 25.30 + 0.15936 0.00 6.910 0 0.4480 6.2110 6.50 5.7209 3 233.0 17.90 394.46 7.44 24.70 + 0.12269 0.00 6.910 0 0.4480 6.0690 40.00 5.7209 3 233.0 17.90 389.39 9.55 21.20 + 0.17142 0.00 6.910 0 0.4480 5.6820 33.80 5.1004 3 233.0 17.90 396.90 10.21 19.30 + 0.18836 0.00 6.910 0 0.4480 5.7860 33.30 5.1004 3 233.0 17.90 396.90 14.15 20.00 + 0.22927 0.00 6.910 0 0.4480 6.0300 85.50 5.6894 3 233.0 17.90 392.74 18.80 16.60 + 0.25387 0.00 6.910 0 0.4480 5.3990 95.30 5.8700 3 233.0 17.90 396.90 30.81 14.40 + 0.21977 0.00 6.910 0 0.4480 5.6020 62.00 6.0877 3 233.0 17.90 396.90 16.20 19.40 + 0.08873 21.00 5.640 0 0.4390 5.9630 45.70 6.8147 4 243.0 16.80 395.56 13.45 19.70 + 0.04337 21.00 5.640 0 0.4390 6.1150 63.00 6.8147 4 243.0 16.80 393.97 9.43 20.50 + 0.05360 21.00 5.640 0 0.4390 6.5110 21.10 6.8147 4 243.0 16.80 396.90 5.28 25.00 + 0.04981 21.00 5.640 0 0.4390 5.9980 21.40 6.8147 4 243.0 16.80 396.90 8.43 23.40 + 0.01360 75.00 4.000 0 0.4100 5.8880 47.60 7.3197 3 469.0 21.10 396.90 14.80 18.90 + 0.01311 90.00 1.220 0 0.4030 7.2490 21.90 8.6966 5 226.0 17.90 395.93 4.81 35.40 + 0.02055 85.00 0.740 0 0.4100 6.3830 35.70 9.1876 2 313.0 17.30 396.90 5.77 24.70 + 0.01432 100.00 1.320 0 0.4110 6.8160 40.50 8.3248 5 256.0 15.10 392.90 3.95 31.60 + 0.15445 25.00 5.130 0 0.4530 6.1450 29.20 7.8148 8 284.0 19.70 390.68 6.86 23.30 + 0.10328 25.00 5.130 0 0.4530 5.9270 47.20 6.9320 8 284.0 19.70 396.90 9.22 19.60 + 0.14932 25.00 5.130 0 0.4530 5.7410 66.20 7.2254 8 284.0 19.70 395.11 13.15 18.70 + 0.17171 25.00 5.130 0 0.4530 5.9660 93.40 6.8185 8 284.0 19.70 378.08 14.44 16.00 + 0.11027 25.00 5.130 0 0.4530 6.4560 67.80 7.2255 8 284.0 19.70 396.90 6.73 22.20 + 0.12650 25.00 5.130 0 0.4530 6.7620 43.40 7.9809 8 284.0 19.70 395.58 9.50 25.00 + 0.01951 17.50 1.380 0 0.4161 7.1040 59.50 9.2229 3 216.0 18.60 393.24 8.05 33.00 + 0.03584 80.00 3.370 0 0.3980 6.2900 17.80 6.6115 4 337.0 16.10 396.90 4.67 23.50 + 0.04379 80.00 3.370 0 0.3980 5.7870 31.10 6.6115 4 337.0 16.10 396.90 10.24 19.40 + 0.05789 12.50 6.070 0 0.4090 5.8780 21.40 6.4980 4 345.0 18.90 396.21 8.10 22.00 + 0.13554 12.50 6.070 0 0.4090 5.5940 36.80 6.4980 4 345.0 18.90 396.90 13.09 17.40 + 0.12816 12.50 6.070 0 0.4090 5.8850 33.00 6.4980 4 345.0 18.90 396.90 8.79 20.90 + 0.08826 0.00 10.810 0 0.4130 6.4170 6.60 5.2873 4 305.0 19.20 383.73 6.72 24.20 + 0.15876 0.00 10.810 0 0.4130 5.9610 17.50 5.2873 4 305.0 19.20 376.94 9.88 21.70 + 0.09164 0.00 10.810 0 0.4130 6.0650 7.80 5.2873 4 305.0 19.20 390.91 5.52 22.80 + 0.19539 0.00 10.810 0 0.4130 6.2450 6.20 5.2873 4 305.0 19.20 377.17 7.54 23.40 + 0.07896 0.00 12.830 0 0.4370 6.2730 6.00 4.2515 5 398.0 18.70 394.92 6.78 24.10 + 0.09512 0.00 12.830 0 0.4370 6.2860 45.00 4.5026 5 398.0 18.70 383.23 8.94 21.40 + 0.10153 0.00 12.830 0 0.4370 6.2790 74.50 4.0522 5 398.0 18.70 373.66 11.97 20.00 + 0.08707 0.00 12.830 0 0.4370 6.1400 45.80 4.0905 5 398.0 18.70 386.96 10.27 20.80 + 0.05646 0.00 12.830 0 0.4370 6.2320 53.70 5.0141 5 398.0 18.70 386.40 12.34 21.20 + 0.08387 0.00 12.830 0 0.4370 5.8740 36.60 4.5026 5 398.0 18.70 396.06 9.10 20.30 + 0.04113 25.00 4.860 0 0.4260 6.7270 33.50 5.4007 4 281.0 19.00 396.90 5.29 28.00 + 0.04462 25.00 4.860 0 0.4260 6.6190 70.40 5.4007 4 281.0 19.00 395.63 7.22 23.90 + 0.03659 25.00 4.860 0 0.4260 6.3020 32.20 5.4007 
4 281.0 19.00 396.90 6.72 24.80 + 0.03551 25.00 4.860 0 0.4260 6.1670 46.70 5.4007 4 281.0 19.00 390.64 7.51 22.90 + 0.05059 0.00 4.490 0 0.4490 6.3890 48.00 4.7794 3 247.0 18.50 396.90 9.62 23.90 + 0.05735 0.00 4.490 0 0.4490 6.6300 56.10 4.4377 3 247.0 18.50 392.30 6.53 26.60 + 0.05188 0.00 4.490 0 0.4490 6.0150 45.10 4.4272 3 247.0 18.50 395.99 12.86 22.50 + 0.07151 0.00 4.490 0 0.4490 6.1210 56.80 3.7476 3 247.0 18.50 395.15 8.44 22.20 + 0.05660 0.00 3.410 0 0.4890 7.0070 86.30 3.4217 2 270.0 17.80 396.90 5.50 23.60 + 0.05302 0.00 3.410 0 0.4890 7.0790 63.10 3.4145 2 270.0 17.80 396.06 5.70 28.70 + 0.04684 0.00 3.410 0 0.4890 6.4170 66.10 3.0923 2 270.0 17.80 392.18 8.81 22.60 + 0.03932 0.00 3.410 0 0.4890 6.4050 73.90 3.0921 2 270.0 17.80 393.55 8.20 22.00 + 0.04203 28.00 15.040 0 0.4640 6.4420 53.60 3.6659 4 270.0 18.20 395.01 8.16 22.90 + 0.02875 28.00 15.040 0 0.4640 6.2110 28.90 3.6659 4 270.0 18.20 396.33 6.21 25.00 + 0.04294 28.00 15.040 0 0.4640 6.2490 77.30 3.6150 4 270.0 18.20 396.90 10.59 20.60 + 0.12204 0.00 2.890 0 0.4450 6.6250 57.80 3.4952 2 276.0 18.00 357.98 6.65 28.40 + 0.11504 0.00 2.890 0 0.4450 6.1630 69.60 3.4952 2 276.0 18.00 391.83 11.34 21.40 + 0.12083 0.00 2.890 0 0.4450 8.0690 76.00 3.4952 2 276.0 18.00 396.90 4.21 38.70 + 0.08187 0.00 2.890 0 0.4450 7.8200 36.90 3.4952 2 276.0 18.00 393.53 3.57 43.80 + 0.06860 0.00 2.890 0 0.4450 7.4160 62.50 3.4952 2 276.0 18.00 396.90 6.19 33.20 + 0.14866 0.00 8.560 0 0.5200 6.7270 79.90 2.7778 5 384.0 20.90 394.76 9.42 27.50 + 0.11432 0.00 8.560 0 0.5200 6.7810 71.30 2.8561 5 384.0 20.90 395.58 7.67 26.50 + 0.22876 0.00 8.560 0 0.5200 6.4050 85.40 2.7147 5 384.0 20.90 70.80 10.63 18.60 + 0.21161 0.00 8.560 0 0.5200 6.1370 87.40 2.7147 5 384.0 20.90 394.47 13.44 19.30 + 0.13960 0.00 8.560 0 0.5200 6.1670 90.00 2.4210 5 384.0 20.90 392.69 12.33 20.10 + 0.13262 0.00 8.560 0 0.5200 5.8510 96.70 2.1069 5 384.0 20.90 394.05 16.47 19.50 + 0.17120 0.00 8.560 0 0.5200 5.8360 91.90 2.2110 5 384.0 20.90 395.67 18.66 19.50 + 0.13117 0.00 8.560 0 0.5200 6.1270 85.20 2.1224 5 384.0 20.90 387.69 14.09 20.40 + 0.12802 0.00 8.560 0 0.5200 6.4740 97.10 2.4329 5 384.0 20.90 395.24 12.27 19.80 + 0.26363 0.00 8.560 0 0.5200 6.2290 91.20 2.5451 5 384.0 20.90 391.23 15.55 19.40 + 0.10793 0.00 8.560 0 0.5200 6.1950 54.40 2.7778 5 384.0 20.90 393.49 13.00 21.70 + 0.10084 0.00 10.010 0 0.5470 6.7150 81.60 2.6775 6 432.0 17.80 395.59 10.16 22.80 + 0.12329 0.00 10.010 0 0.5470 5.9130 92.90 2.3534 6 432.0 17.80 394.95 16.21 18.80 + 0.22212 0.00 10.010 0 0.5470 6.0920 95.40 2.5480 6 432.0 17.80 396.90 17.09 18.70 + 0.14231 0.00 10.010 0 0.5470 6.2540 84.20 2.2565 6 432.0 17.80 388.74 10.45 18.50 + 0.17134 0.00 10.010 0 0.5470 5.9280 88.20 2.4631 6 432.0 17.80 344.91 15.76 18.30 + 0.13158 0.00 10.010 0 0.5470 6.1760 72.50 2.7301 6 432.0 17.80 393.30 12.04 21.20 + 0.15098 0.00 10.010 0 0.5470 6.0210 82.60 2.7474 6 432.0 17.80 394.51 10.30 19.20 + 0.13058 0.00 10.010 0 0.5470 5.8720 73.10 2.4775 6 432.0 17.80 338.63 15.37 20.40 + 0.14476 0.00 10.010 0 0.5470 5.7310 65.20 2.7592 6 432.0 17.80 391.50 13.61 19.30 + 0.06899 0.00 25.650 0 0.5810 5.8700 69.70 2.2577 2 188.0 19.10 389.15 14.37 22.00 + 0.07165 0.00 25.650 0 0.5810 6.0040 84.10 2.1974 2 188.0 19.10 377.67 14.27 20.30 + 0.09299 0.00 25.650 0 0.5810 5.9610 92.90 2.0869 2 188.0 19.10 378.09 17.93 20.50 + 0.15038 0.00 25.650 0 0.5810 5.8560 97.00 1.9444 2 188.0 19.10 370.31 25.41 17.30 + 0.09849 0.00 25.650 0 0.5810 5.8790 95.80 2.0063 2 188.0 19.10 379.38 17.58 18.80 + 0.16902 0.00 25.650 0 0.5810 
5.9860 88.40 1.9929 2 188.0 19.10 385.02 14.81 21.40 + 0.38735 0.00 25.650 0 0.5810 5.6130 95.60 1.7572 2 188.0 19.10 359.29 27.26 15.70 + 0.25915 0.00 21.890 0 0.6240 5.6930 96.00 1.7883 4 437.0 21.20 392.11 17.19 16.20 + 0.32543 0.00 21.890 0 0.6240 6.4310 98.80 1.8125 4 437.0 21.20 396.90 15.39 18.00 + 0.88125 0.00 21.890 0 0.6240 5.6370 94.70 1.9799 4 437.0 21.20 396.90 18.34 14.30 + 0.34006 0.00 21.890 0 0.6240 6.4580 98.90 2.1185 4 437.0 21.20 395.04 12.60 19.20 + 1.19294 0.00 21.890 0 0.6240 6.3260 97.70 2.2710 4 437.0 21.20 396.90 12.26 19.60 + 0.59005 0.00 21.890 0 0.6240 6.3720 97.90 2.3274 4 437.0 21.20 385.76 11.12 23.00 + 0.32982 0.00 21.890 0 0.6240 5.8220 95.40 2.4699 4 437.0 21.20 388.69 15.03 18.40 + 0.97617 0.00 21.890 0 0.6240 5.7570 98.40 2.3460 4 437.0 21.20 262.76 17.31 15.60 + 0.55778 0.00 21.890 0 0.6240 6.3350 98.20 2.1107 4 437.0 21.20 394.67 16.96 18.10 + 0.32264 0.00 21.890 0 0.6240 5.9420 93.50 1.9669 4 437.0 21.20 378.25 16.90 17.40 + 0.35233 0.00 21.890 0 0.6240 6.4540 98.40 1.8498 4 437.0 21.20 394.08 14.59 17.10 + 0.24980 0.00 21.890 0 0.6240 5.8570 98.20 1.6686 4 437.0 21.20 392.04 21.32 13.30 + 0.54452 0.00 21.890 0 0.6240 6.1510 97.90 1.6687 4 437.0 21.20 396.90 18.46 17.80 + 0.29090 0.00 21.890 0 0.6240 6.1740 93.60 1.6119 4 437.0 21.20 388.08 24.16 14.00 + 1.62864 0.00 21.890 0 0.6240 5.0190 100.00 1.4394 4 437.0 21.20 396.90 34.41 14.40 + 3.32105 0.00 19.580 1 0.8710 5.4030 100.00 1.3216 5 403.0 14.70 396.90 26.82 13.40 + 4.09740 0.00 19.580 0 0.8710 5.4680 100.00 1.4118 5 403.0 14.70 396.90 26.42 15.60 + 2.77974 0.00 19.580 0 0.8710 4.9030 97.80 1.3459 5 403.0 14.70 396.90 29.29 11.80 + 2.37934 0.00 19.580 0 0.8710 6.1300 100.00 1.4191 5 403.0 14.70 172.91 27.80 13.80 + 2.15505 0.00 19.580 0 0.8710 5.6280 100.00 1.5166 5 403.0 14.70 169.27 16.65 15.60 + 2.36862 0.00 19.580 0 0.8710 4.9260 95.70 1.4608 5 403.0 14.70 391.71 29.53 14.60 + 2.33099 0.00 19.580 0 0.8710 5.1860 93.80 1.5296 5 403.0 14.70 356.99 28.32 17.80 + 2.73397 0.00 19.580 0 0.8710 5.5970 94.90 1.5257 5 403.0 14.70 351.85 21.45 15.40 + 1.65660 0.00 19.580 0 0.8710 6.1220 97.30 1.6180 5 403.0 14.70 372.80 14.10 21.50 + 1.49632 0.00 19.580 0 0.8710 5.4040 100.00 1.5916 5 403.0 14.70 341.60 13.28 19.60 + 1.12658 0.00 19.580 1 0.8710 5.0120 88.00 1.6102 5 403.0 14.70 343.28 12.12 15.30 + 2.14918 0.00 19.580 0 0.8710 5.7090 98.50 1.6232 5 403.0 14.70 261.95 15.79 19.40 + 1.41385 0.00 19.580 1 0.8710 6.1290 96.00 1.7494 5 403.0 14.70 321.02 15.12 17.00 + 3.53501 0.00 19.580 1 0.8710 6.1520 82.60 1.7455 5 403.0 14.70 88.01 15.02 15.60 + 2.44668 0.00 19.580 0 0.8710 5.2720 94.00 1.7364 5 403.0 14.70 88.63 16.14 13.10 + 1.22358 0.00 19.580 0 0.6050 6.9430 97.40 1.8773 5 403.0 14.70 363.43 4.59 41.30 + 1.34284 0.00 19.580 0 0.6050 6.0660 100.00 1.7573 5 403.0 14.70 353.89 6.43 24.30 + 1.42502 0.00 19.580 0 0.8710 6.5100 100.00 1.7659 5 403.0 14.70 364.31 7.39 23.30 + 1.27346 0.00 19.580 1 0.6050 6.2500 92.60 1.7984 5 403.0 14.70 338.92 5.50 27.00 + 1.46336 0.00 19.580 0 0.6050 7.4890 90.80 1.9709 5 403.0 14.70 374.43 1.73 50.00 + 1.83377 0.00 19.580 1 0.6050 7.8020 98.20 2.0407 5 403.0 14.70 389.61 1.92 50.00 + 1.51902 0.00 19.580 1 0.6050 8.3750 93.90 2.1620 5 403.0 14.70 388.45 3.32 50.00 + 2.24236 0.00 19.580 0 0.6050 5.8540 91.80 2.4220 5 403.0 14.70 395.11 11.64 22.70 + 2.92400 0.00 19.580 0 0.6050 6.1010 93.00 2.2834 5 403.0 14.70 240.16 9.81 25.00 + 2.01019 0.00 19.580 0 0.6050 7.9290 96.20 2.0459 5 403.0 14.70 369.30 3.70 50.00 + 1.80028 0.00 19.580 0 0.6050 5.8770 79.20 2.4259 5 403.0 
14.70 227.61 12.14 23.80 + 2.30040 0.00 19.580 0 0.6050 6.3190 96.10 2.1000 5 403.0 14.70 297.09 11.10 23.80 + 2.44953 0.00 19.580 0 0.6050 6.4020 95.20 2.2625 5 403.0 14.70 330.04 11.32 22.30 + 1.20742 0.00 19.580 0 0.6050 5.8750 94.60 2.4259 5 403.0 14.70 292.29 14.43 17.40 + 2.31390 0.00 19.580 0 0.6050 5.8800 97.30 2.3887 5 403.0 14.70 348.13 12.03 19.10 + 0.13914 0.00 4.050 0 0.5100 5.5720 88.50 2.5961 5 296.0 16.60 396.90 14.69 23.10 + 0.09178 0.00 4.050 0 0.5100 6.4160 84.10 2.6463 5 296.0 16.60 395.50 9.04 23.60 + 0.08447 0.00 4.050 0 0.5100 5.8590 68.70 2.7019 5 296.0 16.60 393.23 9.64 22.60 + 0.06664 0.00 4.050 0 0.5100 6.5460 33.10 3.1323 5 296.0 16.60 390.96 5.33 29.40 + 0.07022 0.00 4.050 0 0.5100 6.0200 47.20 3.5549 5 296.0 16.60 393.23 10.11 23.20 + 0.05425 0.00 4.050 0 0.5100 6.3150 73.40 3.3175 5 296.0 16.60 395.60 6.29 24.60 + 0.06642 0.00 4.050 0 0.5100 6.8600 74.40 2.9153 5 296.0 16.60 391.27 6.92 29.90 + 0.05780 0.00 2.460 0 0.4880 6.9800 58.40 2.8290 3 193.0 17.80 396.90 5.04 37.20 + 0.06588 0.00 2.460 0 0.4880 7.7650 83.30 2.7410 3 193.0 17.80 395.56 7.56 39.80 + 0.06888 0.00 2.460 0 0.4880 6.1440 62.20 2.5979 3 193.0 17.80 396.90 9.45 36.20 + 0.09103 0.00 2.460 0 0.4880 7.1550 92.20 2.7006 3 193.0 17.80 394.12 4.82 37.90 + 0.10008 0.00 2.460 0 0.4880 6.5630 95.60 2.8470 3 193.0 17.80 396.90 5.68 32.50 + 0.08308 0.00 2.460 0 0.4880 5.6040 89.80 2.9879 3 193.0 17.80 391.00 13.98 26.40 + 0.06047 0.00 2.460 0 0.4880 6.1530 68.80 3.2797 3 193.0 17.80 387.11 13.15 29.60 + 0.05602 0.00 2.460 0 0.4880 7.8310 53.60 3.1992 3 193.0 17.80 392.63 4.45 50.00 + 0.07875 45.00 3.440 0 0.4370 6.7820 41.10 3.7886 5 398.0 15.20 393.87 6.68 32.00 + 0.12579 45.00 3.440 0 0.4370 6.5560 29.10 4.5667 5 398.0 15.20 382.84 4.56 29.80 + 0.08370 45.00 3.440 0 0.4370 7.1850 38.90 4.5667 5 398.0 15.20 396.90 5.39 34.90 + 0.09068 45.00 3.440 0 0.4370 6.9510 21.50 6.4798 5 398.0 15.20 377.68 5.10 37.00 + 0.06911 45.00 3.440 0 0.4370 6.7390 30.80 6.4798 5 398.0 15.20 389.71 4.69 30.50 + 0.08664 45.00 3.440 0 0.4370 7.1780 26.30 6.4798 5 398.0 15.20 390.49 2.87 36.40 + 0.02187 60.00 2.930 0 0.4010 6.8000 9.90 6.2196 1 265.0 15.60 393.37 5.03 31.10 + 0.01439 60.00 2.930 0 0.4010 6.6040 18.80 6.2196 1 265.0 15.60 376.70 4.38 29.10 + 0.01381 80.00 0.460 0 0.4220 7.8750 32.00 5.6484 4 255.0 14.40 394.23 2.97 50.00 + 0.04011 80.00 1.520 0 0.4040 7.2870 34.10 7.3090 2 329.0 12.60 396.90 4.08 33.30 + 0.04666 80.00 1.520 0 0.4040 7.1070 36.60 7.3090 2 329.0 12.60 354.31 8.61 30.30 + 0.03768 80.00 1.520 0 0.4040 7.2740 38.30 7.3090 2 329.0 12.60 392.20 6.62 34.60 + 0.03150 95.00 1.470 0 0.4030 6.9750 15.30 7.6534 3 402.0 17.00 396.90 4.56 34.90 + 0.01778 95.00 1.470 0 0.4030 7.1350 13.90 7.6534 3 402.0 17.00 384.30 4.45 32.90 + 0.03445 82.50 2.030 0 0.4150 6.1620 38.40 6.2700 2 348.0 14.70 393.77 7.43 24.10 + 0.02177 82.50 2.030 0 0.4150 7.6100 15.70 6.2700 2 348.0 14.70 395.38 3.11 42.30 + 0.03510 95.00 2.680 0 0.4161 7.8530 33.20 5.1180 4 224.0 14.70 392.78 3.81 48.50 + 0.02009 95.00 2.680 0 0.4161 8.0340 31.90 5.1180 4 224.0 14.70 390.55 2.88 50.00 + 0.13642 0.00 10.590 0 0.4890 5.8910 22.30 3.9454 4 277.0 18.60 396.90 10.87 22.60 + 0.22969 0.00 10.590 0 0.4890 6.3260 52.50 4.3549 4 277.0 18.60 394.87 10.97 24.40 + 0.25199 0.00 10.590 0 0.4890 5.7830 72.70 4.3549 4 277.0 18.60 389.43 18.06 22.50 + 0.13587 0.00 10.590 1 0.4890 6.0640 59.10 4.2392 4 277.0 18.60 381.32 14.66 24.40 + 0.43571 0.00 10.590 1 0.4890 5.3440 100.00 3.8750 4 277.0 18.60 396.90 23.09 20.00 + 0.17446 0.00 10.590 1 0.4890 5.9600 92.10 
3.8771 4 277.0 18.60 393.25 17.27 21.70 + 0.37578 0.00 10.590 1 0.4890 5.4040 88.60 3.6650 4 277.0 18.60 395.24 23.98 19.30 + 0.21719 0.00 10.590 1 0.4890 5.8070 53.80 3.6526 4 277.0 18.60 390.94 16.03 22.40 + 0.14052 0.00 10.590 0 0.4890 6.3750 32.30 3.9454 4 277.0 18.60 385.81 9.38 28.10 + 0.28955 0.00 10.590 0 0.4890 5.4120 9.80 3.5875 4 277.0 18.60 348.93 29.55 23.70 + 0.19802 0.00 10.590 0 0.4890 6.1820 42.40 3.9454 4 277.0 18.60 393.63 9.47 25.00 + 0.04560 0.00 13.890 1 0.5500 5.8880 56.00 3.1121 5 276.0 16.40 392.80 13.51 23.30 + 0.07013 0.00 13.890 0 0.5500 6.6420 85.10 3.4211 5 276.0 16.40 392.78 9.69 28.70 + 0.11069 0.00 13.890 1 0.5500 5.9510 93.80 2.8893 5 276.0 16.40 396.90 17.92 21.50 + 0.11425 0.00 13.890 1 0.5500 6.3730 92.40 3.3633 5 276.0 16.40 393.74 10.50 23.00 + 0.35809 0.00 6.200 1 0.5070 6.9510 88.50 2.8617 8 307.0 17.40 391.70 9.71 26.70 + 0.40771 0.00 6.200 1 0.5070 6.1640 91.30 3.0480 8 307.0 17.40 395.24 21.46 21.70 + 0.62356 0.00 6.200 1 0.5070 6.8790 77.70 3.2721 8 307.0 17.40 390.39 9.93 27.50 + 0.61470 0.00 6.200 0 0.5070 6.6180 80.80 3.2721 8 307.0 17.40 396.90 7.60 30.10 + 0.31533 0.00 6.200 0 0.5040 8.2660 78.30 2.8944 8 307.0 17.40 385.05 4.14 44.80 + 0.52693 0.00 6.200 0 0.5040 8.7250 83.00 2.8944 8 307.0 17.40 382.00 4.63 50.00 + 0.38214 0.00 6.200 0 0.5040 8.0400 86.50 3.2157 8 307.0 17.40 387.38 3.13 37.60 + 0.41238 0.00 6.200 0 0.5040 7.1630 79.90 3.2157 8 307.0 17.40 372.08 6.36 31.60 + 0.29819 0.00 6.200 0 0.5040 7.6860 17.00 3.3751 8 307.0 17.40 377.51 3.92 46.70 + 0.44178 0.00 6.200 0 0.5040 6.5520 21.40 3.3751 8 307.0 17.40 380.34 3.76 31.50 + 0.53700 0.00 6.200 0 0.5040 5.9810 68.10 3.6715 8 307.0 17.40 378.35 11.65 24.30 + 0.46296 0.00 6.200 0 0.5040 7.4120 76.90 3.6715 8 307.0 17.40 376.14 5.25 31.70 + 0.57529 0.00 6.200 0 0.5070 8.3370 73.30 3.8384 8 307.0 17.40 385.91 2.47 41.70 + 0.33147 0.00 6.200 0 0.5070 8.2470 70.40 3.6519 8 307.0 17.40 378.95 3.95 48.30 + 0.44791 0.00 6.200 1 0.5070 6.7260 66.50 3.6519 8 307.0 17.40 360.20 8.05 29.00 + 0.33045 0.00 6.200 0 0.5070 6.0860 61.50 3.6519 8 307.0 17.40 376.75 10.88 24.00 + 0.52058 0.00 6.200 1 0.5070 6.6310 76.50 4.1480 8 307.0 17.40 388.45 9.54 25.10 + 0.51183 0.00 6.200 0 0.5070 7.3580 71.60 4.1480 8 307.0 17.40 390.07 4.73 31.50 + 0.08244 30.00 4.930 0 0.4280 6.4810 18.50 6.1899 6 300.0 16.60 379.41 6.36 23.70 + 0.09252 30.00 4.930 0 0.4280 6.6060 42.20 6.1899 6 300.0 16.60 383.78 7.37 23.30 + 0.11329 30.00 4.930 0 0.4280 6.8970 54.30 6.3361 6 300.0 16.60 391.25 11.38 22.00 + 0.10612 30.00 4.930 0 0.4280 6.0950 65.10 6.3361 6 300.0 16.60 394.62 12.40 20.10 + 0.10290 30.00 4.930 0 0.4280 6.3580 52.90 7.0355 6 300.0 16.60 372.75 11.22 22.20 + 0.12757 30.00 4.930 0 0.4280 6.3930 7.80 7.0355 6 300.0 16.60 374.71 5.19 23.70 + 0.20608 22.00 5.860 0 0.4310 5.5930 76.50 7.9549 7 330.0 19.10 372.49 12.50 17.60 + 0.19133 22.00 5.860 0 0.4310 5.6050 70.20 7.9549 7 330.0 19.10 389.13 18.46 18.50 + 0.33983 22.00 5.860 0 0.4310 6.1080 34.90 8.0555 7 330.0 19.10 390.18 9.16 24.30 + 0.19657 22.00 5.860 0 0.4310 6.2260 79.20 8.0555 7 330.0 19.10 376.14 10.15 20.50 + 0.16439 22.00 5.860 0 0.4310 6.4330 49.10 7.8265 7 330.0 19.10 374.71 9.52 24.50 + 0.19073 22.00 5.860 0 0.4310 6.7180 17.50 7.8265 7 330.0 19.10 393.74 6.56 26.20 + 0.14030 22.00 5.860 0 0.4310 6.4870 13.00 7.3967 7 330.0 19.10 396.28 5.90 24.40 + 0.21409 22.00 5.860 0 0.4310 6.4380 8.90 7.3967 7 330.0 19.10 377.07 3.59 24.80 + 0.08221 22.00 5.860 0 0.4310 6.9570 6.80 8.9067 7 330.0 19.10 386.09 3.53 29.60 + 0.36894 22.00 5.860 0 0.4310 
8.2590 8.40 8.9067 7 330.0 19.10 396.90 3.54 42.80 + 0.04819 80.00 3.640 0 0.3920 6.1080 32.00 9.2203 1 315.0 16.40 392.89 6.57 21.90 + 0.03548 80.00 3.640 0 0.3920 5.8760 19.10 9.2203 1 315.0 16.40 395.18 9.25 20.90 + 0.01538 90.00 3.750 0 0.3940 7.4540 34.20 6.3361 3 244.0 15.90 386.34 3.11 44.00 + 0.61154 20.00 3.970 0 0.6470 8.7040 86.90 1.8010 5 264.0 13.00 389.70 5.12 50.00 + 0.66351 20.00 3.970 0 0.6470 7.3330 100.00 1.8946 5 264.0 13.00 383.29 7.79 36.00 + 0.65665 20.00 3.970 0 0.6470 6.8420 100.00 2.0107 5 264.0 13.00 391.93 6.90 30.10 + 0.54011 20.00 3.970 0 0.6470 7.2030 81.80 2.1121 5 264.0 13.00 392.80 9.59 33.80 + 0.53412 20.00 3.970 0 0.6470 7.5200 89.40 2.1398 5 264.0 13.00 388.37 7.26 43.10 + 0.52014 20.00 3.970 0 0.6470 8.3980 91.50 2.2885 5 264.0 13.00 386.86 5.91 48.80 + 0.82526 20.00 3.970 0 0.6470 7.3270 94.50 2.0788 5 264.0 13.00 393.42 11.25 31.00 + 0.55007 20.00 3.970 0 0.6470 7.2060 91.60 1.9301 5 264.0 13.00 387.89 8.10 36.50 + 0.76162 20.00 3.970 0 0.6470 5.5600 62.80 1.9865 5 264.0 13.00 392.40 10.45 22.80 + 0.78570 20.00 3.970 0 0.6470 7.0140 84.60 2.1329 5 264.0 13.00 384.07 14.79 30.70 + 0.57834 20.00 3.970 0 0.5750 8.2970 67.00 2.4216 5 264.0 13.00 384.54 7.44 50.00 + 0.54050 20.00 3.970 0 0.5750 7.4700 52.60 2.8720 5 264.0 13.00 390.30 3.16 43.50 + 0.09065 20.00 6.960 1 0.4640 5.9200 61.50 3.9175 3 223.0 18.60 391.34 13.65 20.70 + 0.29916 20.00 6.960 0 0.4640 5.8560 42.10 4.4290 3 223.0 18.60 388.65 13.00 21.10 + 0.16211 20.00 6.960 0 0.4640 6.2400 16.30 4.4290 3 223.0 18.60 396.90 6.59 25.20 + 0.11460 20.00 6.960 0 0.4640 6.5380 58.70 3.9175 3 223.0 18.60 394.96 7.73 24.40 + 0.22188 20.00 6.960 1 0.4640 7.6910 51.80 4.3665 3 223.0 18.60 390.77 6.58 35.20 + 0.05644 40.00 6.410 1 0.4470 6.7580 32.90 4.0776 4 254.0 17.60 396.90 3.53 32.40 + 0.09604 40.00 6.410 0 0.4470 6.8540 42.80 4.2673 4 254.0 17.60 396.90 2.98 32.00 + 0.10469 40.00 6.410 1 0.4470 7.2670 49.00 4.7872 4 254.0 17.60 389.25 6.05 33.20 + 0.06127 40.00 6.410 1 0.4470 6.8260 27.60 4.8628 4 254.0 17.60 393.45 4.16 33.10 + 0.07978 40.00 6.410 0 0.4470 6.4820 32.10 4.1403 4 254.0 17.60 396.90 7.19 29.10 + 0.21038 20.00 3.330 0 0.4429 6.8120 32.20 4.1007 5 216.0 14.90 396.90 4.85 35.10 + 0.03578 20.00 3.330 0 0.4429 7.8200 64.50 4.6947 5 216.0 14.90 387.31 3.76 45.40 + 0.03705 20.00 3.330 0 0.4429 6.9680 37.20 5.2447 5 216.0 14.90 392.23 4.59 35.40 + 0.06129 20.00 3.330 1 0.4429 7.6450 49.70 5.2119 5 216.0 14.90 377.07 3.01 46.00 + 0.01501 90.00 1.210 1 0.4010 7.9230 24.80 5.8850 1 198.0 13.60 395.52 3.16 50.00 + 0.00906 90.00 2.970 0 0.4000 7.0880 20.80 7.3073 1 285.0 15.30 394.72 7.85 32.20 + 0.01096 55.00 2.250 0 0.3890 6.4530 31.90 7.3073 1 300.0 15.30 394.72 8.23 22.00 + 0.01965 80.00 1.760 0 0.3850 6.2300 31.50 9.0892 1 241.0 18.20 341.60 12.93 20.10 + 0.03871 52.50 5.320 0 0.4050 6.2090 31.30 7.3172 6 293.0 16.60 396.90 7.14 23.20 + 0.04590 52.50 5.320 0 0.4050 6.3150 45.60 7.3172 6 293.0 16.60 396.90 7.60 22.30 + 0.04297 52.50 5.320 0 0.4050 6.5650 22.90 7.3172 6 293.0 16.60 371.72 9.51 24.80 + 0.03502 80.00 4.950 0 0.4110 6.8610 27.90 5.1167 4 245.0 19.20 396.90 3.33 28.50 + 0.07886 80.00 4.950 0 0.4110 7.1480 27.70 5.1167 4 245.0 19.20 396.90 3.56 37.30 + 0.03615 80.00 4.950 0 0.4110 6.6300 23.40 5.1167 4 245.0 19.20 396.90 4.70 27.90 + 0.08265 0.00 13.920 0 0.4370 6.1270 18.40 5.5027 4 289.0 16.00 396.90 8.58 23.90 + 0.08199 0.00 13.920 0 0.4370 6.0090 42.30 5.5027 4 289.0 16.00 396.90 10.40 21.70 + 0.12932 0.00 13.920 0 0.4370 6.6780 31.10 5.9604 4 289.0 16.00 396.90 6.27 28.60 + 
0.05372 0.00 13.920 0 0.4370 6.5490 51.00 5.9604 4 289.0 16.00 392.85 7.39 27.10 + 0.14103 0.00 13.920 0 0.4370 5.7900 58.00 6.3200 4 289.0 16.00 396.90 15.84 20.30 + 0.06466 70.00 2.240 0 0.4000 6.3450 20.10 7.8278 5 358.0 14.80 368.24 4.97 22.50 + 0.05561 70.00 2.240 0 0.4000 7.0410 10.00 7.8278 5 358.0 14.80 371.58 4.74 29.00 + 0.04417 70.00 2.240 0 0.4000 6.8710 47.40 7.8278 5 358.0 14.80 390.86 6.07 24.80 + 0.03537 34.00 6.090 0 0.4330 6.5900 40.40 5.4917 7 329.0 16.10 395.75 9.50 22.00 + 0.09266 34.00 6.090 0 0.4330 6.4950 18.40 5.4917 7 329.0 16.10 383.61 8.67 26.40 + 0.10000 34.00 6.090 0 0.4330 6.9820 17.70 5.4917 7 329.0 16.10 390.43 4.86 33.10 + 0.05515 33.00 2.180 0 0.4720 7.2360 41.10 4.0220 7 222.0 18.40 393.68 6.93 36.10 + 0.05479 33.00 2.180 0 0.4720 6.6160 58.10 3.3700 7 222.0 18.40 393.36 8.93 28.40 + 0.07503 33.00 2.180 0 0.4720 7.4200 71.90 3.0992 7 222.0 18.40 396.90 6.47 33.40 + 0.04932 33.00 2.180 0 0.4720 6.8490 70.30 3.1827 7 222.0 18.40 396.90 7.53 28.20 + 0.49298 0.00 9.900 0 0.5440 6.6350 82.50 3.3175 4 304.0 18.40 396.90 4.54 22.80 + 0.34940 0.00 9.900 0 0.5440 5.9720 76.70 3.1025 4 304.0 18.40 396.24 9.97 20.30 + 2.63548 0.00 9.900 0 0.5440 4.9730 37.80 2.5194 4 304.0 18.40 350.45 12.64 16.10 + 0.79041 0.00 9.900 0 0.5440 6.1220 52.80 2.6403 4 304.0 18.40 396.90 5.98 22.10 + 0.26169 0.00 9.900 0 0.5440 6.0230 90.40 2.8340 4 304.0 18.40 396.30 11.72 19.40 + 0.26938 0.00 9.900 0 0.5440 6.2660 82.80 3.2628 4 304.0 18.40 393.39 7.90 21.60 + 0.36920 0.00 9.900 0 0.5440 6.5670 87.30 3.6023 4 304.0 18.40 395.69 9.28 23.80 + 0.25356 0.00 9.900 0 0.5440 5.7050 77.70 3.9450 4 304.0 18.40 396.42 11.50 16.20 + 0.31827 0.00 9.900 0 0.5440 5.9140 83.20 3.9986 4 304.0 18.40 390.70 18.33 17.80 + 0.24522 0.00 9.900 0 0.5440 5.7820 71.70 4.0317 4 304.0 18.40 396.90 15.94 19.80 + 0.40202 0.00 9.900 0 0.5440 6.3820 67.20 3.5325 4 304.0 18.40 395.21 10.36 23.10 + 0.47547 0.00 9.900 0 0.5440 6.1130 58.80 4.0019 4 304.0 18.40 396.23 12.73 21.00 + 0.16760 0.00 7.380 0 0.4930 6.4260 52.30 4.5404 5 287.0 19.60 396.90 7.20 23.80 + 0.18159 0.00 7.380 0 0.4930 6.3760 54.30 4.5404 5 287.0 19.60 396.90 6.87 23.10 + 0.35114 0.00 7.380 0 0.4930 6.0410 49.90 4.7211 5 287.0 19.60 396.90 7.70 20.40 + 0.28392 0.00 7.380 0 0.4930 5.7080 74.30 4.7211 5 287.0 19.60 391.13 11.74 18.50 + 0.34109 0.00 7.380 0 0.4930 6.4150 40.10 4.7211 5 287.0 19.60 396.90 6.12 25.00 + 0.19186 0.00 7.380 0 0.4930 6.4310 14.70 5.4159 5 287.0 19.60 393.68 5.08 24.60 + 0.30347 0.00 7.380 0 0.4930 6.3120 28.90 5.4159 5 287.0 19.60 396.90 6.15 23.00 + 0.24103 0.00 7.380 0 0.4930 6.0830 43.70 5.4159 5 287.0 19.60 396.90 12.79 22.20 + 0.06617 0.00 3.240 0 0.4600 5.8680 25.80 5.2146 4 430.0 16.90 382.44 9.97 19.30 + 0.06724 0.00 3.240 0 0.4600 6.3330 17.20 5.2146 4 430.0 16.90 375.21 7.34 22.60 + 0.04544 0.00 3.240 0 0.4600 6.1440 32.20 5.8736 4 430.0 16.90 368.57 9.09 19.80 + 0.05023 35.00 6.060 0 0.4379 5.7060 28.40 6.6407 1 304.0 16.90 394.02 12.43 17.10 + 0.03466 35.00 6.060 0 0.4379 6.0310 23.30 6.6407 1 304.0 16.90 362.25 7.83 19.40 + 0.05083 0.00 5.190 0 0.5150 6.3160 38.10 6.4584 5 224.0 20.20 389.71 5.68 22.20 + 0.03738 0.00 5.190 0 0.5150 6.3100 38.50 6.4584 5 224.0 20.20 389.40 6.75 20.70 + 0.03961 0.00 5.190 0 0.5150 6.0370 34.50 5.9853 5 224.0 20.20 396.90 8.01 21.10 + 0.03427 0.00 5.190 0 0.5150 5.8690 46.30 5.2311 5 224.0 20.20 396.90 9.80 19.50 + 0.03041 0.00 5.190 0 0.5150 5.8950 59.60 5.6150 5 224.0 20.20 394.81 10.56 18.50 + 0.03306 0.00 5.190 0 0.5150 6.0590 37.30 4.8122 5 224.0 20.20 396.14 8.51 20.60 + 
0.05497 0.00 5.190 0 0.5150 5.9850 45.40 4.8122 5 224.0 20.20 396.90 9.74 19.00 + 0.06151 0.00 5.190 0 0.5150 5.9680 58.50 4.8122 5 224.0 20.20 396.90 9.29 18.70 + 0.01301 35.00 1.520 0 0.4420 7.2410 49.30 7.0379 1 284.0 15.50 394.74 5.49 32.70 + 0.02498 0.00 1.890 0 0.5180 6.5400 59.70 6.2669 1 422.0 15.90 389.96 8.65 16.50 + 0.02543 55.00 3.780 0 0.4840 6.6960 56.40 5.7321 5 370.0 17.60 396.90 7.18 23.90 + 0.03049 55.00 3.780 0 0.4840 6.8740 28.10 6.4654 5 370.0 17.60 387.97 4.61 31.20 + 0.03113 0.00 4.390 0 0.4420 6.0140 48.50 8.0136 3 352.0 18.80 385.64 10.53 17.50 + 0.06162 0.00 4.390 0 0.4420 5.8980 52.30 8.0136 3 352.0 18.80 364.61 12.67 17.20 + 0.01870 85.00 4.150 0 0.4290 6.5160 27.70 8.5353 4 351.0 17.90 392.43 6.36 23.10 + 0.01501 80.00 2.010 0 0.4350 6.6350 29.70 8.3440 4 280.0 17.00 390.94 5.99 24.50 + 0.02899 40.00 1.250 0 0.4290 6.9390 34.50 8.7921 1 335.0 19.70 389.85 5.89 26.60 + 0.06211 40.00 1.250 0 0.4290 6.4900 44.40 8.7921 1 335.0 19.70 396.90 5.98 22.90 + 0.07950 60.00 1.690 0 0.4110 6.5790 35.90 10.7103 4 411.0 18.30 370.78 5.49 24.10 + 0.07244 60.00 1.690 0 0.4110 5.8840 18.50 10.7103 4 411.0 18.30 392.33 7.79 18.60 + 0.01709 90.00 2.020 0 0.4100 6.7280 36.10 12.1265 5 187.0 17.00 384.46 4.50 30.10 + 0.04301 80.00 1.910 0 0.4130 5.6630 21.90 10.5857 4 334.0 22.00 382.80 8.05 18.20 + 0.10659 80.00 1.910 0 0.4130 5.9360 19.50 10.5857 4 334.0 22.00 376.04 5.57 20.60 + 8.98296 0.00 18.100 1 0.7700 6.2120 97.40 2.1222 24 666.0 20.20 377.73 17.60 17.80 + 3.84970 0.00 18.100 1 0.7700 6.3950 91.00 2.5052 24 666.0 20.20 391.34 13.27 21.70 + 5.20177 0.00 18.100 1 0.7700 6.1270 83.40 2.7227 24 666.0 20.20 395.43 11.48 22.70 + 4.26131 0.00 18.100 0 0.7700 6.1120 81.30 2.5091 24 666.0 20.20 390.74 12.67 22.60 + 4.54192 0.00 18.100 0 0.7700 6.3980 88.00 2.5182 24 666.0 20.20 374.56 7.79 25.00 + 3.83684 0.00 18.100 0 0.7700 6.2510 91.10 2.2955 24 666.0 20.20 350.65 14.19 19.90 + 3.67822 0.00 18.100 0 0.7700 5.3620 96.20 2.1036 24 666.0 20.20 380.79 10.19 20.80 + 4.22239 0.00 18.100 1 0.7700 5.8030 89.00 1.9047 24 666.0 20.20 353.04 14.64 16.80 + 3.47428 0.00 18.100 1 0.7180 8.7800 82.90 1.9047 24 666.0 20.20 354.55 5.29 21.90 + 4.55587 0.00 18.100 0 0.7180 3.5610 87.90 1.6132 24 666.0 20.20 354.70 7.12 27.50 + 3.69695 0.00 18.100 0 0.7180 4.9630 91.40 1.7523 24 666.0 20.20 316.03 14.00 21.90 +13.52220 0.00 18.100 0 0.6310 3.8630 100.00 1.5106 24 666.0 20.20 131.42 13.33 23.10 + 4.89822 0.00 18.100 0 0.6310 4.9700 100.00 1.3325 24 666.0 20.20 375.52 3.26 50.00 + 5.66998 0.00 18.100 1 0.6310 6.6830 96.80 1.3567 24 666.0 20.20 375.33 3.73 50.00 + 6.53876 0.00 18.100 1 0.6310 7.0160 97.50 1.2024 24 666.0 20.20 392.05 2.96 50.00 + 9.23230 0.00 18.100 0 0.6310 6.2160 100.00 1.1691 24 666.0 20.20 366.15 9.53 50.00 + 8.26725 0.00 18.100 1 0.6680 5.8750 89.60 1.1296 24 666.0 20.20 347.88 8.88 50.00 +11.10810 0.00 18.100 0 0.6680 4.9060 100.00 1.1742 24 666.0 20.20 396.90 34.77 13.80 +18.49820 0.00 18.100 0 0.6680 4.1380 100.00 1.1370 24 666.0 20.20 396.90 37.97 13.80 +19.60910 0.00 18.100 0 0.6710 7.3130 97.90 1.3163 24 666.0 20.20 396.90 13.44 15.00 +15.28800 0.00 18.100 0 0.6710 6.6490 93.30 1.3449 24 666.0 20.20 363.02 23.24 13.90 + 9.82349 0.00 18.100 0 0.6710 6.7940 98.80 1.3580 24 666.0 20.20 396.90 21.24 13.30 +23.64820 0.00 18.100 0 0.6710 6.3800 96.20 1.3861 24 666.0 20.20 396.90 23.69 13.10 +17.86670 0.00 18.100 0 0.6710 6.2230 100.00 1.3861 24 666.0 20.20 393.74 21.78 10.20 +88.97620 0.00 18.100 0 0.6710 6.9680 91.90 1.4165 24 666.0 20.20 396.90 17.21 10.40 +15.87440 0.00 
18.100 0 0.6710 6.5450 99.10 1.5192 24 666.0 20.20 396.90 21.08 10.90 + 9.18702 0.00 18.100 0 0.7000 5.5360 100.00 1.5804 24 666.0 20.20 396.90 23.60 11.30 + 7.99248 0.00 18.100 0 0.7000 5.5200 100.00 1.5331 24 666.0 20.20 396.90 24.56 12.30 +20.08490 0.00 18.100 0 0.7000 4.3680 91.20 1.4395 24 666.0 20.20 285.83 30.63 8.80 +16.81180 0.00 18.100 0 0.7000 5.2770 98.10 1.4261 24 666.0 20.20 396.90 30.81 7.20 +24.39380 0.00 18.100 0 0.7000 4.6520 100.00 1.4672 24 666.0 20.20 396.90 28.28 10.50 +22.59710 0.00 18.100 0 0.7000 5.0000 89.50 1.5184 24 666.0 20.20 396.90 31.99 7.40 +14.33370 0.00 18.100 0 0.7000 4.8800 100.00 1.5895 24 666.0 20.20 372.92 30.62 10.20 + 8.15174 0.00 18.100 0 0.7000 5.3900 98.90 1.7281 24 666.0 20.20 396.90 20.85 11.50 + 6.96215 0.00 18.100 0 0.7000 5.7130 97.00 1.9265 24 666.0 20.20 394.43 17.11 15.10 + 5.29305 0.00 18.100 0 0.7000 6.0510 82.50 2.1678 24 666.0 20.20 378.38 18.76 23.20 +11.57790 0.00 18.100 0 0.7000 5.0360 97.00 1.7700 24 666.0 20.20 396.90 25.68 9.70 + 8.64476 0.00 18.100 0 0.6930 6.1930 92.60 1.7912 24 666.0 20.20 396.90 15.17 13.80 +13.35980 0.00 18.100 0 0.6930 5.8870 94.70 1.7821 24 666.0 20.20 396.90 16.35 12.70 + 8.71675 0.00 18.100 0 0.6930 6.4710 98.80 1.7257 24 666.0 20.20 391.98 17.12 13.10 + 5.87205 0.00 18.100 0 0.6930 6.4050 96.00 1.6768 24 666.0 20.20 396.90 19.37 12.50 + 7.67202 0.00 18.100 0 0.6930 5.7470 98.90 1.6334 24 666.0 20.20 393.10 19.92 8.50 +38.35180 0.00 18.100 0 0.6930 5.4530 100.00 1.4896 24 666.0 20.20 396.90 30.59 5.00 + 9.91655 0.00 18.100 0 0.6930 5.8520 77.80 1.5004 24 666.0 20.20 338.16 29.97 6.30 +25.04610 0.00 18.100 0 0.6930 5.9870 100.00 1.5888 24 666.0 20.20 396.90 26.77 5.60 +14.23620 0.00 18.100 0 0.6930 6.3430 100.00 1.5741 24 666.0 20.20 396.90 20.32 7.20 + 9.59571 0.00 18.100 0 0.6930 6.4040 100.00 1.6390 24 666.0 20.20 376.11 20.31 12.10 +24.80170 0.00 18.100 0 0.6930 5.3490 96.00 1.7028 24 666.0 20.20 396.90 19.77 8.30 +41.52920 0.00 18.100 0 0.6930 5.5310 85.40 1.6074 24 666.0 20.20 329.46 27.38 8.50 +67.92080 0.00 18.100 0 0.6930 5.6830 100.00 1.4254 24 666.0 20.20 384.97 22.98 5.00 +20.71620 0.00 18.100 0 0.6590 4.1380 100.00 1.1781 24 666.0 20.20 370.22 23.34 11.90 +11.95110 0.00 18.100 0 0.6590 5.6080 100.00 1.2852 24 666.0 20.20 332.09 12.13 27.90 + 7.40389 0.00 18.100 0 0.5970 5.6170 97.90 1.4547 24 666.0 20.20 314.64 26.40 17.20 +14.43830 0.00 18.100 0 0.5970 6.8520 100.00 1.4655 24 666.0 20.20 179.36 19.78 27.50 +51.13580 0.00 18.100 0 0.5970 5.7570 100.00 1.4130 24 666.0 20.20 2.60 10.11 15.00 +14.05070 0.00 18.100 0 0.5970 6.6570 100.00 1.5275 24 666.0 20.20 35.05 21.22 17.20 +18.81100 0.00 18.100 0 0.5970 4.6280 100.00 1.5539 24 666.0 20.20 28.79 34.37 17.90 +28.65580 0.00 18.100 0 0.5970 5.1550 100.00 1.5894 24 666.0 20.20 210.97 20.08 16.30 +45.74610 0.00 18.100 0 0.6930 4.5190 100.00 1.6582 24 666.0 20.20 88.27 36.98 7.00 +18.08460 0.00 18.100 0 0.6790 6.4340 100.00 1.8347 24 666.0 20.20 27.25 29.05 7.20 +10.83420 0.00 18.100 0 0.6790 6.7820 90.80 1.8195 24 666.0 20.20 21.57 25.79 7.50 +25.94060 0.00 18.100 0 0.6790 5.3040 89.10 1.6475 24 666.0 20.20 127.36 26.64 10.40 +73.53410 0.00 18.100 0 0.6790 5.9570 100.00 1.8026 24 666.0 20.20 16.45 20.62 8.80 +11.81230 0.00 18.100 0 0.7180 6.8240 76.50 1.7940 24 666.0 20.20 48.45 22.74 8.40 +11.08740 0.00 18.100 0 0.7180 6.4110 100.00 1.8589 24 666.0 20.20 318.75 15.02 16.70 + 7.02259 0.00 18.100 0 0.7180 6.0060 95.30 1.8746 24 666.0 20.20 319.98 15.70 14.20 +12.04820 0.00 18.100 0 0.6140 5.6480 87.60 1.9512 24 666.0 20.20 291.55 14.10 20.80 + 
7.05042 0.00 18.100 0 0.6140 6.1030 85.10 2.0218 24 666.0 20.20 2.52 23.29 13.40 + 8.79212 0.00 18.100 0 0.5840 5.5650 70.60 2.0635 24 666.0 20.20 3.65 17.16 11.70 +15.86030 0.00 18.100 0 0.6790 5.8960 95.40 1.9096 24 666.0 20.20 7.68 24.39 8.30 +12.24720 0.00 18.100 0 0.5840 5.8370 59.70 1.9976 24 666.0 20.20 24.65 15.69 10.20 +37.66190 0.00 18.100 0 0.6790 6.2020 78.70 1.8629 24 666.0 20.20 18.82 14.52 10.90 + 7.36711 0.00 18.100 0 0.6790 6.1930 78.10 1.9356 24 666.0 20.20 96.73 21.52 11.00 + 9.33889 0.00 18.100 0 0.6790 6.3800 95.60 1.9682 24 666.0 20.20 60.72 24.08 9.50 + 8.49213 0.00 18.100 0 0.5840 6.3480 86.10 2.0527 24 666.0 20.20 83.45 17.64 14.50 +10.06230 0.00 18.100 0 0.5840 6.8330 94.30 2.0882 24 666.0 20.20 81.33 19.69 14.10 + 6.44405 0.00 18.100 0 0.5840 6.4250 74.80 2.2004 24 666.0 20.20 97.95 12.03 16.10 + 5.58107 0.00 18.100 0 0.7130 6.4360 87.90 2.3158 24 666.0 20.20 100.19 16.22 14.30 +13.91340 0.00 18.100 0 0.7130 6.2080 95.00 2.2222 24 666.0 20.20 100.63 15.17 11.70 +11.16040 0.00 18.100 0 0.7400 6.6290 94.60 2.1247 24 666.0 20.20 109.85 23.27 13.40 +14.42080 0.00 18.100 0 0.7400 6.4610 93.30 2.0026 24 666.0 20.20 27.49 18.05 9.60 +15.17720 0.00 18.100 0 0.7400 6.1520 100.00 1.9142 24 666.0 20.20 9.32 26.45 8.70 +13.67810 0.00 18.100 0 0.7400 5.9350 87.90 1.8206 24 666.0 20.20 68.95 34.02 8.40 + 9.39063 0.00 18.100 0 0.7400 5.6270 93.90 1.8172 24 666.0 20.20 396.90 22.88 12.80 +22.05110 0.00 18.100 0 0.7400 5.8180 92.40 1.8662 24 666.0 20.20 391.45 22.11 10.50 + 9.72418 0.00 18.100 0 0.7400 6.4060 97.20 2.0651 24 666.0 20.20 385.96 19.52 17.10 + 5.66637 0.00 18.100 0 0.7400 6.2190 100.00 2.0048 24 666.0 20.20 395.69 16.59 18.40 + 9.96654 0.00 18.100 0 0.7400 6.4850 100.00 1.9784 24 666.0 20.20 386.73 18.85 15.40 +12.80230 0.00 18.100 0 0.7400 5.8540 96.60 1.8956 24 666.0 20.20 240.52 23.79 10.80 +10.67180 0.00 18.100 0 0.7400 6.4590 94.80 1.9879 24 666.0 20.20 43.06 23.98 11.80 + 6.28807 0.00 18.100 0 0.7400 6.3410 96.40 2.0720 24 666.0 20.20 318.01 17.79 14.90 + 9.92485 0.00 18.100 0 0.7400 6.2510 96.60 2.1980 24 666.0 20.20 388.52 16.44 12.60 + 9.32909 0.00 18.100 0 0.7130 6.1850 98.70 2.2616 24 666.0 20.20 396.90 18.13 14.10 + 7.52601 0.00 18.100 0 0.7130 6.4170 98.30 2.1850 24 666.0 20.20 304.21 19.31 13.00 + 6.71772 0.00 18.100 0 0.7130 6.7490 92.60 2.3236 24 666.0 20.20 0.32 17.44 13.40 + 5.44114 0.00 18.100 0 0.7130 6.6550 98.20 2.3552 24 666.0 20.20 355.29 17.73 15.20 + 5.09017 0.00 18.100 0 0.7130 6.2970 91.80 2.3682 24 666.0 20.20 385.09 17.27 16.10 + 8.24809 0.00 18.100 0 0.7130 7.3930 99.30 2.4527 24 666.0 20.20 375.87 16.74 17.80 + 9.51363 0.00 18.100 0 0.7130 6.7280 94.10 2.4961 24 666.0 20.20 6.68 18.71 14.90 + 4.75237 0.00 18.100 0 0.7130 6.5250 86.50 2.4358 24 666.0 20.20 50.92 18.13 14.10 + 4.66883 0.00 18.100 0 0.7130 5.9760 87.90 2.5806 24 666.0 20.20 10.48 19.01 12.70 + 8.20058 0.00 18.100 0 0.7130 5.9360 80.30 2.7792 24 666.0 20.20 3.50 16.94 13.50 + 7.75223 0.00 18.100 0 0.7130 6.3010 83.70 2.7831 24 666.0 20.20 272.21 16.23 14.90 + 6.80117 0.00 18.100 0 0.7130 6.0810 84.40 2.7175 24 666.0 20.20 396.90 14.70 20.00 + 4.81213 0.00 18.100 0 0.7130 6.7010 90.00 2.5975 24 666.0 20.20 255.23 16.42 16.40 + 3.69311 0.00 18.100 0 0.7130 6.3760 88.40 2.5671 24 666.0 20.20 391.43 14.65 17.70 + 6.65492 0.00 18.100 0 0.7130 6.3170 83.00 2.7344 24 666.0 20.20 396.90 13.99 19.50 + 5.82115 0.00 18.100 0 0.7130 6.5130 89.90 2.8016 24 666.0 20.20 393.82 10.29 20.20 + 7.83932 0.00 18.100 0 0.6550 6.2090 65.40 2.9634 24 666.0 20.20 396.90 13.22 21.40 + 3.16360 0.00 
18.100 0 0.6550 5.7590 48.20 3.0665 24 666.0 20.20 334.40 14.13 19.90 + 3.77498 0.00 18.100 0 0.6550 5.9520 84.70 2.8715 24 666.0 20.20 22.01 17.15 19.00 + 4.42228 0.00 18.100 0 0.5840 6.0030 94.50 2.5403 24 666.0 20.20 331.29 21.32 19.10 +15.57570 0.00 18.100 0 0.5800 5.9260 71.00 2.9084 24 666.0 20.20 368.74 18.13 19.10 +13.07510 0.00 18.100 0 0.5800 5.7130 56.70 2.8237 24 666.0 20.20 396.90 14.76 20.10 + 4.34879 0.00 18.100 0 0.5800 6.1670 84.00 3.0334 24 666.0 20.20 396.90 16.29 19.90 + 4.03841 0.00 18.100 0 0.5320 6.2290 90.70 3.0993 24 666.0 20.20 395.33 12.87 19.60 + 3.56868 0.00 18.100 0 0.5800 6.4370 75.00 2.8965 24 666.0 20.20 393.37 14.36 23.20 + 4.64689 0.00 18.100 0 0.6140 6.9800 67.60 2.5329 24 666.0 20.20 374.68 11.66 29.80 + 8.05579 0.00 18.100 0 0.5840 5.4270 95.40 2.4298 24 666.0 20.20 352.58 18.14 13.80 + 6.39312 0.00 18.100 0 0.5840 6.1620 97.40 2.2060 24 666.0 20.20 302.76 24.10 13.30 + 4.87141 0.00 18.100 0 0.6140 6.4840 93.60 2.3053 24 666.0 20.20 396.21 18.68 16.70 +15.02340 0.00 18.100 0 0.6140 5.3040 97.30 2.1007 24 666.0 20.20 349.48 24.91 12.00 +10.23300 0.00 18.100 0 0.6140 6.1850 96.70 2.1705 24 666.0 20.20 379.70 18.03 14.60 +14.33370 0.00 18.100 0 0.6140 6.2290 88.00 1.9512 24 666.0 20.20 383.32 13.11 21.40 + 5.82401 0.00 18.100 0 0.5320 6.2420 64.70 3.4242 24 666.0 20.20 396.90 10.74 23.00 + 5.70818 0.00 18.100 0 0.5320 6.7500 74.90 3.3317 24 666.0 20.20 393.07 7.74 23.70 + 5.73116 0.00 18.100 0 0.5320 7.0610 77.00 3.4106 24 666.0 20.20 395.28 7.01 25.00 + 2.81838 0.00 18.100 0 0.5320 5.7620 40.30 4.0983 24 666.0 20.20 392.92 10.42 21.80 + 2.37857 0.00 18.100 0 0.5830 5.8710 41.90 3.7240 24 666.0 20.20 370.73 13.34 20.60 + 3.67367 0.00 18.100 0 0.5830 6.3120 51.90 3.9917 24 666.0 20.20 388.62 10.58 21.20 + 5.69175 0.00 18.100 0 0.5830 6.1140 79.80 3.5459 24 666.0 20.20 392.68 14.98 19.10 + 4.83567 0.00 18.100 0 0.5830 5.9050 53.20 3.1523 24 666.0 20.20 388.22 11.45 20.60 + 0.15086 0.00 27.740 0 0.6090 5.4540 92.70 1.8209 4 711.0 20.10 395.09 18.06 15.20 + 0.18337 0.00 27.740 0 0.6090 5.4140 98.30 1.7554 4 711.0 20.10 344.05 23.97 7.00 + 0.20746 0.00 27.740 0 0.6090 5.0930 98.00 1.8226 4 711.0 20.10 318.43 29.68 8.10 + 0.10574 0.00 27.740 0 0.6090 5.9830 98.80 1.8681 4 711.0 20.10 390.11 18.07 13.60 + 0.11132 0.00 27.740 0 0.6090 5.9830 83.50 2.1099 4 711.0 20.10 396.90 13.35 20.10 + 0.17331 0.00 9.690 0 0.5850 5.7070 54.00 2.3817 6 391.0 19.20 396.90 12.01 21.80 + 0.27957 0.00 9.690 0 0.5850 5.9260 42.60 2.3817 6 391.0 19.20 396.90 13.59 24.50 + 0.17899 0.00 9.690 0 0.5850 5.6700 28.80 2.7986 6 391.0 19.20 393.29 17.60 23.10 + 0.28960 0.00 9.690 0 0.5850 5.3900 72.90 2.7986 6 391.0 19.20 396.90 21.14 19.70 + 0.26838 0.00 9.690 0 0.5850 5.7940 70.60 2.8927 6 391.0 19.20 396.90 14.10 18.30 + 0.23912 0.00 9.690 0 0.5850 6.0190 65.30 2.4091 6 391.0 19.20 396.90 12.92 21.20 + 0.17783 0.00 9.690 0 0.5850 5.5690 73.50 2.3999 6 391.0 19.20 395.77 15.10 17.50 + 0.22438 0.00 9.690 0 0.5850 6.0270 79.70 2.4982 6 391.0 19.20 396.90 14.33 16.80 + 0.06263 0.00 11.930 0 0.5730 6.5930 69.10 2.4786 1 273.0 21.00 391.99 9.67 22.40 + 0.04527 0.00 11.930 0 0.5730 6.1200 76.70 2.2875 1 273.0 21.00 396.90 9.08 20.60 + 0.06076 0.00 11.930 0 0.5730 6.9760 91.00 2.1675 1 273.0 21.00 396.90 5.64 23.90 + 0.10959 0.00 11.930 0 0.5730 6.7940 89.30 2.3889 1 273.0 21.00 393.45 6.48 22.00 + 0.04741 0.00 11.930 0 0.5730 6.0300 80.80 2.5050 1 273.0 21.00 396.90 7.88 11.90 diff --git a/ex1/linear_regression.m b/ex1/linear_regression.m new file mode 100644 index 0000000..b5613be --- /dev/null 
+++ b/ex1/linear_regression.m @@ -0,0 +1,24 @@ +function [f,g] = linear_regression(theta, X,y) + % + % Arguments: + % theta - A vector containing the parameter values to optimize. + % X - The examples stored in a matrix. + % X(i,j) is the i'th coordinate of the j'th example. + % y - The target value for each example. y(j) is the target for example j. + % + + m=size(X,2); + n=size(X,1); + + f=0; + g=zeros(size(theta)); + + % + % TODO: Compute the linear regression objective by looping over the examples in X. + % Store the objective function value in 'f'. + % + % TODO: Compute the gradient of the objective with respect to theta by looping over + % the examples in X and adding up the gradient for each example. Store the + % computed gradient in 'g'. + +%%% YOUR CODE HERE %%% diff --git a/ex1/linear_regression_vec.m b/ex1/linear_regression_vec.m new file mode 100644 index 0000000..ef807a6 --- /dev/null +++ b/ex1/linear_regression_vec.m @@ -0,0 +1,20 @@ +function [f,g] = linear_regression_vec(theta, X,y) + % + % Arguments: + % theta - A vector containing the parameter values to optimize. + % X - The examples stored in a matrix. + % X(i,j) is the i'th coordinate of the j'th example. + % y - The target value for each example. y(j) is the target for example j. + % + m=size(X,2); + + % initialize objective value and gradient. + f = 0; + g = zeros(size(theta)); + + % + % TODO: Compute the linear regression objective function and gradient + % using vectorized code. (It will be just a few lines of code!) + % Store the objective function value in 'f', and the gradient in 'g'. + % +%%% YOUR CODE HERE %%% diff --git a/ex1/logistic_regression.m b/ex1/logistic_regression.m new file mode 100644 index 0000000..8c7f5fb --- /dev/null +++ b/ex1/logistic_regression.m @@ -0,0 +1,24 @@ +function [f,g] = logistic_regression(theta, X,y) + % + % Arguments: + % theta - A column vector containing the parameter values to optimize. + % X - The examples stored in a matrix. + % X(i,j) is the i'th coordinate of the j'th example. + % y - The label for each example. y(j) is the j'th example's label. + % + + m=size(X,2); + + % initialize objective value and gradient. + f = 0; + g = zeros(size(theta)); + + + % + % TODO: Compute the objective function by looping over the dataset and summing + % up the objective values for each example. Store the result in 'f'. + % + % TODO: Compute the gradient of the objective by looping over the dataset and summing + % up the gradients (df/dtheta) for each example. Store the result in 'g'. + % +%%% YOUR CODE HERE %%% diff --git a/ex1/logistic_regression_vec.m b/ex1/logistic_regression_vec.m new file mode 100644 index 0000000..1c36559 --- /dev/null +++ b/ex1/logistic_regression_vec.m @@ -0,0 +1,21 @@ +function [f,g] = logistic_regression_vec(theta, X,y) + % + % Arguments: + % theta - A column vector containing the parameter values to optimize. + % X - The examples stored in a matrix. + % X(i,j) is the i'th coordinate of the j'th example. + % y - The label for each example. y(j) is the j'th example's label. + % + m=size(X,2); + + % initialize objective value and gradient. + f = 0; + g = zeros(size(theta)); + + + % + % TODO: Compute the logistic regression objective function and gradient + % using vectorized code. (It will be just a few lines of code!) + % Store the objective function value in 'f', and the gradient in 'g'. 
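+ %
+ % One possible vectorized form (a sketch, assuming y is a 1-by-m row of
+ % 0/1 labels; other formulations are equally valid):
+ %   h = sigmoid(theta'*X);
+ %   f = -( log(h)*y' + log(1-h)*(1-y)' );
+ %   g = X*(h - y)';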
+ %
+%%% YOUR CODE HERE %%%
diff --git a/ex1/multi_classifier_accuracy.m b/ex1/multi_classifier_accuracy.m
new file mode 100644
index 0000000..6560456
--- /dev/null
+++ b/ex1/multi_classifier_accuracy.m
@@ -0,0 +1,5 @@
+function accuracy=multi_classifier_accuracy(theta, X,y)
+ [~,labels] = max(theta'*X, [], 1);
+
+ correct=sum(y == labels);
+ accuracy = correct / length(y);
diff --git a/ex1/sigmoid.m b/ex1/sigmoid.m
new file mode 100644
index 0000000..604f4b5
--- /dev/null
+++ b/ex1/sigmoid.m
@@ -0,0 +1,2 @@
+function h=sigmoid(a)
+ h=1./(1+exp(-a));
diff --git a/ex1/softmax_regression_vec.m b/ex1/softmax_regression_vec.m
new file mode 100644
index 0000000..4fb5222
--- /dev/null
+++ b/ex1/softmax_regression_vec.m
@@ -0,0 +1,32 @@
+function [f,g] = softmax_regression_vec(theta, X,y)
+ %
+ % Arguments:
+ % theta - A vector containing the parameter values to optimize.
+ % In minFunc, theta is reshaped to a long vector. So we need to
+ % resize it to an n-by-(num_classes-1) matrix.
+ % Recall that we assume theta(:,num_classes) = 0.
+ %
+ % X - The examples stored in a matrix.
+ % X(i,j) is the i'th coordinate of the j'th example.
+ % y - The label for each example. y(j) is the j'th example's label.
+ %
+ m=size(X,2);
+ n=size(X,1);
+
+ % theta is a vector; need to reshape to n x (num_classes-1).
+ theta=reshape(theta, n, []);
+ num_classes=size(theta,2)+1;
+
+ % initialize objective value and gradient.
+ f = 0;
+ g = zeros(size(theta));
+
+ %
+ % TODO: Compute the softmax objective function and gradient using vectorized code.
+ % Store the objective function value in 'f', and the gradient in 'g'.
+ % Before returning g, make sure you form it back into a vector with g=g(:);
+ %
+%%% YOUR CODE HERE %%%
+
+ g=g(:); % make gradient a vector for minFunc
+
diff --git a/multilayer_supervised/initialize_weights.m b/multilayer_supervised/initialize_weights.m
new file mode 100644
index 0000000..f355a1d
--- /dev/null
+++ b/multilayer_supervised/initialize_weights.m
@@ -0,0 +1,22 @@
+function [ stack ] = initialize_weights( ei )
+%INITIALIZE_WEIGHTS Random weight structures for a network architecture
+% ei describes a network via the fields layer_sizes, input_dim, and output_dim
+%
+% This uses Xavier's weight initialization tricks for better backprop
+% See: X. Glorot, Y. Bengio. Understanding the difficulty of training
+% deep feedforward neural networks. AISTATS 2010.
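+%
+% Each layer's weights are drawn uniformly from [-s, s] with
+% s = sqrt(6/(fan_in + fan_out)), which keeps activation variances roughly
+% constant across layers.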
+
+%% initialize hidden layers
+stack = cell(1, numel(ei.layer_sizes));
+for l = 1 : numel(ei.layer_sizes)
+    if l > 1
+        prev_size = ei.layer_sizes(l-1);
+    else
+        prev_size = ei.input_dim;
+    end
+    cur_size = ei.layer_sizes(l);
+    % Xavier's scaling factor
+    s = sqrt(6) / sqrt(prev_size + cur_size);
+    stack{l}.W = rand(cur_size, prev_size)*2*s - s;
+    stack{l}.b = zeros(cur_size, 1);
+end
diff --git a/multilayer_supervised/load_preprocess_mnist.m b/multilayer_supervised/load_preprocess_mnist.m
new file mode 100644
index 0000000..2daf619
--- /dev/null
+++ b/multilayer_supervised/load_preprocess_mnist.m
@@ -0,0 +1,14 @@
+function [data_train, labels_train, data_test, labels_test] = load_preprocess_mnist()
+%% TODO ensure this is consistent with common loaders
+% assumes relative paths to the common directory
+% assumes the common directory is on the path for access to the load functions
+% adds 1 to the labels to make them 1-indexed
+
+data_train = loadMNISTImages('../common/train-images-idx3-ubyte');
+labels_train = loadMNISTLabels('../common/train-labels-idx1-ubyte');
+labels_train = labels_train + 1;
+
+data_test = loadMNISTImages('../common/t10k-images-idx3-ubyte');
+labels_test = loadMNISTLabels('../common/t10k-labels-idx1-ubyte');
+labels_test = labels_test + 1;
+
diff --git a/multilayer_supervised/params2stack.m b/multilayer_supervised/params2stack.m
new file mode 100644
index 0000000..ef77a40
--- /dev/null
+++ b/multilayer_supervised/params2stack.m
@@ -0,0 +1,43 @@
+function stack = params2stack(params, ei)
+
+% Converts a flattened parameter vector into a nice "stack" structure
+% for us to work with. This is useful when you're building multilayer
+% networks.
+%
+% stack = params2stack(params, ei)
+%
+% params - flattened parameter vector
+% ei - auxiliary variable containing
+%      the configuration of the network
+%
+
+
+% Map the params (a vector) into a stack of weights
+depth = numel(ei.layer_sizes);
+stack = cell(depth,1);
+% the size of the previous layer
+prev_size = ei.input_dim;
+% mark current position in parameter vector
+cur_pos = 1;
+
+for d = 1:depth
+    % Create layer d
+    stack{d} = struct;
+
+    hidden = ei.layer_sizes(d);
+    % Extract weights
+    wlen = double(hidden * prev_size);
+    stack{d}.W = reshape(params(cur_pos:cur_pos+wlen-1), hidden, prev_size);
+    cur_pos = cur_pos+wlen;
+
+    % Extract bias
+    blen = hidden;
+    stack{d}.b = reshape(params(cur_pos:cur_pos+blen-1), hidden, 1);
+    cur_pos = cur_pos+blen;
+
+    % Set previous layer size
+    prev_size = hidden;
+
+end
+
+end
\ No newline at end of file
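[Editor's note] params2stack (above) and stack2params (further below) are inverses, so a stack flattened for minFunc is recovered unchanged. A usage sketch, with the ei fields taken from run_train.m:

    ei.input_dim = 784; ei.layer_sizes = [256, 10];
    stack  = initialize_weights(ei);
    params = stack2params(stack);       % one long column vector for minFunc
    stack2 = params2stack(params, ei);  % same weights, back in struct form
    % stack2{1}.W equals stack{1}.W, stack2{1}.b equals stack{1}.b, etc.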
diff --git a/multilayer_supervised/run_train.m b/multilayer_supervised/run_train.m
new file mode 100644
index 0000000..ef8b626
--- /dev/null
+++ b/multilayer_supervised/run_train.m
@@ -0,0 +1,58 @@
+% runs training procedure for supervised multilayer network
+% softmax output layer with cross entropy loss function
+
+%% setup environment
+% experiment information
+% a struct containing network layer sizes etc
+ei = [];
+
+% add common directory to your path for
+% minFunc and mnist data helpers
+addpath ../common;
+addpath(genpath('../common/minFunc_2012/minFunc'));
+
+%% load mnist data
+[data_train, labels_train, data_test, labels_test] = load_preprocess_mnist();
+
+%% populate ei with the network architecture to train
+% ei is a structure you can use to store hyperparameters of the network
+% the architecture specified below should produce 100% training accuracy
+% You should be able to try different network architectures by changing ei
+% only (no changes to the objective function code)
+
+% dimension of input features
+ei.input_dim = 784;
+% number of output classes
+ei.output_dim = 10;
+% sizes of all hidden layers and the output layer
+ei.layer_sizes = [256, ei.output_dim];
+% scaling parameter for l2 weight regularization penalty
+ei.lambda = 0;
+% which type of activation function to use in hidden layers
+% feel free to implement support for only the logistic sigmoid function
+ei.activation_fun = 'logistic';
+
+%% setup random initial weights
+stack = initialize_weights(ei);
+params = stack2params(stack);
+
+%% setup minFunc options
+options = [];
+options.display = 'iter';
+options.maxFunEvals = 1e6;
+options.Method = 'lbfgs';
+
+%% run training
+[opt_params,opt_value,exitflag,output] = minFunc(@supervised_dnn_cost,...
+    params,options,ei, data_train, labels_train);
+
+%% compute accuracy on the test and train set
+[~, ~, pred] = supervised_dnn_cost( opt_params, ei, data_test, [], true);
+[~,pred] = max(pred);
+acc_test = mean(pred'==labels_test);
+fprintf('test accuracy: %f\n', acc_test);
+
+[~, ~, pred] = supervised_dnn_cost( opt_params, ei, data_train, [], true);
+[~,pred] = max(pred);
+acc_train = mean(pred'==labels_train);
+fprintf('train accuracy: %f\n', acc_train);
diff --git a/multilayer_supervised/stack2params.m b/multilayer_supervised/stack2params.m
new file mode 100644
index 0000000..8d66c64
--- /dev/null
+++ b/multilayer_supervised/stack2params.m
@@ -0,0 +1,38 @@
+function [params] = stack2params(stack)
+
+% Converts a "stack" structure into a flattened parameter vector.
+% This is useful when working with optimization toolboxes such as
+% minFunc.
+%
+% [params] = stack2params(stack)
+%
+% stack - the stack structure, where stack{1}.W = weights of first layer
+%         stack{1}.b = bias of first layer
+%         stack{2}.W = weights of second layer
+%         stack{2}.b = bias of second layer
+%         ... etc.
+% This is a non-standard version of the code to support conv nets:
+% it allows a layer's input size to be any multiple (>= 1) of the
+% previous layer's output size. If using a gpu, pass the stack
+% entries in your gpu datatype.
+% Setup the compressed param vector
+params = [];
+
+
+for d = 1:numel(stack)
+    % This can be optimized. But since our stacks are relatively short, it
+    % is okay
+    params = [params ; stack{d}.W(:) ; stack{d}.b(:) ];
+
+    % Check that stack is of the correct form
+    assert(size(stack{d}.W, 1) == size(stack{d}.b, 1), ...
+        ['The bias should be a *column* vector of ' ...
+        int2str(size(stack{d}.W, 1)) 'x1']);
+    % no layer size constraint with conv nets
+    if d < numel(stack)
+        assert(mod(size(stack{d+1}.W, 2), size(stack{d}.W, 1)) == 0, ...
+            ['The adjacent layers L' int2str(d) ' and L' int2str(d+1) ...
+            ' should have matching sizes.']);
+    end
+end
+
+end
\ No newline at end of file
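[Editor's note] supervised_dnn_cost.m (next) leaves forward prop, cost, and backprop as exercises. One possible shape for the forward pass, as an editorial sketch only: it assumes logistic hidden units and a softmax output layer, matching the defaults in run_train.m, and stores layer activations in hAct{l}:

    a = data;
    for l = 1 : numel(stack)
        z = bsxfun(@plus, stack{l}.W * a, stack{l}.b);
        if l < numel(stack)
            a = 1 ./ (1 + exp(-z));                 % logistic hidden activation
        else
            z = bsxfun(@minus, z, max(z, [], 1));   % stabilize the softmax
            a = exp(z);
            a = bsxfun(@rdivide, a, sum(a, 1));     % output class probabilities
        end
        hAct{l} = a;
    end
    pred_prob = hAct{end};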
diff --git a/multilayer_supervised/supervised_dnn_cost.m b/multilayer_supervised/supervised_dnn_cost.m
new file mode 100644
index 0000000..e7ac463
--- /dev/null
+++ b/multilayer_supervised/supervised_dnn_cost.m
@@ -0,0 +1,42 @@
+function [ cost, grad, pred_prob] = supervised_dnn_cost( theta, ei, data, labels, pred_only)
+%SUPERVISED_DNN_COST Cost function for the supervised multilayer network
+%   Does all the work of cost / gradient computation
+%   Returns cost broken into cross-entropy, weight norm, and prox reg
+%        components (ceCost, wCost, pCost)
+
+%% default values
+po = false;
+if exist('pred_only','var')
+  po = pred_only;
+end
+
+%% reshape into network
+stack = params2stack(theta, ei);
+numHidden = numel(ei.layer_sizes) - 1;
+hAct = cell(numHidden+1, 1);
+gradStack = cell(numHidden+1, 1);
+%% forward prop
+%%% YOUR CODE HERE %%%
+
+%% return here if only predictions desired.
+if po
+  cost = -1; ceCost = -1; wCost = -1; numCorrect = -1;
+  grad = [];
+  return;
+end
+
+%% compute cost
+%%% YOUR CODE HERE %%%
+
+%% compute gradients using backpropagation
+%%% YOUR CODE HERE %%%
+
+%% compute weight penalty cost and gradient for non-bias terms
+%%% YOUR CODE HERE %%%
+
+%% reshape gradients into vector
+[grad] = stack2params(gradStack);
+end
+
+
+
diff --git a/pca/pca_gen.m b/pca/pca_gen.m
new file mode 100644
index 0000000..dfcbf02
--- /dev/null
+++ b/pca/pca_gen.m
@@ -0,0 +1,119 @@
+%%================================================================
+%% Step 0a: Load data
+%  Here we provide the code to load MNIST image data into x.
+%  x will be a 784 x 60000 matrix, where the kth column x(:, k) corresponds
+%  to the raw pixel data of the kth 28x28 MNIST digit image.
+%  You do not need to change the code below.
+
+addpath(genpath('../common'))
+x = loadMNISTImages('../common/train-images-idx3-ubyte');
+figure('name','Raw images');
+randsel = randi(size(x,2),200,1); % A random selection of samples for visualization
+display_network(x(:,randsel));
+
+%%================================================================
+%% Step 0b: Zero-mean the data (by row)
+%  You can make use of the mean and repmat/bsxfun functions.
+
+%%% YOUR CODE HERE %%%
+
+%%================================================================
+%% Step 1a: Implement PCA to obtain xRot
+%  Implement PCA to obtain xRot, the matrix in which the data is expressed
+%  with respect to the eigenbasis of sigma, which is the matrix U.
+
+%%% YOUR CODE HERE %%%
+
+%%================================================================
+%% Step 1b: Check your implementation of PCA
+%  The covariance matrix for the data expressed with respect to the basis U
+%  should be a diagonal matrix with non-zero entries only along the main
+%  diagonal. We will verify this here.
+%  Write code to compute the covariance matrix, covar.
+%  When visualised as an image, you should see a straight line across the
+%  diagonal (non-zero entries) against a blue background (zero entries).
+
+%%% YOUR CODE HERE %%%
+
+% Visualise the covariance matrix. You should see a line across the
+% diagonal against a blue background.
+figure('name','Visualisation of covariance matrix');
+imagesc(covar);
+
+%%================================================================
+%% Step 2: Find k, the number of components to retain
+%  Write code to determine k, the number of components to retain in order
+%  to retain at least 99% of the variance.
+
+%%% YOUR CODE HERE %%%
+
+%%================================================================
+%% Step 3: Implement PCA with dimension reduction
+%  Now that you have found k, you can reduce the dimension of the data by
+%  discarding the remaining dimensions. In this way, you can represent the
+%  data in k dimensions instead of the original 784, which will save you
+%  computational time when running learning algorithms on the reduced
+%  representation.
+%
+%  Following the dimension reduction, invert the PCA transformation to produce
+%  the matrix xHat, the dimension-reduced data with respect to the original basis.
+%  Visualise the data and compare it to the raw data. You will observe that
+%  there is little loss due to throwing away the principal components that
+%  correspond to dimensions with low variation.
+
+%%% YOUR CODE HERE %%%
+
+% Visualise the data, and compare it to the raw data
+% You should observe that the raw and processed data are of comparable quality.
+% For comparison, you may wish to generate a PCA reduced image which
+% retains only 90% of the variance.
+
+figure('name',['PCA processed images ',sprintf('(%d / %d dimensions)', k, size(x, 1)),'']);
+display_network(xHat(:,randsel));
+figure('name','Raw images');
+display_network(x(:,randsel));
+
+%%================================================================
+%% Step 4a: Implement PCA with whitening and regularisation
+%  Implement PCA with whitening and regularisation to produce the matrix
+%  xPCAWhite.
+
+epsilon = 1e-1;
+%%% YOUR CODE HERE %%%
+
+%% Step 4b: Check your implementation of PCA whitening
+%  Check your implementation of PCA whitening with and without regularisation.
+%  PCA whitening without regularisation results in a covariance matrix
+%  that is equal to the identity matrix. PCA whitening with regularisation
+%  results in a covariance matrix with diagonal entries starting close to
+%  1 and gradually becoming smaller. We will verify these properties here.
+%  Write code to compute the covariance matrix, covar.
+%
+%  Without regularisation (set epsilon to 0 or close to 0),
+%  when visualised as an image, you should see a red line across the
+%  diagonal (entries equal to one) against a blue background (zero entries).
+%  With regularisation, you should see a red line that slowly turns
+%  blue across the diagonal, corresponding to the diagonal entries slowly
+%  becoming smaller.
+
+%%% YOUR CODE HERE %%%
+
+% Visualise the covariance matrix. You should see a red line across the
+% diagonal against a blue background.
+figure('name','Visualisation of covariance matrix');
+imagesc(covar);
+
+%%================================================================
+%% Step 5: Implement ZCA whitening
+%  Now implement ZCA whitening to produce the matrix xZCAWhite.
+%  Visualise the data and compare it to the raw data. You should observe
+%  that whitening results in, among other things, enhanced edges.
+
+%%% YOUR CODE HERE %%%
+
+% Visualise the data, and compare it to the raw data.
+% You should observe that the whitened images have enhanced edges.
+figure('name','ZCA whitened images');
+display_network(xZCAWhite(:,randsel));
+figure('name','Raw images');
+display_network(x(:,randsel));
diff --git a/rica/bsxfunwrap.m b/rica/bsxfunwrap.m
new file mode 100644
index 0000000..97d8a7b
--- /dev/null
+++ b/rica/bsxfunwrap.m
@@ -0,0 +1,28 @@
+
+function c = bsxfunwrap(func, a, b)
+
+global usegpu;
+
+if usegpu
+    if size(a,1) > 1 && size(b,1) == 1
+        assert(size(a,2) == size(b,2), 'bsxfunwrap singleton dimensions dont agree');
+        c = func(a, repmat(b, size(a,1), 1));
+    elseif size(a,2) > 1 && size(b,2) == 1
+        assert(size(a,1) == size(b,1), 'bsxfunwrap singleton dimensions dont agree');
+        c = func(a, repmat(b, 1, size(a,2)));
+    elseif size(b,1) > 1 && size(a,1) == 1
+        assert(size(b,2) == size(a,2), 'bsxfunwrap singleton dimensions dont agree');
+        c = func(repmat(a, size(b, 1), 1), b);
+    elseif size(b,2) > 1 && size(a,2) == 1
+        assert(size(b,1) == size(a,1), 'bsxfunwrap singleton dimensions dont agree');
+        c = func(repmat(a, 1, size(b, 2)), b);
+    else
+        assert(size(a,1) == size(b,1), 'no bsxfun to do, bsxfunwrap dimensions dont agree');
+        assert(size(a,2) == size(b,2), 'no bsxfun to do, bsxfunwrap dimensions dont agree');
+        c = func(a, b);
+    end
+else
+    c = bsxfun(func, a, b);
+end
+
+end
\ No newline at end of file
diff --git a/rica/l2rowscaled.m b/rica/l2rowscaled.m
new file mode 100644
index 0000000..9977601
--- /dev/null
+++ b/rica/l2rowscaled.m
@@ -0,0 +1,7 @@
+function [y] = l2rowscaled(x, alpha)
+
+normeps = 1e-5;
+epssumsq = sum(x.^2,2) + normeps;
+
+l2rows=sqrt(epssumsq)*alpha;
+y=bsxfunwrap(@rdivide,x,l2rows);
diff --git a/rica/l2rowscaledg.m b/rica/l2rowscaledg.m
new file mode 100644
index 0000000..1302740
--- /dev/null
+++ b/rica/l2rowscaledg.m
@@ -0,0 +1,18 @@
+function [grad] = l2rowscaledg(x,y,outderv, alpha)
+
+normeps = 1e-5;
+if (~exist('outderv','var')||isempty(outderv))
+    error('Requires outderv of previous layer to compute gradient!');
+end
+
+epssumsq = sum(x.^2,2) + normeps;
+
+l2rows = sqrt(epssumsq)*alpha;
+
+if (~exist('y','var')||isempty(y))
+    y = bsxfunwrap(@rdivide,x,l2rows);
+end
+
+grad = bsxfunwrap(@rdivide, outderv, l2rows) - ...
+    bsxfunwrap(@times, y, sum(outderv.*x, 2) ./ epssumsq);
+
diff --git a/rica/preprocess.m b/rica/preprocess.m
new file mode 100644
index 0000000..4f08ea1
--- /dev/null
+++ b/rica/preprocess.m
@@ -0,0 +1,5 @@
+function [patches, mean_patch, V] = preprocess(patches)
+
+%[patches, mean_patch] = removeDC(patches,2);
+mean_patch = [];
+[patches,V,E,D] = zca2(patches);
diff --git a/rica/readme.txt b/rica/readme.txt
new file mode 100644
index 0000000..f015060
--- /dev/null
+++ b/rica/readme.txt
@@ -0,0 +1,3 @@
+To get started, run runSoftICA.m.
+You need to download minFunc here:
+http://www.di.ens.fr/~mschmidt/Software/minFunc_2009.zip
diff --git a/rica/removeDC.m b/rica/removeDC.m
new file mode 100644
index 0000000..9851f3f
--- /dev/null
+++ b/rica/removeDC.m
@@ -0,0 +1,20 @@
+% Removes the DC component from image patches
+% Data is given as a matrix where each patch is one column vector
+% That is, the patches are vectorized.
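[Editor's note] Usage sketch: with patches stored one per column, the default dim = 1 takes each column's mean, i.e. removes each patch's own mean gray value:

    Y = removeDC(patches);   % same as removeDC(patches, 1)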
+
+function [Y,meanX]=removeDC(X, dim)
+
+% Subtract local mean gray-scale value from each patch in X to give output Y
+if nargin == 1
+    dim = 1;
+end
+
+meanX = mean(X,dim);
+
+if dim==1
+  Y = X-meanX(ones(size(X,1),1),:);
+else
+  Y = X-meanX(:,ones(size(X,2),1));
+end
+
+return;
diff --git a/rica/runSoftICA.m b/rica/runSoftICA.m
new file mode 100644
index 0000000..f369ea6
--- /dev/null
+++ b/rica/runSoftICA.m
@@ -0,0 +1,49 @@
+%% We will use minFunc for this exercise, but you can use your
+%  own optimizer of choice
+clear all;
+addpath(genpath('../common/')) % path to minFunc
+%% These parameters should give you sane results. We recommend experimenting
+%  with these values after you have a working solution.
+global params;
+params.m=10000; % num patches
+params.patchWidth=9; % width of a patch
+params.n=params.patchWidth^2; % dimensionality of input to RICA
+params.lambda = 0.0005; % sparsity cost
+params.numFeatures = 50; % number of filter banks to learn
+params.epsilon = 1e-2; % epsilon to use in square-sqrt nonlinearity
+
+% Load MNIST data set
+data = loadMNISTImages('../common/train-images-idx3-ubyte');
+
+%% Preprocessing
+% Our strategy is as follows:
+% 1) Sample random patches in the images
+% 2) Apply standard ZCA transformation to the data
+% 3) Normalize each patch to unit length with l2 normalization
+
+% Step 1) Sample patches
+patches = samplePatches(data,params.patchWidth,params.m);
+% Step 2) Apply ZCA
+%%% YOUR CODE HERE %%%
+% Step 3) Normalize each patch. Each patch should be normalized as
+%   x / ||x||_2 where x is the vector representation of the patch
+%%% YOUR CODE HERE %%%
+
+%% Run the optimization
+options.Method = 'lbfgs';
+options.MaxFunEvals = Inf;
+options.MaxIter = 500;
+%options.display = 'off';
+options.outputFcn = @showBases;
+
+% initialize with random weights
+randTheta = randn(params.numFeatures,params.n)*0.01; % 1/sqrt(params.n);
+randTheta = randTheta ./ repmat(sqrt(sum(randTheta.^2,2)), 1, size(randTheta,2));
+randTheta = randTheta(:);
+
+% optimize over the preprocessed patches
+[opttheta, cost, exitflag] = minFunc( @(theta) softICACost(theta, patches, params), randTheta, options);
+
+% display result
+W = reshape(opttheta, params.numFeatures, params.n);
+display_network(W');
diff --git a/rica/showBases.m b/rica/showBases.m
new file mode 100644
index 0000000..ec67013
--- /dev/null
+++ b/rica/showBases.m
@@ -0,0 +1,7 @@
+function [out] = showBases(theta,iterationType,i,funEvals,f,t,gtd,g,d,optCond,varargin)
+global params
+if mod(i,2) == 0
+    W = reshape(theta, params.numFeatures, params.n);
+    display_network(W');
+end
+out = 0;
\ No newline at end of file
diff --git a/rica/softICACost.m b/rica/softICACost.m
new file mode 100644
index 0000000..d8788ba
--- /dev/null
+++ b/rica/softICACost.m
@@ -0,0 +1,15 @@
+%% Your job is to implement the RICA cost and gradient
+function [cost,grad] = softICACost(theta, x, params)
+
+% unpack weight matrix
+W = reshape(theta, params.numFeatures, params.n);
+
+% project weights to norm ball (prevents degenerate bases)
+Wold = W;
+W = l2rowscaled(W, 1);
+
+%%% YOUR CODE HERE %%%
+
+% unproject gradient for minFunc
+grad = l2rowscaledg(Wold, W, Wgrad, 1);
+grad = grad(:);
\ No newline at end of file
diff --git a/rica/zca2.m b/rica/zca2.m
new file mode 100644
index 0000000..7101790
--- /dev/null
+++ b/rica/zca2.m
@@ -0,0 +1,10 @@
+function [Z, V, E, D] = zca2(x)
+epsilon = 1e-4;
+% You should be able to use the code from your PCA/ZCA exercise
+% Retain all of the components from the ZCA transform (i.e. do not do
+% dimensionality reduction)
+
+% x is the input patch data, one patch per column
+% Z is the ZCA-transformed data, with the same dimensions as x
+
+%%% YOUR CODE HERE %%%
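[Editor's note] One possible body for zca2, sketched under these assumptions: it reuses the PCA/ZCA steps from pca_gen.m, treats x as roughly zero-mean, and interprets the extra outputs E and D, which preprocess.m and stlExercise.m request, as the eigenvectors and eigenvalues of the covariance:

    sigma = x * x' / size(x, 2);                      % covariance of the data
    [U, S, ~] = svd(sigma);
    V = U * diag(1 ./ sqrt(diag(S) + epsilon)) * U';  % ZCA whitening matrix
    Z = V * x;                                        % whitened data
    E = U;                                            % eigenvectors
    D = diag(S);                                      % eigenvalues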
diff --git a/stl/feedfowardRICA.m b/stl/feedfowardRICA.m
new file mode 100644
index 0000000..0f56d70
--- /dev/null
+++ b/stl/feedfowardRICA.m
@@ -0,0 +1,75 @@
+function features = feedfowardRICA(filterDim, poolDim, numFilters, images, W)
+% feedfowardRICA Returns the convolution of the features given by W with
+% the given images. It should be very similar to cnnConvolve.m+cnnPool.m
+% in the CNN exercise, except that there is no bias term b, and the pooling
+% is RICA-style square-square-root pooling instead of average pooling.
+%
+% Parameters:
+%  filterDim - filter (feature) dimension
+%  numFilters - number of feature maps
+%  images - large images to convolve with, matrix in the form
+%           images(r, c, image number)
+%  W - the weights learnt using RICA,
+%      of shape (filterDim,filterDim,numFilters)
+%
+% Returns:
+%  features - matrix of convolved and pooled features in the form
+%             features(imageRow, imageCol, featureNum, imageNum)
+global params;
+numImages = size(images, 3);
+imageDim = size(images, 1);
+convDim = imageDim - filterDim + 1;
+
+features = zeros(convDim / poolDim, ...
+        convDim / poolDim, numFilters, numImages);
+poolMat = ones(poolDim);
+% Instructions:
+%   Convolve every filter with every image just like what you did in
+%   cnnConvolve.m to get a response.
+%   Then perform square-square-root pooling on the response with 3 steps:
+%   1. Square every element in the response
+%   2. Sum everything in each pooling region
+%   3. Add params.epsilon to every element before taking the element-wise square root
+%   (Hint: use poolMat similarly as in cnnPool.m)
+
+
+
+for imageNum = 1:numImages
+  if mod(imageNum,500)==0
+    fprintf('forward-prop image %d\n', imageNum);
+  end
+  for filterNum = 1:numFilters
+
+    filter = zeros(filterDim,filterDim); % You should replace this
+    % From W, obtain the feature (filterDim x filterDim) needed during the
+    % convolution
+    % ---- YOUR CODE HERE ----
+    filter = squeeze(W(:,:,filterNum));
+    % ------------------------
+
+    % Flip the feature matrix because of the definition of convolution
+    filter = rot90(squeeze(filter),2);
+
+    % Obtain the image
+    im = squeeze(images(:, :, imageNum));
+
+    resp = zeros(convDim, convDim); % You should replace this
+    % Convolve "filter" with "im" to find "resp";
+    % be sure to do a 'valid' convolution
+    % ---- YOUR CODE HERE ----
+    resp = conv2(im,filter,'valid');
+
+    % Then, apply square-square-root pooling on "resp" to get the hidden
+    % activation "act"
+    act = zeros(convDim / poolDim, convDim / poolDim); % You should replace this
+    % ---- YOUR CODE HERE ----
+    act = conv2(resp.^2,poolMat,'valid');
+    act = sqrt(act(1:poolDim:end,1:poolDim:end)+params.epsilon);
+    % ------------------------
+    features(:, :, filterNum, imageNum) = act;
+  end
+end
+
+
+end
+
diff --git a/stl/stlExercise.m b/stl/stlExercise.m
new file mode 100644
index 0000000..1aefdd6
--- /dev/null
+++ b/stl/stlExercise.m
@@ -0,0 +1,162 @@
+%% CS294A/CS294W Self-taught Learning Exercise
+
+%  Instructions
+%  ------------
+%
+%  This file contains code that helps you get started on the
+%  self-taught learning exercise. You will need to complete code in
+%  feedfowardRICA.m. You will also need to have implemented softICACost.m
+%  and softmax_regression_vec.m from the previous exercises.
+% +%% ====================================================================== +% STEP 0: Here we provide the relevant parameters values that will +% allow your RICA to get good filters; you do not need to +% change the parameters below. +addpath(genpath('..')) +imgSize = 28; +global params; +params.patchWidth=9; % width of a patch +params.n=params.patchWidth^2; % dimensionality of input to RICA +params.lambda = 0.0005; % sparsity cost +params.numFeatures = 32; % number of filter banks to learn +params.epsilon = 1e-2; + +%% ====================================================================== +% STEP 1: Load data from the MNIST database +% +% This loads our training and test data from the MNIST database files. +% We have sorted the data for you in this so that you will not have to +% change it. + +% Load MNIST database files +mnistData = loadMNISTImages('../common/train-images-idx3-ubyte'); +mnistLabels = loadMNISTLabels('../common/train-labels-idx1-ubyte'); + +numExamples = size(mnistData, 2); +% 50000 of the data are pretended to be unlabelled +unlabeledSet = 1:50000; +unlabeledData = mnistData(:, unlabeledSet); + +% the rest are equally splitted into labelled train and test data + + +trainSet = 50001:55000; +testSet = 55001:60000; +trainData = mnistData(:, trainSet); +trainLabels = mnistLabels(trainSet)' + 1; % Shift Labels to the Range 1-10 +% only keep digits 0-4, so that unlabelled dataset has different distribution +% than the labelled one. +removeSet = find(trainLabels > 5); +trainData(:,removeSet)= [] ; +trainLabels(removeSet) = []; + +testData = mnistData(:, testSet); +testLabels = mnistLabels(testSet)' + 1; % Shift Labels to the Range 1-10 +% only keep digits 0-4 +removeSet = find(testLabels > 5); +testData(:,removeSet)= [] ; +testLabels(removeSet) = []; + + +% Output Some Statistics +fprintf('# examples in unlabeled set: %d\n', size(unlabeledData, 2)); +fprintf('# examples in supervised training set: %d\n\n', size(trainData, 2)); +fprintf('# examples in supervised testing set: %d\n\n', size(testData, 2)); + +%% ====================================================================== +% STEP 2: Train the RICA +% This trains the RICA on the unlabeled training images. + +% Randomly initialize the parameters +randTheta = randn(params.numFeatures,params.n)*0.01; % 1/sqrt(params.n); +randTheta = randTheta ./ repmat(sqrt(sum(randTheta.^2,2)), 1, size(randTheta,2)); +randTheta = randTheta(:); + +% subsample random patches from the unlabelled+training data +patches = samplePatches([unlabeledData,trainData],params.patchWidth,200000); + +%configure minFunc +options.Method = 'lbfgs'; +options.MaxFunEvals = Inf; +options.MaxIter = 1000; +% You'll need to replace this line with RICA training code +opttheta = randTheta; + +% Find opttheta by running the RICA on all the training patches. +% You will need to whitened the patches with the zca2 function +% then call minFunc with the softICACost function as seen in the RICA exercise. 
+%% ----------------- YOUR CODE HERE ----------------------
+[patches,V,E,D] = zca2(patches);
+% optimize
+[opttheta, cost, exitflag] = minFunc( @(theta) softICACost(theta, patches, params), randTheta, options);
+%% -------------------------------------------------------
+% reshape and visualize weights
+W = reshape(opttheta, params.numFeatures, params.n);
+display_network(W');
+
+%% ======================================================================
+
+%% STEP 3: Extract Features from the Supervised Dataset
+% Pre-multiply the weights with the whitening matrix: this is equivalent to
+% whitening each image patch before applying convolution. V should be the
+% same V returned by zca2 when you whitened the patches.
+W = W*V;
+% reshape RICA weights to be convolutional weights.
+W = reshape(W, params.numFeatures, params.patchWidth, params.patchWidth);
+W = permute(W, [2,3,1]);
+
+% setting up convolutional feed-forward. You do not need to modify this code.
+filterDim = params.patchWidth;
+poolDim = 5;
+numFilters = params.numFeatures;
+trainImages=reshape(trainData, imgSize, imgSize, size(trainData, 2));
+testImages=reshape(testData, imgSize, imgSize, size(testData, 2));
+% Compute convolutional responses
+% TODO: You will need to complete feedfowardRICA.m
+trainAct = feedfowardRICA(filterDim, poolDim, numFilters, trainImages, W);
+testAct = feedfowardRICA(filterDim, poolDim, numFilters, testImages, W);
+% reshape the responses into feature vectors
+featureSize = size(trainAct,1)*size(trainAct,2)*size(trainAct,3);
+trainFeatures = reshape(trainAct, featureSize, size(trainData, 2));
+testFeatures = reshape(testAct, featureSize, size(testData, 2));
+%% ======================================================================
+%% STEP 4: Train the softmax classifier
+
+numClasses = 5; % doing 5-class digit recognition
+% initialize softmax weights randomly
+randTheta2 = randn(numClasses, featureSize)*0.01; % 1/sqrt(params.n);
+randTheta2 = randTheta2 ./ repmat(sqrt(sum(randTheta2.^2,2)), 1, size(randTheta2,2));
+randTheta2 = randTheta2';
+randTheta2 = randTheta2(:);
+
+% Use minFunc and softmax_regression_vec from the previous exercise to
+% train a multi-class classifier.
+%% ----------------- YOUR CODE HERE ----------------------
+options.Method = 'lbfgs';
+options.MaxFunEvals = Inf;
+options.MaxIter = 300;
+% optimize
+[opttheta_softmax, cost, exitflag] = minFunc( @(theta) softmax_regression_vec(theta, trainFeatures, trainLabels), randTheta2, options);
+
+%% -----------------------------------------------------
+
+
+%%======================================================================
+%% STEP 5: Testing
+% Compute predictions on the train and test sets.
+%% ----------------- YOUR CODE HERE ----------------------
+W_softmax = reshape(opttheta_softmax, featureSize, numClasses);
+train_score = trainFeatures'*W_softmax;
+[~,train_pred] = max(train_score, [], 2);
+score = testFeatures'*W_softmax;
+[~,pred] = max(score, [], 2);
+%% -----------------------------------------------------
+
+% Classification Score
+fprintf('Train Accuracy: %f%%\n', 100*mean(train_pred(:) == trainLabels(:)));
+fprintf('Test Accuracy: %f%%\n', 100*mean(pred(:) == testLabels(:)));
+% You should get 100% train accuracy and ~99% test accuracy. With random
+% convolutional weights we get 97.5% test accuracy. Actual results may
+% vary as a result of random initialization.
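[Editor's note] For reference, one possible vectorized implementation of the softmax_regression_vec stub from ex1. This is a sketch, not the canonical solution; it follows the ex1 convention that theta carries num_classes-1 columns with the last class fixed at zero, whereas STEP 4 above passes a full numClasses-column theta, so the reshape would need adapting there:

    m = size(X,2);
    n = size(X,1);
    theta = reshape(theta, n, []);
    scores = [theta'*X; zeros(1,m)];                    % K x m, class K fixed at 0
    scores = bsxfun(@minus, scores, max(scores,[],1));  % subtract max for stability
    p = exp(scores);
    p = bsxfun(@rdivide, p, sum(p,1));                  % softmax probabilities
    idx = sub2ind(size(p), y, 1:m);                     % linear indices of true labels
    f = -sum(log(p(idx)));                              % negative log-likelihood
    d = p;
    d(idx) = d(idx) - 1;                                % p minus indicator of y
    g = X * d';                                         % n x K gradient
    g = g(:,1:end-1);                                   % drop the fixed last class
    g = g(:);                                           % flatten for minFunc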