OHE doc, Activation functions+doc #102

Open · wants to merge 30 commits into base: main

Commits (30)
7053fa7
OHE doc, Activation functions+doc
NandiniGera Feb 11, 2023
56af84e
updated
NandiniGera Feb 12, 2023
5eebc29
updated
NandiniGera Feb 12, 2023
8184ff7
Formatted code using clang-format
NandiniGera Feb 12, 2023
6a0fd9d
resolved conflicts
NandiniGera Feb 12, 2023
4111d84
updated
NandiniGera Feb 12, 2023
5fb3790
Merge branch 'main' of https://github.com/NandiniGera/slowmokit
NandiniGera Feb 12, 2023
5015e46
Update src/slowmokit/methods/activation_functions.hpp
NandiniGera Feb 12, 2023
357ee7b
Update src/slowmokit/methods/activation_functions.hpp
NandiniGera Feb 12, 2023
c26aba7
Update src/slowmokit/methods/activation_functions.cpp
NandiniGera Feb 12, 2023
c7438e6
updated
NandiniGera Feb 12, 2023
9459616
Merge branch 'main' of https://github.com/NandiniGera/slowmokit
NandiniGera Feb 12, 2023
41a1f6e
Formatted code using clang-format
NandiniGera Feb 12, 2023
8f287f1
Formatted code using clang-format
NandiniGera Feb 12, 2023
110d2f6
updated all changes
NandiniGera Feb 12, 2023
b1a9bba
updated all changes
NandiniGera Feb 12, 2023
643a0d6
Merge branch 'main' of https://github.com/NandiniGera/slowmokit
NandiniGera Feb 12, 2023
7c81c58
Formatted code using clang-format
NandiniGera Feb 12, 2023
a5ae5f4
Merge branch 'main' into main
Ishwarendra Feb 14, 2023
3e8af8e
fixed bracket issue
Ishwarendra Feb 14, 2023
eaa4e3a
updated
NandiniGera Feb 15, 2023
a411fdb
Merge remote-tracking branch 'upstream/main'
NandiniGera Feb 15, 2023
2b09f57
Formatted code using clang-format
NandiniGera Feb 15, 2023
ef42bac
updated
NandiniGera Feb 15, 2023
b4d5efd
Merge branch 'main' of https://github.com/NandiniGera/slowmokit
NandiniGera Feb 15, 2023
3f00e98
Formatted code using clang-format
NandiniGera Feb 15, 2023
ee101ce
minor changes
uttammittal02 Feb 15, 2023
4618b53
updated
NandiniGera Feb 16, 2023
46d1d8c
Merge branch 'main' of https://github.com/NandiniGera/slowmokit
NandiniGera Feb 16, 2023
194be70
Merge remote-tracking branch 'upstream/main'
NandiniGera Feb 16, 2023
61 changes: 60 additions & 1 deletion CMakeLists.txt
@@ -25,4 +25,63 @@ add_subdirectory(src/slowmokit)

add_library(slowmokit
src/slowmokit.hpp
src/slowmokit.cpp


# base files
src/slowmokit/base.hpp
src/slowmokit/core.hpp
src/slowmokit/prereqs.hpp

# ducks
src/slowmokit/ducks/ducks.hpp
src/slowmokit/ducks/io/io.hpp
src/slowmokit/ducks/io/io.cpp
src/slowmokit/ducks/matrix/matrix.hpp
src/slowmokit/ducks/matrix/matrix_main.cpp
src/slowmokit/ducks/matrix/matrix_free.cpp

# methods
src/slowmokit/models/model.hpp
src/slowmokit/methods/neighbors/knn/knn.cpp
src/slowmokit/methods/neighbors/knn/knn.hpp
src/slowmokit/methods/linear_model/linear_regression.hpp
src/slowmokit/methods/linear_model/linear_regression/linear_regression.hpp
src/slowmokit/methods/linear_model/linear_regression/linear_regression.cpp
src/slowmokit/methods/cluster/kMeans/kMeans.cpp
src/slowmokit/methods/cluster/kMeans/kMeans.hpp
src/slowmokit/methods/cluster/kMeans.hpp
src/slowmokit/methods/metrics/accuracy.hpp
src/slowmokit/methods/metrics/accuracy.cpp
src/slowmokit/methods/preprocessing/normalization.hpp
src/slowmokit/methods/preprocessing/normalization.cpp
src/slowmokit/methods/preprocessing/standardization.hpp
src/slowmokit/methods/preprocessing/standardization.cpp
src/slowmokit/methods/neighbors/bernoulli_nb.hpp
src/slowmokit/methods/neighbors/bernoulli_nb/bernoulli_nb.hpp
src/slowmokit/methods/neighbors/bernoulli_nb/bernoulli_nb.cpp
src/slowmokit/methods/linear_model/logistic_regression.hpp
src/slowmokit/methods/linear_model/logistic_regression/logistic_regression.hpp
src/slowmokit/methods/linear_model/logistic_regression/logistic_regression.cpp
src/slowmokit/methods/preprocessing/label_encoder.cpp
src/slowmokit/methods/preprocessing/label_encoder.hpp
src/slowmokit/methods/metrics/classification_report.hpp
src/slowmokit/methods/metrics/classification_report.cpp
src/slowmokit/methods/neighbors/gaussian_nb.hpp
src/slowmokit/methods/neighbors/gaussian_nb/gaussian_nb.cpp
src/slowmokit/methods/neighbors/gaussian_nb/gaussian_nb.hpp
src/slowmokit/methods/neighbors/knn.hpp
src/slowmokit/methods/neighbors/knn/knn.hpp
src/slowmokit/methods/neighbors/knn/knn.cpp
src/slowmokit/methods/preprocessing/one_hot_encoder.hpp
src/slowmokit/methods/preprocessing/one_hot_encoder.cpp
src/slowmokit/methods/metrics/precision.hpp
src/slowmokit/methods/metrics/precision.cpp
src/slowmokit/methods/metrics/recall.hpp
src/slowmokit/methods/metrics/recall.cpp
src/slowmokit/methods/metrics/f1score.hpp
src/slowmokit/methods/metrics/f1score.cpp
src/slowmokit/methods/metrics/mean_squared_error.hpp
src/slowmokit/methods/metrics/mean_squared_error.cpp
src/slowmokit/methods/metrics/silhouette_score.cpp
src/slowmokit/methods/metrics/silhouette_score.hpp)

117 changes: 117 additions & 0 deletions docs/methods/activation_functions.md
@@ -0,0 +1,117 @@
# Activation Functions

Sigmoid - Maps its input to the range (0, 1). It is computationally expensive, suffers from the vanishing-gradient problem, and is not zero-centred. It is generally used for binary classification problems.

tanh - The hyperbolic tangent is a sigmoid-shaped function with a range of (-1, 1). It is often used in deep learning models for its ability to model nonlinear boundaries.

arctan - The ArcTan function is a sigmoid-shaped function that models accelerating and decelerating outputs with a useful output range: it maps input values into (−π/2, π/2). Its derivative converges quadratically to 0 for large input values.

ReLU - The ReLU activation function returns 0 if its input is less than 0; for any positive input, the output is the same as the input. It is continuous everywhere but non-differentiable at 0, and its derivative is 0 for any negative input.

leakyReLU - Leaky ReLU adds a small negative slope for inputs below 0, so instead of not firing at all, neurons output a small value for negative inputs; this keeps gradients flowing and makes the layer easier to optimize.

softmax - The softmax is a generalisation of the sigmoid to multiple classes and is used in multi-class classification problems. Like the sigmoid, it produces values in the range 0–1 (and they sum to 1), so it is used as the final layer in classification models.

binaryStep - The step activation function is used in the perceptron network, usually in single-layer networks, to convert the output to a binary value (0 or 1). It is also called the binary step function.


The function definitions below are written for a single variable to keep them comprehensible; in the code they are implemented for a vector of 'double' values.
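
Based on the call style used in the examples below (the vector is modified in place), here is a minimal sketch of what one of these vector implementations might look like. This is an illustration of the idea, not the library's exact code:

```cpp
#include <cmath>
#include <vector>

// Hypothetical sketch: applies the logistic sigmoid to each element in place,
// mirroring the sigmoid(x) call style used in the examples below.
void sigmoid(std::vector<double> &x)
{
  for (double &v : x)
    v = 1.0 / (1.0 + std::exp(-v));
}
```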
## Parameters

| Name | Definition | Type |
|--------------|--------------------------------------------|--------------|
| x | double value on which the function is applied. | `double` |


## Functions

| Name | Definition | Return value |
|----------------------------------------|-----------------------------------------------|---------------|
| sigmoid(x) | 1 / (1 + e^(-x)) | `double` |
| tanh(x) | (e^x - e^(-x)) / (e^x + e^(-x)) | `double` |
| arctan(x) | the inverse of tan(x) | `double` |
| ReLU(x) | max(0, x) | `double` |
| leakyReLU(x) | max(αx, x), α = 0.1 | `double` |
| binaryStep(x) | 0 if x < 0; 1 if x ≥ 0 | `double` |
| softmax(x) | e^(x_i) / Σ_j e^(x_j) | `double` |
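
As a quick sanity check on the softmax row: for x = (1, 2, 3) we get e^1 ≈ 2.718, e^2 ≈ 7.389 and e^3 ≈ 20.086, with sum ≈ 30.193, so softmax(x) ≈ (0.090, 0.245, 0.665), which sums to 1.

The examples below also call binaryToBipolar and bipolarToBinary. They are not listed in the table above; conventionally such functions convert between binary {0, 1} and bipolar {−1, 1} encodings via y = 2x − 1 and y = (x + 1) / 2 respectively, which we assume is their behaviour here.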


## Example

```cpp
// Note: the include paths are illustrative; adjust them to your build setup.
#include <iostream>
#include <vector>

#include "slowmokit/methods/activation_functions.hpp"

int main()
{
  // sigmoid example — prints approximately: 0.268941 0.5 0.731059
  {
    std::vector<double> x = {-1, 0, 1};
    sigmoid(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // tanh example — prints approximately: -0.761594 0 0.761594
  {
    std::vector<double> x = {-1, 0, 1};
    tanh(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // arctan example — prints approximately: -0.785398 0 0.785398
  {
    std::vector<double> x = {-1, 0, 1};
    arctan(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // ReLU example — prints: 1 2 3 (positive inputs pass through unchanged)
  {
    std::vector<double> x = {1, 2, 3};
    ReLU(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // leakyReLU example — prints: 1 2 3 (positive inputs pass through unchanged)
  {
    std::vector<double> x = {1, 2, 3};
    leakyReLU(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // binaryStep example — prints: 1 1 1 (all inputs are >= 0)
  {
    std::vector<double> x = {1, 2, 3};
    binaryStep(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // softmax example — prints approximately: 0.0900306 0.244728 0.665241
  {
    std::vector<double> x = {1, 2, 3};
    softmax(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // binaryToBipolar example (conversion convention assumed: y = 2x - 1)
  {
    std::vector<double> x = {-1, 1};
    binaryToBipolar(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // bipolarToBinary example (conversion convention assumed: y = (x + 1) / 2)
  {
    std::vector<double> x = {-1, 1};
    bipolarToBinary(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  return 0;
}
```
36 changes: 36 additions & 0 deletions docs/methods/preprocessing/one_hot_encoder.md
@@ -0,0 +1,36 @@
# One Hot Encoder

One-hot encoding is a technique for representing categorical variables as numerical values. Each unique value of a categorical variable is assigned a binary code, in which a "1" represents the presence of that value and a "0" represents its absence.

One-hot encoding makes training data more useful and expressive, and the encoded features can be rescaled easily.
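
For intuition, here is a minimal sketch of how such an encoder could work. This is an illustration only, not the library's actual implementation, and it assumes classes are indexed in the order they first appear:

```cpp
#include <map>
#include <string>
#include <vector>

// Hypothetical sketch of a one-hot encoder: each class gets the next free
// index the first time it is seen; each sample becomes a row with a single 1.
std::vector<std::vector<int>> oneHotEncoderSketch(const std::vector<std::string> &data,
                                                  int nClasses)
{
  std::map<std::string, int> index;
  std::vector<std::vector<int>> encoded;
  for (const auto &value : data)
  {
    if (index.find(value) == index.end())
    {
      int next = (int) index.size(); // order of first appearance (assumption)
      index[value] = next;
    }
    std::vector<int> row(nClasses, 0);
    row[index[value]] = 1;
    encoded.push_back(row);
  }
  return encoded;
}
```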


## Parameters

| Name | Definition | Type |
|--------------|--------------------------------------------|--------------|
| data | The categorical values to be encoded, passed as the data parameter of the oneHotEncoder function. | `vector<string>` |
| nClasses | This parameter is an integer that specifies the number of classes or categories in the input data. | `int` |

## Methods

| Name | Definition | Return value |
|----------------------------------------|-----------------------------------------------|---------------|
| `oneHotEncoder(vector<T> data, int nClasses)` | Encodes the data into one-hot numerical vectors. | `vector<vector<int>>` |

## Example

```cpp
// Note: the include path is illustrative; adjust it to your build setup.
#include <iostream>
#include <string>
#include <vector>

#include "slowmokit/methods/preprocessing/one_hot_encoder.hpp"

int main()
{
  // 7 samples drawn from 4 distinct classes
  std::vector<std::string> data = {"apples", "banana", "mango", "pear",
                                   "mango",  "apples", "pear"};
  int nClasses = 4;
  std::vector<std::vector<int>> oneHotEncodedData = oneHotEncoder(data, nClasses);
  // print one encoded row per input sample
  for (const auto &row : oneHotEncodedData)
  {
    for (const auto &column : row)
      std::cout << column << " ";
    std::cout << std::endl;
  }
  return 0;
}
```
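
Assuming classes are indexed in order of first appearance (apples = 0, banana = 1, mango = 2, pear = 3), the first printed row would be `1 0 0 0`, the second `0 1 0 0`, the third `0 0 1 0`, and so on for the remaining samples.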
74 changes: 74 additions & 0 deletions examples/activation_functions_eg.cpp
@@ -0,0 +1,74 @@
// Example usage of the slowmokit activation functions.
// Note: the include path is illustrative; adjust it to your build setup.
#include <iostream>
#include <vector>

#include "slowmokit/methods/activation_functions.hpp"

int main()
{
  // sigmoid example — prints approximately: 0.268941 0.5 0.731059
  {
    std::vector<double> x = {-1, 0, 1};
    sigmoid(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // tanh example — prints approximately: -0.761594 0 0.761594
  {
    std::vector<double> x = {-1, 0, 1};
    tanh(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // arctan example — prints approximately: -0.785398 0 0.785398
  {
    std::vector<double> x = {-1, 0, 1};
    arctan(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // ReLU example — prints: 1 2 3 (positive inputs pass through unchanged)
  {
    std::vector<double> x = {1, 2, 3};
    ReLU(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // leakyReLU example — prints: 1 2 3 (positive inputs pass through unchanged)
  {
    std::vector<double> x = {1, 2, 3};
    leakyReLU(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // binaryStep example — prints: 1 1 1 (all inputs are >= 0)
  {
    std::vector<double> x = {1, 2, 3};
    binaryStep(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // softmax example — prints approximately: 0.0900306 0.244728 0.665241
  {
    std::vector<double> x = {1, 2, 3};
    softmax(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // binaryToBipolar example (conversion convention assumed: y = 2x - 1)
  {
    std::vector<double> x = {-1, 1};
    binaryToBipolar(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  // bipolarToBinary example (conversion convention assumed: y = (x + 1) / 2)
  {
    std::vector<double> x = {-1, 1};
    bipolarToBinary(x);
    for (double v : x)
      std::cout << v << " ";
    std::cout << "\n";
  }

  return 0;
}