diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index d581799..d1e848e 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -23,7 +23,7 @@ jobs:
uses: actions/cache@v2
with:
path: ~/.cache/pip
- key: pip-cache
+ key: pip-cache-datadriven
- name: Install additional dependencies
run: |
pip install coverage
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 2cc108b..ef54c36 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -113,9 +113,9 @@ jobs:
uses: actions/cache@v2
with:
path: ~/.cache/pip
- key: pip-cache
+ key: pip-cache-datadriven
- name: Update
- run: pip install --upgrade --upgrade-strategy eager -e .
+ run: pip install --upgrade --upgrade-strategy eager -e .[datadriven]
- name: Run tests
run: python -m tests.test_data_model
test_datasets:
@@ -421,32 +421,32 @@ jobs:
uses: actions/cache@v2
with:
path: ~/.cache/pip
- key: pip-cache
+ key: pip-cache-datadriven
- name: Update
- run: pip install --upgrade --upgrade-strategy eager -e .
+ run: pip install --upgrade --upgrade-strategy eager -e .[datadriven]
- name: Run tests
run: python -m tests.test_surrogates
- test_tutorials:
- timeout-minutes: 5
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - name: Set up Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.7'
- - name: Install dependencies cache
- uses: actions/cache@v2
- with:
- path: ~/.cache/pip
- key: pip-cache
- - name: Update
- run: |
- pip install --upgrade --upgrade-strategy eager -e .
- pip install notebook
- pip install testbook
- - name: Run tests
- run: python -m tests.test_tutorials
+ # test_tutorials:
+ # timeout-minutes: 5
+ # runs-on: ubuntu-latest
+ # steps:
+ # - uses: actions/checkout@v3
+ # - name: Set up Python
+ # uses: actions/setup-python@v4
+ # with:
+ # python-version: '3.7'
+ # - name: Install dependencies cache
+ # uses: actions/cache@v2
+ # with:
+ # path: ~/.cache/pip
+ # key: pip-cache-datadriven
+ # - name: Update
+ # run: |
+ # pip install --upgrade --upgrade-strategy eager -e .
+ # pip install notebook
+ # pip install testbook
+ # - name: Run tests
+ # run: python -m tests.test_tutorials
test_uav_model:
timeout-minutes: 10
runs-on: ubuntu-latest
diff --git a/.github/workflows/update-cache.yml b/.github/workflows/update-cache.yml
index a1c8137..608ad5c 100644
--- a/.github/workflows/update-cache.yml
+++ b/.github/workflows/update-cache.yml
@@ -6,7 +6,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ['3.7']
+ python-version: ['3.10']
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
@@ -16,7 +16,7 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- python -m pip install -e .
+ python -m pip install -e .[datadriven]
python -m pip install notebook
python -m pip install testbook
python -m pip install requests
@@ -24,3 +24,27 @@ jobs:
with:
path: ~/.cache/pip
key: pip-cache
+
+ cache-dependencies-data-driven:
+ timeout-minutes: 15
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ['3.10']
+ steps:
+ - uses: actions/checkout@v3
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install -e '.[datadriven]'
+ python -m pip install notebook
+ python -m pip install testbook
+ python -m pip install requests
+ - uses: actions/cache@v3
+ with:
+ path: ~/.cache/pip
+ key: pip-cache-datadriven
diff --git a/README.md b/README.md
index 95054ac..82dd86e 100644
--- a/README.md
+++ b/README.md
@@ -13,6 +13,12 @@ ProgPy combines NASAs prog_models and prog_algs packages into a single python pa
## Installation
`pip3 install progpy`
+or
+
+`pip3 install progpy[datadriven]`
+
+to include dependencies for data-driven models
+
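+Note: in some shells (e.g., zsh), the square brackets are treated as glob characters, so the extra may need to be quoted: `pip3 install 'progpy[datadriven]'`
+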
## [Documentation](https://nasa.github.io/progpy/)
See documentation [here](https://nasa.github.io/progpy/)
@@ -26,26 +32,24 @@ Here is the directory structure for the github repository
`prog_model_template.py` - Template for Prognostics Model
`state_estimator_template.py` - Template for State Estimators
`predictor_template.py` - Template for Predictor
-`tutorial.ipynb` - Tutorial (Juypter Notebook)
## Citing this repository
Use the following to cite this repository:
```
-@misc{2023_nasa_progpy,
+@misc{2024_nasa_progpy,
| author = {Christopher Teubert and Katelyn Jarvis Griffith and Matteo Corbetta and Chetan Kulkarni and Portia Banerjee and Matthew Daigle},
| title = {{ProgPy Python Prognostics Packages}},
- | month = Oct,
- | year = 2023,
- | version = {1.6},
+ | month = May,
+ | year = 2024,
+ | version = {1.7},
| url = {https://nasa.github.io/progpy}
- | doi = {10.5281/ZENODO.8097013}
| }
```
The corresponding reference should look like this:
-C. Teubert, K. Jarvis Griffith, M. Corbetta, C. Kulkarni, P. Banerjee, M. Daigle, ProgPy Python Prognostics Packages, v1.6, Oct 2023. URL https://github.com/nasa/progpy.
+C. Teubert, K. Jarvis Griffith, M. Corbetta, C. Kulkarni, P. Banerjee, M. Daigle, ProgPy Python Prognostics Packages, v1.7, May 2024. URL https://github.com/nasa/progpy.
## Contributing Organizations
ProgPy was created by a partnership of multiple organizations, working together to build a set of high-quality prognostic tools for the wider PHM Community. We would like to give a big thank you for the ProgPy community, especially the following contributing organizations:
diff --git a/docs/.buildinfo b/docs/.buildinfo
index c396bfd..0af31a2 100644
--- a/docs/.buildinfo
+++ b/docs/.buildinfo
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: e5348f79dd5ab46a199baa607858f4a9
+config: 7d941a484a4f3440c5e3ef5478d11930
tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/.doctrees/api_ref/prog_server/load_ests.doctree b/docs/.doctrees/api_ref/prog_server/load_ests.doctree
index 438b721..6c8c330 100644
Binary files a/docs/.doctrees/api_ref/prog_server/load_ests.doctree and b/docs/.doctrees/api_ref/prog_server/load_ests.doctree differ
diff --git a/docs/.doctrees/api_ref/prog_server/prog_client.doctree b/docs/.doctrees/api_ref/prog_server/prog_client.doctree
index 70436ac..ba09ad6 100644
Binary files a/docs/.doctrees/api_ref/prog_server/prog_client.doctree and b/docs/.doctrees/api_ref/prog_server/prog_client.doctree differ
diff --git a/docs/.doctrees/api_ref/prog_server/prog_server.doctree b/docs/.doctrees/api_ref/prog_server/prog_server.doctree
index 74262de..80d44d8 100644
Binary files a/docs/.doctrees/api_ref/prog_server/prog_server.doctree and b/docs/.doctrees/api_ref/prog_server/prog_server.doctree differ
diff --git a/docs/.doctrees/api_ref/progpy/CompositeModel.doctree b/docs/.doctrees/api_ref/progpy/CompositeModel.doctree
index 2ad74f3..7d31ab6 100644
Binary files a/docs/.doctrees/api_ref/progpy/CompositeModel.doctree and b/docs/.doctrees/api_ref/progpy/CompositeModel.doctree differ
diff --git a/docs/.doctrees/api_ref/progpy/DataModel.doctree b/docs/.doctrees/api_ref/progpy/DataModel.doctree
index 5e3aa08..8303653 100644
Binary files a/docs/.doctrees/api_ref/progpy/DataModel.doctree and b/docs/.doctrees/api_ref/progpy/DataModel.doctree differ
diff --git a/docs/.doctrees/api_ref/progpy/EnsembleModel.doctree b/docs/.doctrees/api_ref/progpy/EnsembleModel.doctree
index 73156cc..4bcd1dd 100644
Binary files a/docs/.doctrees/api_ref/progpy/EnsembleModel.doctree and b/docs/.doctrees/api_ref/progpy/EnsembleModel.doctree differ
diff --git a/docs/.doctrees/api_ref/progpy/LinearModel.doctree b/docs/.doctrees/api_ref/progpy/LinearModel.doctree
index e8293aa..349c35c 100644
Binary files a/docs/.doctrees/api_ref/progpy/LinearModel.doctree and b/docs/.doctrees/api_ref/progpy/LinearModel.doctree differ
diff --git a/docs/.doctrees/api_ref/progpy/Loading.doctree b/docs/.doctrees/api_ref/progpy/Loading.doctree
index c2703a4..874a658 100644
Binary files a/docs/.doctrees/api_ref/progpy/Loading.doctree and b/docs/.doctrees/api_ref/progpy/Loading.doctree differ
diff --git a/docs/.doctrees/api_ref/progpy/Predictor.doctree b/docs/.doctrees/api_ref/progpy/Predictor.doctree
index d2598b9..8e2c292 100644
Binary files a/docs/.doctrees/api_ref/progpy/Predictor.doctree and b/docs/.doctrees/api_ref/progpy/Predictor.doctree differ
diff --git a/docs/.doctrees/api_ref/progpy/PrognosticModel.doctree b/docs/.doctrees/api_ref/progpy/PrognosticModel.doctree
index 3e21a67..e4eed8c 100644
Binary files a/docs/.doctrees/api_ref/progpy/PrognosticModel.doctree and b/docs/.doctrees/api_ref/progpy/PrognosticModel.doctree differ
diff --git a/docs/.doctrees/api_ref/progpy/StateEstimator.doctree b/docs/.doctrees/api_ref/progpy/StateEstimator.doctree
index 056b063..9f7d198 100644
Binary files a/docs/.doctrees/api_ref/progpy/StateEstimator.doctree and b/docs/.doctrees/api_ref/progpy/StateEstimator.doctree differ
diff --git a/docs/.doctrees/api_ref/progpy/Utils.doctree b/docs/.doctrees/api_ref/progpy/Utils.doctree
index f4a8bb9..a0b2592 100644
Binary files a/docs/.doctrees/api_ref/progpy/Utils.doctree and b/docs/.doctrees/api_ref/progpy/Utils.doctree differ
diff --git a/docs/.doctrees/environment.pickle b/docs/.doctrees/environment.pickle
index 46ce0b8..f108617 100644
Binary files a/docs/.doctrees/environment.pickle and b/docs/.doctrees/environment.pickle differ
diff --git a/docs/.doctrees/guide.doctree b/docs/.doctrees/guide.doctree
index 49e18c7..07a6e43 100644
Binary files a/docs/.doctrees/guide.doctree and b/docs/.doctrees/guide.doctree differ
diff --git a/docs/.doctrees/index.doctree b/docs/.doctrees/index.doctree
index b7bc83a..6747602 100644
Binary files a/docs/.doctrees/index.doctree and b/docs/.doctrees/index.doctree differ
diff --git a/docs/.doctrees/installing.doctree b/docs/.doctrees/installing.doctree
new file mode 100644
index 0000000..2c52ea3
Binary files /dev/null and b/docs/.doctrees/installing.doctree differ
diff --git a/docs/.doctrees/prog_algs_guide.doctree b/docs/.doctrees/prog_algs_guide.doctree
index a7d379f..003d816 100644
Binary files a/docs/.doctrees/prog_algs_guide.doctree and b/docs/.doctrees/prog_algs_guide.doctree differ
diff --git a/docs/.doctrees/prog_models_guide.doctree b/docs/.doctrees/prog_models_guide.doctree
index 55b98e1..398857f 100644
Binary files a/docs/.doctrees/prog_models_guide.doctree and b/docs/.doctrees/prog_models_guide.doctree differ
diff --git a/docs/.doctrees/releases.doctree b/docs/.doctrees/releases.doctree
index 2657d17..245bd6c 100644
Binary files a/docs/.doctrees/releases.doctree and b/docs/.doctrees/releases.doctree differ
diff --git a/docs/_downloads/0e5fd1a6bf0ee8b3ff6785357ecebade/online_prog.py b/docs/_downloads/0e5fd1a6bf0ee8b3ff6785357ecebade/online_prog.py
index 0428a38..c4081ac 100644
--- a/docs/_downloads/0e5fd1a6bf0ee8b3ff6785357ecebade/online_prog.py
+++ b/docs/_downloads/0e5fd1a6bf0ee8b3ff6785357ecebade/online_prog.py
@@ -13,10 +13,10 @@
from time import sleep
def run_example():
- # Step 1: Open a session with the server for a thrown object.
+ # Step 1: Open a session with the server for a thrown object.
# Use all default configuration options.
# Except for the save frequency, which we'll set to 1 second.
- session = prog_client.Session('ThrownObject', pred_cfg = {'save_freq': 1})
+ session = prog_client.Session('ThrownObject', pred_cfg={'save_freq': 1})
print(session) # Printing the Session Information
# Step 2: Prepare data to send to server
@@ -58,7 +58,7 @@ def run_example():
for i in range(len(example_data)):
# Send data to server
print(f'{example_data[i][0]}s: Sending data to server... ', end='')
- session.send_data(time = example_data[i][0], **example_data[i][1])
+ session.send_data(time=example_data[i][0], **example_data[i][1])
# Check for a prediction result
status = session.get_prediction_status()
diff --git a/docs/_downloads/412560afd93e3e54078df8279ff54887/04_New Models.ipynb b/docs/_downloads/412560afd93e3e54078df8279ff54887/04_New Models.ipynb
new file mode 100644
index 0000000..0918929
--- /dev/null
+++ b/docs/_downloads/412560afd93e3e54078df8279ff54887/04_New Models.ipynb
@@ -0,0 +1,1661 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 4. Defining new Physics-Based Prognostic Models"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "All of the past sections describe how to use an existing model. In this section, we will describe how to create a new model. This section specifically describes creating a new physics-based model. For training and creating data-driven models, see 5. Data-driven Models."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Linear Models"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "The easiest model to build is a linear model. Linear models are models whose dynamics can be described by the following linear equations:\n",
+ "\n",
+ "\n",
+ "\n",
+ "**The State Equation**:\n",
+ "$$\n",
+ "\\frac{dx}{dt} = Ax + Bu + E\n",
+ "$$\n",
+ "\n",
+ "**The Output Equation**:\n",
+ "$$\n",
+ "z = Cx + D\n",
+ "$$\n",
+ "\n",
+ "**The Event State Equation**:\n",
+ "$$\n",
+ "es = Fx + G\n",
+ "$$\n",
+ "\n",
+ "$x$ is `state`, $u$ is `input`, $z$ is `output`, and $es$ is `event state`\n",
+ "\n",
+ "Linear Models are defined by creating a new model class that inherits from progpy's LinearModel class and defines the following properties:\n",
+ "* $A$: 2-D np.array[float], dimensions: n_states x n_states. The state transition matrix. It dictates how the current state affects the change in state dx/dt.\n",
+ "* $B$: 2-D np.array[float], optional (zeros by default), dimensions: n_states x n_inputs. The input matrix. It dictates how the input affects the change in state dx/dt.\n",
+ "* $C$: 2-D np.array[float], dimensions: n_outputs x n_states. The output matrix. It determines how the state variables contribute to the output.\n",
+ "* $D$: 1-D np.array[float], optional (zeros by default), dimensions: n_outputs x 1. A constant term that can represent any biases or offsets in the output.\n",
+ "* $E$: 1-D np.array[float], optional (zeros by default), dimensions: n_states x 1. A constant term, representing any external effects that are not captured by the state and input.\n",
+ "* $F$: 2-D np.array[float], dimensions: n_es x n_states. The event state matrix, dictating how state variables contribute to the event state.\n",
+ "* $G$: 1-D np.array[float], optional (zeros by default), dimensions: n_es x 1. A constant term that can represent any biases or offsets in the event state.\n",
+ "* __inputs__: list[str] - `input` keys\n",
+ "* __states__: list[str] - `state` keys\n",
+ "* __outputs__: list[str] - `output` keys\n",
+ "* __events__: list[str] - `event` keys"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "We will now utilize our LinearModel to model the classical physics problem of throwing an object into the air. This is a common example model, the non-linear version of which (`progpy.examples.ThrownObject`) has been used frequently throughout the examples. This version of ThrownObject will behave nearly identically to the non-linear ThrownObject, except it will not include the non-linear effects of air resistance.\n",
+    "\n",
+    "We can create a subclass of LinearModel to simulate a thrown object, which we will call the ThrownObject class.\n",
+ "\n",
+ "First, some definitions for our Model:\n",
+ "\n",
+ "**Events**: (2)\n",
+ "* `falling: The object is falling`\n",
+ "* `impact: The object has hit the ground`\n",
+ "\n",
+ "**Inputs/Loading**: (0)\n",
+ "* `None`\n",
+ "\n",
+ "**States**: (2)\n",
+ "* `x: Position in space (m)`\n",
+ "* `v: Velocity in space (m/s)`\n",
+ "\n",
+ "**Outputs/Measurements**: (1)\n",
+ "* `x: Position in space (m)`"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now, for our keyword arguments:\n",
+ "\n",
+ "* __thrower_height : Optional, float__\n",
+ " * Height of the thrower (m). Default is 1.83 m\n",
+ "* __throwing_speed : Optional, float__\n",
+ " * Speed at which the ball is thrown (m/s). Default is 40 m/s"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With our definitions, we can now create the ThrownObject Model.\n",
+ "\n",
+ "First, we need to import the necessary packages."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "from progpy import LinearModel"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we'll define some features of a ThrownObject LinearModel. Recall that all LinearModels follow a set of core equations and require some specific properties (see above). In the next step, we'll define our inputs, states, outputs, and events, along with the $A$, $C$, $E$, and $F$ values.\n",
+ "\n",
+ "First, let's consider state transition. For an object thrown into the air without air resistance, velocity would decrease linearly by __-9.81__ \n",
+ "$\\dfrac{m}{s^2}$ due to the effect of gravity, as described below:\n",
+ "\n",
+ " $$\\frac{dv}{dt} = -9.81$$\n",
+ "\n",
+ " Position change is defined by velocity (v), as described below:\n",
+ " \n",
+ " $$\\frac{dx}{dt} = v$$\n",
+ "\n",
+    "  Note: for the above equation, x is position, not the full state vector. Combining these equations with the model $\frac{dx}{dt}$ equation defined above yields the A and E matrices defined below. Note that there is no B defined because this model does not have any inputs."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject(LinearModel):\n",
+    "    events = ['falling', 'impact']\n",
+ " inputs = [] \n",
+ " states = ['x', 'v']\n",
+ " outputs = ['x']\n",
+ " \n",
+ " A = np.array([[0, 1], [0, 0]])\n",
+ " C = np.array([[1, 0]])\n",
+ " E = np.array([[0], [-9.81]])\n",
+ " F = None"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that we defined our `A`, `C`, `E`, and `F` values to fit the dimensions that were stated at the beginning of the notebook! Since the parameter `F` is not optional, we have to explicitly set the value as __None__.\n",
+ "\n",
+ "Next, we'll define some default parameters for our ThrownObject model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject(ThrownObject): # Continue the ThrownObject class\n",
+ " default_parameters = {\n",
+ " 'thrower_height': 1.83,\n",
+ " 'throwing_speed': 40,\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the following cells, we'll define some class functions necessary to perform prognostics on the model.\n",
+ "\n",
+ "The `initialize()` function sets the initial system state. Since we have defined the `x` and `v` values for our ThrownObject model to represent position and velocity in space, our initial values would be the thrower_height and throwing_speed parameters, respectively."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject(ThrownObject):\n",
+ " def initialize(self, u=None, z=None):\n",
+ " return self.StateContainer({\n",
+ " 'x': self['thrower_height'],\n",
+ " 'v': self['throwing_speed']\n",
+ " })"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "For our `threshold_met()`, we define the function to return True for event 'falling' when our thrown object has a velocity of less than 0 (the object is 'falling'), and True for event 'impact' when our thrown object's distance from the ground is less than or equal to 0 (the object is on the ground, or has made 'impact').\n",
+    "\n",
+    "`threshold_met()` returns a _dict_ with one entry per event; an event's threshold has been met when its entry is __True__."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject(ThrownObject):\n",
+ " def threshold_met(self, x):\n",
+ " return {\n",
+ " 'falling': x['v'] < 0,\n",
+ " 'impact': x['x'] <= 0\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, for our `event_state()`, we will calculate the measurement of progress towards the events. We normalize our values such that they are in the range of 0 to 1, where 0 means the event has occurred."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject(ThrownObject):\n",
+ " def event_state(self, x): \n",
+ " x_max = x['x'] + np.square(x['v'])/(9.81*2)\n",
+ " return {\n",
+ " 'falling': np.maximum(x['v']/self['throwing_speed'],0),\n",
+ " 'impact': np.maximum(x['x']/x_max,0) if x['v'] < 0 else 1\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With these functions created, we can now use the `simulate_to_threshold()` function to simulate the movement of the thrown object in air. For more information, see 1. Simulation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m = ThrownObject()\n",
+ "save = m.simulate_to_threshold(print=True, save_freq=1, events='impact', dt=0.1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "__Note__: Because our model takes in no inputs, we have no need to define a future loading function. However, for most models, there would be inputs, and thus a need for a future loading function. For more information on future loading functions and when to use them, please refer to the future loading section in 1. Simulation.\n",
+ "\n",
+ "Let's take a look at the outputs of this model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = save.outputs.plot(title='generated model')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Notice that the plot resembles a parabola, which represents the position of the ball through space as time progresses!"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For more information on Linear Models, see the [Linear Model](https://nasa.github.io/progpy/api_ref/prog_models/LinearModel.html) Documentation."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## New State Transition Models"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the previous section, we defined a new prognostic model using the LinearModel class. This can be a powerful tool for defining models that can be described as a linear time series. Physics-based state transition models that cannot be described linearly are constructed by subclassing [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel). To demonstrate this, we'll create a new model class that inherits from this class. Once constructed in this way, the analysis and simulation tools for PrognosticsModels will work on the new model.\n",
+ "\n",
+ "For this example, we'll create a simple state-transition model of an object thrown upward into the air without air resistance. Note that this is the same dynamic system as the linear model example above, but formulated in a different way. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First, we'll import the necessary packages to create a general prognostics model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "from progpy import PrognosticsModel"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Next, we'll define our model class. PrognosticsModels require defining [inputs](https://nasa.github.io/progpy/glossary.html#term-input), [states](https://nasa.github.io/progpy/glossary.html#term-state), [outputs](https://nasa.github.io/progpy/glossary.html#term-output), and [event](https://nasa.github.io/progpy/glossary.html#term-event) keys. As in the above example, the states include position (`x`) and velocity (`v`) of the object, the output is position (`x`), and the events are `falling` and `impact`. \n",
+ "\n",
+ "Note that we define this class as `ThrownObject_ST` to distinguish it as a state-transition model compared to the previous linear model class. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject_ST(PrognosticsModel):\n",
+ " \"\"\"\n",
+ " Model that simulates an object thrown into the air without air resistance\n",
+ " \"\"\"\n",
+ "\n",
+ " inputs = [] # no inputs, no way to control\n",
+ " states = [\n",
+ " 'x', # Position (m) \n",
+ " 'v' # Velocity (m/s)\n",
+ " ]\n",
+ " outputs = [ # Anything we can measure\n",
+ " 'x' # Position (m)\n",
+ " ]\n",
+ " events = [\n",
+ " 'falling', # Event- object is falling\n",
+ " 'impact' # Event- object has impacted ground\n",
+ " ]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we'll add some default parameter definitions. These values can be overwritten by passing parameters into the constructor. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject_ST(ThrownObject_ST):\n",
+ "\n",
+ " default_parameters = {\n",
+ " 'thrower_height': 1.83, # default height \n",
+ " 'throwing_speed': 40, # default speed\n",
+ " 'g': -9.81, # Acceleration due to gravity (m/s^2)\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "All prognostics models require some specific class functions. We'll define those next. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First, we'll need to add the functionality to set the initial state of the system. There are two ways to provide the logic to initialize model state. \n",
+ "\n",
+ "1. Provide the initial state in `parameters['x0']`, or \n",
+ "2. Provide an `initialize` function \n",
+ "\n",
+ "The first method here is preferred. If `parameters['x0']` are defined, there is no need to explicitly define an initialize method, and these parameter values will be used as the initial state. \n",
+ "\n",
+ "However, there are some cases where the initial state is a function of the input (`u`) or output (`z`) (e.g. a use-case where the input is also a state). In this case, an explicitly defined `initialize` method is required. \n",
+ "\n",
+ "Here, we'll set our initial state by defining an `initialize` function. In the code below, note that the function can take arguments for both input `u` and output `z`, though these are optional. \n",
+ "\n",
+ "Note that for this example, defining initialize in this way is not necessary. We could have simply defined `parameters['x0']`. However, we choose to use this method for ease when using the `derived_params` feature, discussed in the next section. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject_ST(ThrownObject_ST):\n",
+ "\n",
+ " def initialize(self, u=None, z=None):\n",
+ " return self.StateContainer({\n",
+ " 'x': self['thrower_height'], # Thrown, so initial altitude is height of thrower\n",
+ " 'v': self['throwing_speed'] # Velocity at which the ball is thrown - this guy is a professional baseball pitcher\n",
+ " })"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, the PrognosticsModel class requires that we define how the state transitions throughout time. For continuous models, this is defined with the method `dx`, which calculates the first derivative of the state at a specific time. For discrete systems, this is defined with the method `next_state`, using the state transition equation for the system. When possible, it is recommended to use the continuous (`dx`) form, as some algorithms will only work on continuous models.\n",
+ "\n",
+ "Here, we use the equations for the derivatives of our system (i.e., the continuous form)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject_ST(ThrownObject_ST):\n",
+ "\n",
+ " def dx(self, x, u):\n",
+ " return self.StateContainer({\n",
+ " 'x': x['v'], \n",
+ " 'v': self['g']}) # Acceleration of gravity"
+ ]
+ },
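+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For comparison, below is a minimal sketch (our own illustration, not used in the rest of this notebook) of what the equivalent discrete form would look like, using `next_state` with a simple Euler step. The class name `ThrownObject_Discrete` is hypothetical."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class ThrownObject_Discrete(ThrownObject_ST):\n",
+    "\n",
+    "    def next_state(self, x, u, dt):\n",
+    "        # Euler discretization of the dx equations above\n",
+    "        return self.StateContainer({\n",
+    "            'x': x['x'] + x['v'] * dt,\n",
+    "            'v': x['v'] + self['g'] * dt})"
+   ]
+  },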
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we'll define the `output` method, which will calculate the output (i.e., measurable values) given the current state. Here, our output is position (`x`). "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject_ST(ThrownObject_ST):\n",
+ " \n",
+ " def output(self, x):\n",
+ " return self.OutputContainer({'x': x['x']})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The next method we define is [`event_state`](https://nasa.github.io/progpy/glossary.html#term-event-state). As before, \n",
+ "`event_state` calculates the progress towards the events. Normalized to be between 0 and 1, 1 means there is no progress towards the event and 0 means the event has occurred. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject_ST(ThrownObject_ST):\n",
+ " \n",
+ " def event_state(self, x): \n",
+ " # Use speed and position to estimate maximum height\n",
+ " x_max = x['x'] + np.square(x['v'])/(-self['g']*2)\n",
+ " # 1 until falling begins\n",
+ " x_max = np.where(x['v'] > 0, x['x'], x_max)\n",
+ " return {\n",
+ " 'falling': max(x['v']/self['throwing_speed'],0), # Throwing speed is max speed\n",
+ " 'impact': max(x['x']/x_max,0) # 1 until falling begins, then it's fraction of height\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "At this point, we have defined all necessary information for the PrognosticsModel to be complete. There are other methods that can additionally be defined (see the [PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html) documentation for more information) to provide further configuration for new models. We'll highlight some of these in the following sections. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As an example of one of these, we additionally define a `threshold_met` equation. Note that this is optional. Leaving `threshold_met` empty will use the event state to define thresholds (threshold = event state == 0). However, this implementation is more efficient, so we include it. \n",
+ "\n",
+    "Here, we define `threshold_met` in the same way as our linear model example. `threshold_met` will return a _dict_ of values, one for each event. An event's threshold is met when its entry is __True__. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject_ST(ThrownObject_ST):\n",
+ "\n",
+ " def threshold_met(self, x):\n",
+ " return {\n",
+ " 'falling': x['v'] < 0, # Falling occurs when velocity becomes negative\n",
+ " 'impact': x['x'] <= 0 # Impact occurs when the object hits the ground, i.e. position is <= 0\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With that, we have created a new ThrownObject state-transition model. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Now let's test our model through simulation. First, we'll create an instance of the model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m_st = ThrownObject_ST()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We'll start by simulating to impact. We'll specify the `events` to specifically indicate we are interested in impact. For more information on simulation, see 1. Simulation. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Simulate to impact\n",
+ "event = 'impact'\n",
+    "simulated_results = m_st.simulate_to_threshold(events=event, dt=0.005, save_freq=1, print=True)\n",
+    "\n",
+    "# Print result: \n",
+    "print('The object hit the ground in {} seconds'.format(round(simulated_results.times[-1], 2)))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To summarize this section, we have illustrated how to construct new physics-based models by subclassing from [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel). Some elements (e.g. inputs, states, outputs, events keys; methods for initialization, dx/next_state, output, and event_state) are required. Models can be additionally configured with additional methods and parameters.\n",
+ "\n",
+ "Note that in this example, we defined each part one piece at a time, recursively subclassing the partially defined class. This was done to illustrate the parts of the model. In reality, all methods and properties would be defined together in a single class definition. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Derived Parameters"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the previous section, we constructed a new model from scratch by subclassing from [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel) and specifying all of the necessary model components. An additional optional feature of PrognosticsModels is derived parameters, illustrated below. \n",
+ "\n",
+    "A derived parameter is a parameter (see parameter section in 1. Simulation) that is a function of another parameter. For example, in the case of a thrown object, one could assume that throwing speed is a function of thrower height, with taller throwers resulting in faster throwing speeds. In the electrochemistry battery model (see 3. Included Models), there are parameters for the maximum and minimum charge at the surface and bulk, and these are dependent on the capacity of the battery (i.e., another parameter, qMax). When such derived parameters exist, they must be updated whenever the parameters they depend on are updated. In PrognosticsModels, this is achieved with the `derived_params` feature. \n",
+ "\n",
+    "This feature can also be used to cache combinations of parameters that are used frequently in state transition or other model methods. Creating lumped parameters using `derived_params` causes them to be calculated once when configuring, instead of each time step in simulation or prediction. A short sketch of this use-case appears at the end of this section. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For this example, we will use the `ThrownObject_ST` model created in the previous section. We will extend this model to include a derived parameter, namely `throwing_speed` will be dependent on `thrower_height`.\n",
+ "\n",
+ "To implement this, we must first define a function for the relationship between the two parameters. We'll assume that `throwing_speed` is a linear function of `thrower_height`. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def update_thrown_speed(params):\n",
+ " return {\n",
+ " 'throwing_speed': params['thrower_height'] * 21.85\n",
+ " } \n",
+    "    # Note: one or more parameters can be changed in these functions; whatever parameters are changed are returned in the dictionary"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we'll define the parameter callbacks, so that `throwing_speed` is updated appropriately any time that `thrower_height` changes. The following effectively tells the derived callbacks feature to call the `update_thrown_speed` function whenever the `thrower_height` changes. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject_ST(ThrownObject_ST):\n",
+ "\n",
+ " param_callbacks = {\n",
+ " 'thrower_height': [update_thrown_speed]\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "You can also have more than one function called when a single parameter is changed, by adding the additional callbacks to the list (e.g., `'thrower_height': [update_thrown_speed, other_fcn]`)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We have now added the capability for `throwing_speed` to be a derived parameter. Let's try it out. First, we'll create an instance of our class and print out the default parameters. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "obj = ThrownObject_ST()\n",
+ "print(\"Default Settings:\\n\\tthrower_height: {}\\n\\tthrowing_speed: {}\".format(obj['thrower_height'], obj['throwing_speed']))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now, let's change the thrower height. If our derived parameters work correctly, the thrower speed should change accordingly. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "obj['thrower_height'] = 1.75 # Our thrower is 1.75 m tall\n",
+    "print(\"\\nUpdated Settings:\\n\\tthrower_height: {}\\n\\tthrowing_speed: {}\".format(obj['thrower_height'], obj['throwing_speed']))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As we can see, when the thrower height was changed, the throwing speed was re-calculated too. \n",
+ "\n",
+ "In this example, we illustrated how to use the `derived_params` feature, which allows a parameter to be a function of another parameter. "
+ ]
+ },
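+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As mentioned at the start of this section, `derived_params` can also be used to cache lumped parameters. Below is a minimal sketch of that use-case, assuming a hypothetical lumped parameter `g_times_2` (e.g., one used repeatedly in an event state calculation). The names `update_g_times_2` and `ThrownObject_Cached` are ours, for illustration only; note that this sketch redefines `param_callbacks`, so the `thrower_height` callback from above is not included here."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def update_g_times_2(params):\n",
+    "    # Recalculated only when 'g' changes, not at every time step\n",
+    "    return {'g_times_2': 2 * params['g']}\n",
+    "\n",
+    "class ThrownObject_Cached(ThrownObject_ST):\n",
+    "\n",
+    "    default_parameters = {\n",
+    "        'thrower_height': 1.83,\n",
+    "        'throwing_speed': 40,\n",
+    "        'g': -9.81,\n",
+    "        'g_times_2': -19.62  # lumped parameter, kept in sync by the callback below\n",
+    "    }\n",
+    "\n",
+    "    param_callbacks = {\n",
+    "        'g': [update_g_times_2]\n",
+    "    }"
+   ]
+  },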
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Direct Models"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the previous sections, we illustrated how to create and use state-transition models, or models that use state transition differential equations to propagate the state forward. In this example, we'll explore another type of model implemented within ProgPy - Direct Models. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Direct models estimate the time of event directly from the system state and future load, rather than through state transitions. This approach is particularly useful for physics-based models where the differential equations of state transitions can be explicitly solved, or for data-driven models that map sensor data directly to the time of an event. When applicable, using a direct model approach provides a more efficient way to estimate the time of an event, especially for events that occur later in the simulation. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To illustrate this concept, we will extend the state-transition model, `ThrownObject_ST`, defined above, to create a new model class, `DirectThrownObject`. The dynamics of a thrown object lend easily to a direct model, since we can solve the differential equations explicitly to estimate the time at which the events occur. \n",
+ "\n",
+ "Recall that our physical system is described by the following differential equations: \n",
+ "\\begin{align*}\n",
+ "\\frac{dx}{dt} &= v \\\\ \\\\\n",
+ "\\frac{dv}{dt} &= -g \n",
+ "\\end{align*}\n",
+ "\n",
+    "which can be solved explicitly, given initial position $x_0$ and initial velocity $v_0$, to get:\n",
+ "\\begin{align*}\n",
+ "x(t) &= -\\frac{1}{2} gt^2 + v_0 t + x_0 \\\\ \\\\ \n",
+ "v(t) &= -gt + v_0\n",
+ "\\end{align*}\n",
+ "\n",
+ "Setting these equations to 0 and solving for time, we get the time at which the object hits the ground and begins falling, respectively. "
+ ]
+ },
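+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Concretely, solving gives (an added step, with $g$ here denoting the positive magnitude of gravitational acceleration, as in the equations above):\n",
+    "\\begin{align*}\n",
+    "t_{falling} &= \\frac{v_0}{g} \\\\ \\\\\n",
+    "t_{impact} &= \\frac{v_0 + \\sqrt{v_0^2 + 2 g x_0}}{g}\n",
+    "\\end{align*}\n",
+    "\n",
+    "In the implementation below, `self['g']` is negative ($-9.81$), so the signs there are rearranged accordingly."
+   ]
+  },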
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "To construct our direct model, we'll extend the `ThrownObject_ST` model to additionally include the method [time_of_event](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel.time_of_event). This method will calculate the time at which each event occurs (i.e., the time when the event threshold is met), based on the equations above. `time_of_event` must be implemented by any direct model. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class DirectThrownObject(ThrownObject_ST):\n",
+ " \n",
+ " def time_of_event(self, x, *args, **kwargs):\n",
+ " # calculate time when object hits ground given x['x'] and x['v']\n",
+    "        # 0 = x0 + v0*t + 0.5*g*t^2 (note: g = self['g'] is negative)\n",
+ " g = self['g']\n",
+ " t_impact = -(x['v'] + np.sqrt(x['v']*x['v'] - 2*g*x['x']))/g\n",
+ "\n",
+    "        # 0 = v0 + g*t (again with g negative)\n",
+ " t_falling = -x['v']/g\n",
+ " \n",
+ " return {'falling': t_falling, 'impact': t_impact}\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With this, our direct model is created. Note that adding `*args` and `**kwargs` is optional. Having these arguments makes the function interchangeable with other models which may have arguments or keyword arguments. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's test out this capability. To do so, we'll use the `time` package to compare the direct model to our original timeseries model. \n",
+ "\n",
+ "Let's start by creating an instance of our timeseries model, calculating the time of event, and timing this computation. Note that for a state transition model, `time_of_event` still returns the time at which `threshold_met` returns true for each event, but this is calculated by simulating to threshold."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import time \n",
+ "\n",
+ "m_timeseries = ThrownObject_ST()\n",
+ "x = m_timeseries.initialize()\n",
+ "print(m_timeseries.__class__.__name__, \"(Direct Model)\" if m_timeseries.is_direct else \"(Timeseries Model)\")\n",
+ "tic = time.perf_counter()\n",
+    "print('Time of event: ', m_timeseries.time_of_event(x, dt=0.05))\n",
+ "toc = time.perf_counter()\n",
+ "print(f'execution: {(toc-tic)*1000:0.4f} milliseconds')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Now let's do the same using our direct model implementation. In this case, when `time_of_event` is called, the event time will be estimated directly from the state, instead of through simulation to threshold. \n",
+ "\n",
+ "Note that a limitation of a direct model is that you cannot get intermediate states (i.e., save_pts or save_freq) since the time of event is calculated directly. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m_direct = DirectThrownObject()\n",
+ "x = m_direct.initialize() # Using Initial state\n",
+ "# Now instead of simulating to threshold, we can estimate it directly from the state, like so\n",
+ "print('\\n', m_direct.__class__.__name__, \"(Direct Model)\" if m_direct.is_direct else \"(Timeseries Model)\")\n",
+ "tic = time.perf_counter()\n",
+ "print('Time of event: ', m_direct.time_of_event(x))\n",
+ "toc = time.perf_counter()\n",
+ "print(f'execution: {(toc-tic)*1000:0.4f} milliseconds')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Notice that execution is significantly faster for the direct model. Furthermore, the result is actually more accurate, since it's not limited by the timestep (see dt section in 1. Simulation). These observations will be even more pronounced for events that occur later in the simulation. \n",
+ "\n",
+ "It's important to note that this is a very simple example, as there are no inputs. For models with inputs, future loading must be provided to `time_of_event` (see the Future Loading section in 1. Simulation). In these cases, most direct models will encode or discretize the future loading profile to use it in a direct estimation of time of event."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the example provided, we have illustrated how to use a direct model. Direct models are a powerful tool for estimating the time of an event directly from the system state. By avoiding the process of state transitions, direct models can provide more efficient event time estimates. Additionally, the direct model approach is not limited to physics-based models. It can also be applied to data-driven models that can map sensor data directly to the time of an event. \n",
+ "\n",
+ "In conclusion, direct models offer an efficient and versatile approach for prognostics modeling, enabling faster and more direct estimations of event times. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Matrix Data Access Feature"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the above models, we have used dictionaries to represent the states. For example, in the implementation of `ThrownObject_ST` above, see how `dx` is defined with a StateContainer dictionary. While all models can be constructed using dictionaries in this way, some dynamical systems allow for the state of the system to be represented with a matrix. For such use-cases, ProgPy has an advanced *matrix data access feature* that provides a more efficient way to define these models.\n",
+ "\n",
+    "In ProgPy's implementation, the provided model.StateContainer, InputContainer, and OutputContainers can be treated as dictionaries but use an underlying matrix. This is important for some applications like surrogate and machine-learned models where the state is represented by a tensor. ProgPy's *matrix data access feature* allows the matrices to be used directly. Simulation functions propagate the state using the matrix form, preventing the inefficiency of having to convert to and from dictionaries. Additionally, this implementation is faster than recreating the StateContainer each time, especially when updating in place."
+ ]
+ },
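+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick illustrative aside (our own, using the `ThrownObject_ST` model defined earlier), a StateContainer can be accessed either by key or through its underlying matrix:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "m_tmp = ThrownObject_ST()\n",
+    "x_tmp = m_tmp.initialize()\n",
+    "print(x_tmp['x'], x_tmp['v'])  # dictionary-style access\n",
+    "print(x_tmp.matrix)  # the underlying 2x1 column vector (a numpy array)"
+   ]
+  },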
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In this example, we'll illustrate how to use the matrix data access feature. We'll continue with our ThrownObject system, and create a model to simulate this using matrix notation (instead of dictionary notation as in the standard model, seen above in `ThrownObject_ST`). The implementation of the model is comparable to a standard model, except that it uses matrix operations within each function, as seen below. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First, the necessary imports."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "from progpy import PrognosticsModel"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To use the matrix data access feature, we'll subclass from our state-transition model defined above, `ThrownObject_ST`. Our new model will therefore inherit the default parameters and methods for initialization, output, threshold met, and event state. \n",
+ "\n",
+    "To use the matrix data access feature, we'll use matrices to define how the state transitions. Since we are working with a discrete version of the system now, we'll define the `next_state` method, which will be used in place of the `dx` method from the parent class. \n",
+ "\n",
+ "In the following, we will use the matrix version for each variable, accessed with `.matrix`. We implement this within `next_state`, but this feature can also be used in other functions. Here, both `x.matrix` and `u.matrix` are column vectors, and `u.matrix` is in the same order as model.inputs."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ThrownObject_MM(ThrownObject_ST):\n",
+ "\n",
+ " def next_state(self, x, u, dt):\n",
+ "\n",
+ " A = np.array([[0, 1], [0, 0]]) # State transition matrix\n",
+ " B = np.array([[0], [self['g']]]) # Acceleration due to gravity\n",
+ " x.matrix += (np.matmul(A, x.matrix) + B) * dt\n",
+ "\n",
+ " return x"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Our model is now specified. Let's try simulating with it."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First, we'll create an instance of the model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m_matrix = ThrownObject_MM()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now, let's simulate to threshold. We'll also time the simulation so we can compare with the non-matrix state-transition model below. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import time \n",
+ "\n",
+ "tic_matrix = time.perf_counter()\n",
+ "# Simulate to threshold \n",
+ "m_matrix.simulate_to_threshold(\n",
+    "    print=True,\n",
+    "    events='impact',\n",
+    "    dt=0.1,\n",
+    "    save_freq=1)\n",
+ "toc_matrix = time.perf_counter()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Our matrix notation was successful in simulating the thrown object's behavior throughout time. \n",
+ "\n",
+ "Finally, let's simulate the non-matrix version to compare computation time. \n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tic_st = time.perf_counter()\n",
+ "m_st.simulate_to_threshold(\n",
+    "    print=True,\n",
+    "    events='impact',\n",
+    "    dt=0.1,\n",
+    "    save_freq=1)\n",
+ "toc_st = time.perf_counter()\n",
+ "\n",
+ "print(f'Matrix execution: {(toc_matrix-tic_matrix)*1000:0.4f} milliseconds')\n",
+ "print(f'Non-matrix execution: {(toc_st-tic_st)*1000:0.4f} milliseconds')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "As we can see, for this system, using the matrix data access feature is computationally faster than the standard state-transition model that uses dictionaries. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As illustrated here, the matrix data access feature is an advanced capability that represents the state of a system using matrices. This can provide efficiency for use-cases where the state is easily represented by a tensor and operations are defined by matrices."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## State Limits"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In real-world physical systems, there are often constraints on what values the states can take. For example, in the case of a thrown object, if we define our reference frame with the ground at a position of $x=0$, then the position of the object should only be greater than or equal to 0, and should never take on negative values. In ProgPy, we can enforce constraints on the range of each state for a state-transition model using the [state limits](https://nasa.github.io/progpy/prog_models_guide.html#state-limits) attribute. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To illustrate the use of `state_limits`, we'll use our thrown object model `ThrownObject_ST`, created in an above section. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m_limits = ThrownObject_ST()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before adding state limits, let's take a look at the standard model without state limits. We'll consider the event of `impact`, and simulate the object to threshold."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "event = 'impact'\n",
+ "simulated_results = m_limits.simulate_to_threshold(events=event, dt=0.005, save_freq=1)\n",
+ "\n",
+ "print('Example: No State Limits')\n",
+ "for i, state in enumerate(simulated_results.states):\n",
+ " print(f'State {i}: {state}')\n",
+ "print()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Notice that at the end of the simulation, the object's position (`x`) is negative. This doesn't make sense physically, since the object cannot fall below ground level (at $x=0$)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To avoid this, and keep the state in a realistic range, we can change the `state_limits` attribute of the model. The `state_limits` attribute is a dictionary that contains the state limits for each state. The keys of the dictionary are the state names, and the values are tuples that contain the lower and upper limits of the state. \n",
+ "\n",
+ "In our Thrown Object model, our states are position, which can range from 0 to infinity, and velocity, which we'll limit to not exceed the speed of light."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Import inf\n",
+ "from math import inf\n",
+ "\n",
+ "m_limits.state_limits = {\n",
+ " # object position may not go below ground height\n",
+ " 'x': (0, inf),\n",
+ "\n",
+ " # object velocity may not exceed the speed of light\n",
+ " 'v': (-299792458, 299792458)\n",
+ "}"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now that we've specified the ranges for our state values, let's try simulating again. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "event = 'impact'\n",
+ "simulated_results = m_limits.simulate_to_threshold(events=event, dt=0.005, save_freq=1)\n",
+ "\n",
+ "print('Example: With State Limits')\n",
+ "for i, state in enumerate(simulated_results.states):\n",
+ " print(f'State {i}: {state}')\n",
+ "print()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Notice that now the position (`x`) becomes 0 but never reaches a negative value. This is because we have defined a state limit for the `x` state that prevents it from going below 0. Also note that a warning is provided to notify the user that a state value was limited. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's try a more complicated example. This time, we'll try setting the initial position value to be a number outside of its bounds. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "x0 = m_limits.initialize(u={}, z={})\n",
+ "x0['x'] = -1 # Initial position value set to an unrealistic value of -1\n",
+ "\n",
+    "simulated_results = m_limits.simulate_to_threshold(events=event, dt=0.005, save_freq=1, x=x0)\n",
+ "\n",
+ "# Print states\n",
+ "print('Example 2: With -1 as initial x value')\n",
+ "for i, state in enumerate(simulated_results.states):\n",
+    "    print(f'State {i}: {state}')\n",
+ "print()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Notice that the simulation stops after just two iterations. In this case, the initial position value is outside the state limit. On the first iteration, the position value is therefore adjusted to be within the appropriate range of 0 to $\\infty$. Since we are simulating to impact, which is defined as when position is 0, the threshold is immediately satisfied and the simulation stops. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, note that limits can also be applied manually using the `apply_limits` function. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = {'x': -5, 'v': 3e8} # Too fast and below the ground\n",
+ "print('\\t Pre-limit: {}'.format(x))\n",
+ "\n",
+ "x = m_limits.apply_limits(x)\n",
+ "print('\\t Post-limit: {}'.format(x))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In conclusion, setting appropriate [state limits](https://nasa.github.io/progpy/prog_models_guide.html#state-limits) is crucial in creating realistic and accurate state-transition models. It ensures that the model's behavior stays within the constraints of the physical system. The limits should be set based on the physical or practical constraints of the system being modeled. \n",
+ "\n",
+ "As a final note, state limits are especially important for state estimation (to be discussed in the State Estimation section), as it will force the state estimator to only consider states that are possible or feasible. State estimation will be described in more detail in section 08. State Estimation. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Custom Events"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the examples above, we have focused on the simple event of a thrown object hitting the ground or reaching `impact`. In this section, we highlight additional uses of ProgPy's generalizable concept of `events`. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The term [events](https://nasa.github.io/progpy/prog_models_guide.html#events) is used to describe something to be predicted. Generally in the PHM community, these are referred to as End of Life (EOL). However, they can be much more. \n",
+ "\n",
+ "In ProgPy, events can be anything that needs to be predicted. Systems will often have multiple failure modes, and each of these modes can be represented by a separate event. Additionally, events can also be used to predict other events of interest other than failure, such as special system states or warning thresholds. Thus, `events` in ProgPy can represent End of Life (EOL), End of Mission (EOM), warning thresholds, or any Event of Interest (EOI). "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "There are a few components of the model that must be specified in order to define events:\n",
+ "\n",
+ "1. The `events` property defines the expected events \n",
+ "\n",
+ "2. The `threshold_met` method defines the conditions under which an event occurs \n",
+ "\n",
+ "3. The `event_state` method returns an estimate of progress towards the threshold \n",
+ "\n",
+ "Note that because of the interconnected relationship between `threshold_met` and `event_state`, it is only required to define one of these. However, it is generally beneficial to specify both. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To illustrate this concept, we will use the `BatteryElectroChemEOD` model (see section 03. Included Models). In the standard implementation of this model, the defined event is `EOD` or End of Discharge. This occurs when the voltage drops below a pre-defined threshold value. The State-of-Charge (SOC) of the battery is the event state for the EOD event. Recall that event states (and therefore SOC) vary between 0 and 1, where 1 is healthy and 0 signifies the event has occurred. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Suppose we have the requirement that our battery must not fall below 5% State-of-Charge. This would correspond to an `EOD` event state of 0.05. Additionally, let's add events for two warning thresholds, a $\\text{\\textcolor{yellow}{yellow}}$ threshold at 15% SOC and a $\\text{\\textcolor{red}{red}}$ threshold at 10% SOC. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To define the model, we'll start with the necessary imports."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "from progpy.loading import Piecewise\n",
+ "from progpy.models import BatteryElectroChemEOD"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, let's define our threshold values. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "YELLOW_THRESH = 0.15 # 15% SOC\n",
+ "RED_THRESH = 0.1 # 10% SOC\n",
+ "THRESHOLD = 0.05 # 5% SOC"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we'll create our model by subclassing from the `BatteryElectroChemEOD` model. First, we'll re-define `events` to include three new events for our two warnings and new threshold value, as well as the event `EOD` from the parent class."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class BattNewEvent(BatteryElectroChemEOD):\n",
+ " events = BatteryElectroChemEOD.events + ['EOD_warn_yellow', 'EOD_warn_red', 'EOD_requirement_threshold']\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we'll override the `event_state` method to additionally include calculations for progress towards each of our new events. We'll add yellow, red, and failure states by scaling the EOD state. We scale so that the threshold SOC is 0 at their associated events, while SOC of 1 is still 1. For example, for yellow, we want `EOD_warn_yellow` to be 1 when SOC is 1, and 0 when SOC is 0.15 or lower. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class BattNewEvent(BattNewEvent):\n",
+ " \n",
+ " def event_state(self, state):\n",
+ " # Get event state from parent\n",
+ " event_state = super().event_state(state)\n",
+ "\n",
+ " # Add yellow, red, and failure states by scaling EOD state\n",
+ " event_state['EOD_warn_yellow'] = (event_state['EOD']-YELLOW_THRESH)/(1-YELLOW_THRESH) \n",
+ " event_state['EOD_warn_red'] = (event_state['EOD']-RED_THRESH)/(1-RED_THRESH)\n",
+ " event_state['EOD_requirement_threshold'] = (event_state['EOD']-THRESHOLD)/(1-THRESHOLD)\n",
+ "\n",
+ " # Return\n",
+ " return event_state"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, we'll override the `threshold_met` method to define when each event occurs. Based on the scaling in `event_state` each event is reached when the corresponding `event_state` value is less than or equal to 0. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class BattNewEvent(BattNewEvent):\n",
+ " def threshold_met(self, x):\n",
+ " # Get threshold met from parent\n",
+ " t_met = super().threshold_met(x)\n",
+ "\n",
+ " # Add yell and red states from event_state\n",
+ " event_state = self.event_state(x)\n",
+ " t_met['EOD_warn_yellow'] = event_state['EOD_warn_yellow'] <= 0\n",
+ " t_met['EOD_warn_red'] = event_state['EOD_warn_red'] <= 0\n",
+ " t_met['EOD_requirement_threshold'] = event_state['EOD_requirement_threshold'] <= 0\n",
+ "\n",
+ " return t_met"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With this, we have defined the three key model components for defining new events. \n",
+ "\n",
+ "Let's test out the model. First, create an instance of it. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m = BattNewEvent()"
+ ]
+ },
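+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick check (a minimal sketch), we can confirm that the new events are present and inspect the event states and thresholds at the model's initial state:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Confirm the new events exist, then evaluate them at the initial state\n",
+ "print(m.events)\n",
+ "\n",
+ "x0 = m.initialize(u={}, z={})\n",
+ "print(m.event_state(x0))\n",
+ "print(m.threshold_met(x0))"
+ ]
+ },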
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Recall that the battery model takes input of current. We will use a piecewise loading scheme (see 01. Simulation)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Variable (piecewise) future loading scheme\n",
+ "future_loading = Piecewise(\n",
+ " m.InputContainer,\n",
+ " [600, 900, 1800, 3000],\n",
+ " {'i': [2, 1, 4, 2, 3]})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we can simulate to threshold and plot the results. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "simulated_results = m.simulate_to_threshold(future_loading, events='EOD', print = True)\n",
+ "\n",
+ "simulated_results.event_states.plot()\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here, we can see the SOC plotted for the different events throughout time. The yellow warning (15% SOC) reaches threshold first, followed by the red warning (10% SOC), new EOD threshold (5% SOC), and finally the original EOD value. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In this section, we have illustrated how to define custom [events](https://nasa.github.io/progpy/prog_models_guide.html#events) for prognostics models. Events can be used to define anything that a user is interested in predicting, including common values like Remaining Useful Life (RUL) and End of Discharge (EOD), as well as other values like special intermediate states or warning thresholds. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Serialization "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "ProgPy includes a feature to serialize models, which we highlight in this section. \n",
+ "\n",
+ "Model serialization has a variety of purposes. For example, serialization allows us to save a specific model or model configuration to a file to be loaded later, or can aid us in sending a model to another machine over a network connection. Some users maintain a directory or repository of configured models representing specific systems in their stock.\n",
+ "\n",
+ "In this section, we'll show how to serialize and deserialize model objects using `pickle` and `JSON` methods. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First, we'll import the necessary modules."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "import pickle\n",
+ "import numpy as np\n",
+ "from progpy.models import BatteryElectroChemEOD\n",
+ "from progpy.loading import Piecewise"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For this example, we'll use the BatteryElectroChemEOD model. We'll start by creating a model object. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "batt = BatteryElectroChemEOD()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First, we'll serialize the model in two different ways using 1) `pickle` and 2) `JSON`. Then, we'll plot the results from simulating the deserialized models to show equivalence of the methods. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To save using the `pickle` package, we'll serialize the model using the `dump` method. Once saved, we can then deserialize using the `load` method. In practice, deserializing will likely occur in a different file or in a later use-case, but here we deserialize to show equivalence of the saved model. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pickle.dump(batt, open('save_pkl.pkl', 'wb')) # Serialize model\n",
+ "load_pkl = pickle.load(open('save_pkl.pkl', 'rb')) # Deserialize model "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we'll serialize using the `to_json` method. We deserialize by calling the model directly with the serialized result using the `from_json` method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "save_json = batt.to_json() # Serialize model\n",
+ "json_1 = BatteryElectroChemEOD.from_json(save_json) # Deserialize model"
+ ]
+ },
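+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Since the serialized result is simply a JSON string, it can be inspected or transmitted like any other text. As a minimal sketch:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# The serialized model is a plain JSON string\n",
+ "print(type(save_json))\n",
+ "print(save_json[:100]) # First 100 characters of the serialized model"
+ ]
+ },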
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that the serialized result can also be saved to a text file and uploaded for later use. We demonstrate this below:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "txtFile = open(\"save_json.txt\", \"w\")\n",
+ "txtFile.write(save_json)\n",
+ "txtFile.close()\n",
+ "\n",
+ "with open('save_json.txt') as infile: \n",
+ " load_json = infile.read()\n",
+ "\n",
+ "json_2 = BatteryElectroChemEOD.from_json(load_json)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We have now serialized and deserialized the model using `pickle` and `JSON` methods. Let's compare the resulting models. To do so, we'll use ProgPy's [simulation](https://nasa.github.io/progpy/prog_models_guide.html#simulation) to simulate the model to threshold and compare the results. \n",
+ "\n",
+ "First, we'll need to define our [future loading profile](https://nasa.github.io/progpy/prog_models_guide.html#future-loading) using the PiecewiseLoad class. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Variable (piecewise) future loading scheme\n",
+ "future_loading = Piecewise(\n",
+ " batt.InputContainer,\n",
+ " [600, 1000, 1500, 3000],\n",
+ " {'i': [3, 2, 1.5, 4]})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now, let's simulate each model to threshold using the `simulate_to_threshold` method. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Original model \n",
+ "results_orig = batt.simulate_to_threshold(future_loading, save_freq = 1)\n",
+ "# Pickled version \n",
+ "results_pkl = load_pkl.simulate_to_threshold(future_loading, save_freq = 1)\n",
+ "# JSON versions\n",
+ "results_json_1 = json_1.simulate_to_threshold(future_loading, save_freq = 1)\n",
+ "results_json_2 = json_2.simulate_to_threshold(future_loading, save_freq = 1)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, let's plot the results for comparison."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "voltage_orig = [results_orig.outputs[iter]['v'] for iter in range(len(results_orig.times))]\n",
+ "voltage_pkl = [results_pkl.outputs[iter]['v'] for iter in range(len(results_pkl.times))]\n",
+ "voltage_json_1 = [results_json_1.outputs[iter]['v'] for iter in range(len(results_json_1.times))]\n",
+ "voltage_json_2 = [results_json_2.outputs[iter]['v'] for iter in range(len(results_json_2.times))]\n",
+ "\n",
+ "plt.plot(results_orig.times,voltage_orig,'-b',label='Original surrogate') \n",
+ "plt.plot(results_pkl.times,voltage_pkl,'--r',label='Pickled serialized surrogate') \n",
+ "plt.plot(results_json_1.times,voltage_json_1,'-.g',label='First JSON serialized surrogate') \n",
+ "plt.plot(results_json_2.times, voltage_json_2, '--y', label='Second JSON serialized surrogate')\n",
+ "plt.legend()\n",
+ "plt.xlabel('Time (sec)')\n",
+ "plt.ylabel('Voltage (volts)')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "All of the voltage curves overlap, showing that the different serialization methods produce the same results. \n",
+ "\n",
+ "Additionally, we can compare the output arrays directly, to ensure equivalence. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "\n",
+ "# Check if the arrays are the same\n",
+ "are_arrays_same = np.array_equal(voltage_orig, voltage_pkl) and \\\n",
+ " np.array_equal(voltage_orig, voltage_json_1) and \\\n",
+ " np.array_equal(voltage_orig, voltage_json_2)\n",
+ "\n",
+ "print(f\"The simulated results from the original and serialized models are {'identical. This means that our serialization works!' if are_arrays_same else 'not identical. This means that our serialization does not work.'}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To conclude, we have shown how to serialize models in ProgPy using both `pickle` and `JSON` methods. Understanding how to serialize and deserialize models can be a powerful tool for prognostics developers. It enables the saving of models to a disk and the re-loading of these models back into memory at a later time. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Conclusions"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In these examples, we have described how to create new physics-based models. We have illustrated how to construct a generic physics-based model, as well as highlighted some specific types of models including linear models and direct models. We highlighted the matrix data access feature for using matrix operations more efficiently. Additionally, we discussed a few important components of any prognostics model including derived parameters, state limits, and events. \n",
+ "\n",
+ "With these tools, users are well-equipped to build their own prognostics models for their specific physics-based use-cases. In the next example, we'll discuss how to create data-driven models."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3.11.0 ('env': venv)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.0"
+ },
+ "orig_nbformat": 4,
+ "vscode": {
+ "interpreter": {
+ "hash": "71ccad9e81d0b15f7bb5ef75e2d2ca570011b457fb5a41421e3ae9c0e4c33dfc"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/param_est.ipynb b/docs/_downloads/4dfd843259a83464e8e48a02a37a07b0/02_Parameter Estimation.ipynb
similarity index 89%
rename from examples/param_est.ipynb
rename to docs/_downloads/4dfd843259a83464e8e48a02a37a07b0/02_Parameter Estimation.ipynb
index 9d56072..dc60b0e 100644
--- a/examples/param_est.ipynb
+++ b/docs/_downloads/4dfd843259a83464e8e48a02a37a07b0/02_Parameter Estimation.ipynb
@@ -5,7 +5,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Welcome to the Parameter Estimation Feature Example"
+ "# 2. Parameter Estimation"
]
},
{
@@ -13,11 +13,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "The goal of this notebook is to instruct ProgPy users on how to use the estimate_params feature for PrognosticModels.\n",
+ "Parameter estimation is used to tune the parameters of a general model so its behavior matches the behavior of a specific system. For example, parameters of the battery model can be tuned to configure the model to describe the behavior of a specific battery.\n",
"\n",
- "First some background. Parameter estimation is used to tune the parameters of a general model so its behavior matches the behavior of a specific system. For example, parameters of the battery model can be tuned to configure the model to describe the behavior of a specific battery.\n",
- "\n",
- "Generally, parameter estimation is done by tuning the parameters of the model so that simulation best matches the behavior observed in some available data. In ProgPy, this is done using the progpy.PrognosticsModel.estimate_params() method. This method takes input and output data from one or more runs, and uses scipy.optimize.minimize function to estimate the parameters of the model. For more information, refer to our Documentation [here](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation)\n",
+ "Generally, parameter estimation is done by tuning the parameters of the model so that simulation (see 1. Simulation) best matches the behavior observed in some available data. In ProgPy, this is done using the progpy.PrognosticsModel.estimate_params() method. This method takes input and output data from one or more runs, and uses scipy.optimize.minimize function to estimate the parameters of the model. For more information, refer to our Documentation [here](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation)\n",
"\n",
"A few definitions:\n",
"* __`keys`__ `(list[str])`: Parameter keys to optimize\n",
@@ -139,7 +137,7 @@
"# Printing state before\n",
"print('Model configuration before')\n",
"for key in keys:\n",
- " print(\"-\", key, m.parameters[key])\n",
+ " print(\"-\", key, m[key])\n",
"print(' Error: ', m.calc_error(times, inputs, outputs, dt=0.1))"
]
},
@@ -178,7 +176,7 @@
"source": [
"print('\\nOptimized configuration')\n",
"for key in keys:\n",
- " print(\"-\", key, m.parameters[key])\n",
+ " print(\"-\", key, m[key])\n",
"print(' Error: ', m.calc_error(times, inputs, outputs, dt=0.1))"
]
},
@@ -225,7 +223,7 @@
"m.estimate_params(times = times, inputs = inputs, outputs = outputs, keys = keys, dt=0.1, tol=1e-6)\n",
"print('\\nOptimized configuration')\n",
"for key in keys:\n",
- " print(\"-\", key, m.parameters[key])\n",
+ " print(\"-\", key, m[key])\n",
"print(' Error: ', m.calc_error(times, inputs, outputs, dt=0.1))"
]
},
@@ -262,14 +260,14 @@
"metadata": {},
"outputs": [],
"source": [
- "m.parameters['thrower_height'] = 3.1\n",
- "m.parameters['throwing_speed'] = 29\n",
+ "m['thrower_height'] = 3.1\n",
+ "m['throwing_speed'] = 29\n",
"\n",
"# Using MAE, or Mean Absolute Error instead of the default Mean Squared Error.\n",
"m.estimate_params(times = times, inputs = inputs, outputs = outputs, keys = keys, dt=0.1, tol=1e-9, error_method='MAX_E')\n",
"print('\\nOptimized configuration')\n",
"for key in keys:\n",
- " print(\"-\", key, m.parameters[key])\n",
+ " print(\"-\", key, m[key])\n",
"print(' Error: ', m.calc_error(times, inputs, outputs, dt=0.1, method='MAX_E'))"
]
},
@@ -311,9 +309,9 @@
"results = m.simulate_to_threshold(save_freq=0.5, dt=('auto', 0.1))\n",
"\n",
"# Resetting parameters to their incorrectly set values.\n",
- "m.parameters['thrower_height'] = 20\n",
- "m.parameters['throwing_speed'] = 3.1\n",
- "m.parameters['g'] = 15\n",
+ "m['thrower_height'] = 20\n",
+ "m['throwing_speed'] = 3.1\n",
+ "m['g'] = 15\n",
"keys = ['thrower_height', 'throwing_speed', 'g']"
]
},
@@ -326,7 +324,7 @@
"m.estimate_params(times = results.times, inputs = results.inputs, outputs = results.outputs, keys = keys)\n",
"print('\\nOptimized configuration')\n",
"for key in keys:\n",
- " print(\"-\", key, m.parameters[key])\n",
+ " print(\"-\", key, m[key])\n",
"print(' Error: ', m.calc_error(results.times, results.inputs, results.outputs))"
]
},
@@ -355,7 +353,7 @@
"def AME(m, keys):\n",
" error = 0\n",
" for key in keys:\n",
- " error += abs(m.parameters[key] - true_Values.parameters[key])\n",
+ " error += abs(m[key] - true_Values[key])\n",
" return error"
]
},
@@ -395,9 +393,9 @@
" results = m.simulate_to_threshold(save_freq=0.5, dt=('auto', 0.1))\n",
" \n",
" # Resetting parameters to their originally incorrectly set values.\n",
- " m.parameters['thrower_height'] = 20\n",
- " m.parameters['throwing_speed'] = 3.1\n",
- " m.parameters['g'] = 15\n",
+ " m['thrower_height'] = 20\n",
+ " m['throwing_speed'] = 3.1\n",
+ " m['g'] = 15\n",
"\n",
" m.estimate_params(times = results.times, inputs = results.inputs, outputs = results.outputs, keys = keys, dt=0.1)\n",
" error = AME(m, ['thrower_height', 'throwing_speed', 'g'])\n",
@@ -441,9 +439,9 @@
"metadata": {},
"outputs": [],
"source": [
- "m.parameters['thrower_height'] = 20\n",
- "m.parameters['throwing_speed'] = 3.1\n",
- "m.parameters['g'] = 15"
+ "m['thrower_height'] = 20\n",
+ "m['throwing_speed'] = 3.1\n",
+ "m['g'] = 15"
]
},
{
@@ -463,7 +461,7 @@
"m.estimate_params(times=times, inputs=inputs, outputs=outputs, keys=keys, dt=0.1)\n",
"print('\\nOptimized configuration')\n",
"for key in keys:\n",
- " print(\"-\", key, m.parameters[key])\n",
+ " print(\"-\", key, m[key])\n",
"error = AME(m, ['thrower_height', 'throwing_speed', 'g'])\n",
"print('AME Error: ', error)"
]
@@ -493,7 +491,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.11.0"
+ "version": "3.12.0"
},
"orig_nbformat": 4,
"vscode": {
diff --git a/docs/_downloads/638751a90485d06ba802c6a90154e62b/basic_example.py b/docs/_downloads/638751a90485d06ba802c6a90154e62b/basic_example.py
index da4420d..edd2847 100644
--- a/docs/_downloads/638751a90485d06ba802c6a90154e62b/basic_example.py
+++ b/docs/_downloads/638751a90485d06ba802c6a90154e62b/basic_example.py
@@ -19,7 +19,7 @@
def run_example():
# Step 1: Setup model & future loading
- m = ThrownObject(process_noise=1)
+ m = ThrownObject(process_noise = 1)
initial_state = m.initialize()
# Step 2: Demonstrating state estimator
diff --git a/docs/_downloads/6b940c6b760d99b131616029abefd14c/05_Data Driven.ipynb b/docs/_downloads/6b940c6b760d99b131616029abefd14c/05_Data Driven.ipynb
new file mode 100644
index 0000000..0cbd9d0
--- /dev/null
+++ b/docs/_downloads/6b940c6b760d99b131616029abefd14c/05_Data Driven.ipynb
@@ -0,0 +1,70 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Using Data-Driven Models\n",
+ "**A version of this notebook will be added in release v1.8, including:**"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## General Use\n",
+ "\n",
+ "### Building a new model from data\n",
+ "\n",
+ "### Surrogate Models"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Long Short-Term Memory (LSTM)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Dynamic Mode Decomposition (DMD)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Polynomial Chaos Expansion (PCE)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Extending"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3.11.0 64-bit",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python",
+ "version": "3.12.0"
+ },
+ "orig_nbformat": 4,
+ "vscode": {
+ "interpreter": {
+ "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/_downloads/70520a32b5407f76734c16a1ffe189b9/custom_model.py b/docs/_downloads/70520a32b5407f76734c16a1ffe189b9/custom_model.py
index d7d74fc..1dc74da 100644
--- a/docs/_downloads/70520a32b5407f76734c16a1ffe189b9/custom_model.py
+++ b/docs/_downloads/70520a32b5407f76734c16a1ffe189b9/custom_model.py
@@ -44,13 +44,13 @@ def run_example():
# Step 2: Build standard model
print("Building standard model...")
m_batt = LSTMStateTransitionModel.from_data(
- inputs=input_data,
- outputs=output_data,
+ inputs = input_data,
+ outputs = output_data,
window=WINDOW,
epochs=30,
units=64, # Additional units given the increased complexity of the system
- input_keys=['i', 'dt'],
- output_keys=['t', 'v'])
+ input_keys = ['i', 'dt'],
+ output_keys = ['t', 'v'])
m_batt.plot_history()
# Step 3: Build custom model
@@ -79,19 +79,22 @@ def run_example():
# so we need to transpose them to a column vector
normalization = (u_mean[np.newaxis].T, u_std[np.newaxis].T, z_mean, z_std)
+ callbacks = [
+ keras.callbacks.ModelCheckpoint("jena_sense.keras", save_best_only=True)
+ ]
inputs = keras.Input(shape=u_all.shape[1:])
x = layers.Bidirectional(layers.LSTM(128))(inputs)
x = layers.Dropout(0.1)(x)
x = layers.Dense(z_all.shape[1] if z_all.ndim == 2 else 1)(x)
model = keras.Model(inputs, x)
model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"])
- history = model.fit(u_all, z_all, epochs=30, validation_split=0.1)
+ history = model.fit(u_all, z_all, epochs=30, callbacks = callbacks, validation_split = 0.1)
# Step 4: Build LSTMStateTransitionModel
m_custom = LSTMStateTransitionModel(model,
normalization=normalization,
- input_keys=['i', 'dt'],
- output_keys=['t', 'v'], history=history # Provide history so plot_history will work
+ input_keys = ['i', 'dt'],
+ output_keys = ['t', 'v'], history=history # Provide history so plot_history will work
)
m_custom.plot_history()
@@ -102,7 +105,7 @@ def run_example():
def future_loading(t, x=None):
return batt.InputContainer({'i': 3})
- def future_loading2(t, x=None):
+ def future_loading2(t, x = None):
nonlocal t_counter, x_counter
z = batt.output(x_counter)
z = m_batt.InputContainer({'i': 3, 't_t-1': z['t'], 'v_t-1': z['v'], 'dt': t - t_counter})
diff --git a/docs/_downloads/8b8068cadf898f26bec76d85c19e7d58/playback.py b/docs/_downloads/8b8068cadf898f26bec76d85c19e7d58/playback.py
index e52099c..e16920b 100644
--- a/docs/_downloads/8b8068cadf898f26bec76d85c19e7d58/playback.py
+++ b/docs/_downloads/8b8068cadf898f26bec76d85c19e7d58/playback.py
@@ -31,7 +31,7 @@
from progpy.predictors import UnscentedTransformPredictor as Predictor
# VVV Uncomment this to use MonteCarloPredictor instead
-# from progpy.predictors import MonteCarlo as Predictor
+from progpy.predictors import MonteCarlo as Predictor
# Constants
NUM_SAMPLES = 20
@@ -84,13 +84,14 @@ def future_loading(t, x=None):
z = {'t': float(row[2]), 'v': float(row[3])}
# State Estimation Step
- filt.estimate(t, i, z)
+ filt.estimate(t, i, z)
eod = batt.event_state(filt.x.mean)['EOD']
print(" - Event State: ", eod)
# Prediction Step (every PREDICTION_UPDATE_FREQ steps)
if (step%PREDICTION_UPDATE_FREQ == 0):
mc_results = mc.predict(filt.x, future_loading, t0 = t, n_samples=NUM_SAMPLES, dt=TIME_STEP)
metrics = mc_results.time_of_event.metrics()
print(' - ToE: {} (sigma: {})'.format(metrics['EOD']['mean'], metrics['EOD']['std']))
profile.add_prediction(t, mc_results.time_of_event)
diff --git a/docs/_downloads/98e318f1c85976fc33e5b193dd2922a1/sensitivity.py b/docs/_downloads/98e318f1c85976fc33e5b193dd2922a1/sensitivity.py
index 375f098..0e39ae4 100644
--- a/docs/_downloads/98e318f1c85976fc33e5b193dd2922a1/sensitivity.py
+++ b/docs/_downloads/98e318f1c85976fc33e5b193dd2922a1/sensitivity.py
@@ -22,7 +22,7 @@ def run_example():
eods = np.empty(len(thrower_height_range))
for (i, thrower_height) in zip(range(len(thrower_height_range)), thrower_height_range):
m.parameters['thrower_height'] = thrower_height
- simulated_results = m.simulate_to_threshold(threshold_keys=[event], dt =1e-3, save_freq =10)
+ simulated_results = m.simulate_to_threshold(events=event, dt =1e-3, save_freq =10)
eods[i] = simulated_results.times[-1]
# Step 4: Analysis
@@ -36,7 +36,7 @@ def run_example():
eods = np.empty(len(throw_speed_range))
for (i, throw_speed) in zip(range(len(throw_speed_range)), throw_speed_range):
m.parameters['throwing_speed'] = throw_speed
- simulated_results = m.simulate_to_threshold(threshold_keys=[event], options={'dt':1e-3, 'save_freq':10})
+ simulated_results = m.simulate_to_threshold(events=event, options={'dt':1e-3, 'save_freq':10})
eods[i] = simulated_results.times[-1]
print('\nFor a reasonable range of throwing speeds, impact time is between {} and {}'.format(round(eods[0],3), round(eods[-1],3)))
diff --git a/docs/_downloads/9af9bdc48da233bdafa07c9ecb46568e/basic_example_battery.py b/docs/_downloads/9af9bdc48da233bdafa07c9ecb46568e/basic_example_battery.py
index ada269a..221e5fe 100644
--- a/docs/_downloads/9af9bdc48da233bdafa07c9ecb46568e/basic_example_battery.py
+++ b/docs/_downloads/9af9bdc48da233bdafa07c9ecb46568e/basic_example_battery.py
@@ -27,7 +27,6 @@
# VVV Uncomment this to use UnscentedTransform Predictor VVV
# from progpy.predictors import UnscentedTransformPredictor as Predictor
-from progpy.loading import Piecewise
from progpy.metrics import prob_success
def run_example():
@@ -39,10 +38,24 @@ def run_example():
}
batt = Battery(process_noise = 0.25, measurement_noise = R_vars)
# Creating the input containers outside of the function accelerates prediction
- future_loading = Piecewise(
- batt.InputContainer,
- [600, 900, 1800, 3000, float('inf')],
- {'i': [2, 1, 4, 2, 3]})
+ loads = [
+ batt.InputContainer({'i': 2}),
+ batt.InputContainer({'i': 1}),
+ batt.InputContainer({'i': 4}),
+ batt.InputContainer({'i': 2}),
+ batt.InputContainer({'i': 3})
+ ]
+ def future_loading(t, x = None):
+ # Variable (piece-wise) future loading scheme
+ if (t < 600):
+ return loads[0]
+ elif (t < 900):
+ return loads[1]
+ elif (t < 1800):
+ return loads[2]
+ elif (t < 3000):
+ return loads[3]
+ return loads[-1]
initial_state = batt.initialize()
@@ -81,7 +94,7 @@ def run_example():
NUM_SAMPLES = 25
STEP_SIZE = 0.1
SAVE_FREQ = 100 # How often to save results
- mc_results = mc.predict(filt.x, future_loading, n_samples=NUM_SAMPLES, dt=STEP_SIZE, save_freq=SAVE_FREQ)
+ mc_results = mc.predict(filt.x, future_loading, n_samples = NUM_SAMPLES, dt=STEP_SIZE, save_freq = SAVE_FREQ)
print('ToE', mc_results.time_of_event.mean)
# Step 3c: Analyze the results
diff --git a/docs/_downloads/9c8e9f4d6db817c0bef52c29c3dbca21/01_Simulation.ipynb b/docs/_downloads/9c8e9f4d6db817c0bef52c29c3dbca21/01_Simulation.ipynb
new file mode 100644
index 0000000..35542f9
--- /dev/null
+++ b/docs/_downloads/9c8e9f4d6db817c0bef52c29c3dbca21/01_Simulation.ipynb
@@ -0,0 +1,1340 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 1. Simulating with Prognostics Models\n",
+ "\n",
+ "One of the most basic of functions for models is simulation. Simulation is the process of predicting the evolution of [system's state](https://nasa.github.io/progpy/glossary.html#term-state) with time. Simulation is the foundation of prediction (see 9. Prediction). Unlike full prediction, simulation does not include uncertainty in the state and other product (e.g., [output](https://nasa.github.io/progpy/glossary.html#term-output)) representation.\n",
+ "\n",
+ "The first section introduces simulating to a specific time (e.g., 3 seconds), using the `simulate_to` method. The second section introduces the concept of simulating until a threshold is met rather than a defined time, using `simulate_to_threshold`. The third section makes simulation more concrete with the introduction of [future loading](https://nasa.github.io/progpy/glossary.html#term-future-load). The sections following these introduce various advanced features that can be used in simulation.\n",
+ "\n",
+ "Note: Before running this example make sure you have ProgPy installed and up to date."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Basic Simulation to a Time\n",
+ "\n",
+ "Let's go through a basic example simulating a model to a specific point in time. In this case we are using the ThrownObject model. ThrownObject is a basic model of an object being thrown up into the air (with resistance) and returning to the ground.\n",
+ "\n",
+ "First we import the model from ProgPy's models subpackage (see 3. Included Models) and create a model instance."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.models import ThrownObject\n",
+ "m = ThrownObject()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we simulate this model for three seconds. To do this we use the [`simulate_to`](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel.simulate_to) method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to(3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "It's that simple! We've simulated the model forward three seconds. Let's look in a little more detail at the returned results. \n",
+ "\n",
+ "Simulation results consists of 5 different types of information, described below:\n",
+ "* **times**: the time corresponding to each value.\n",
+ "* **[inputs](https://nasa.github.io/progpy/glossary.html#term-input)**: Control or loading applied to the system being modeled (e.g., current drawn from a battery). Input is frequently denoted by u.\n",
+ "* **[states](https://nasa.github.io/progpy/glossary.html#term-state)**: Internal variables (typically hidden states) used to represent the state of the system. Can be same as inputs or outputs but do not have to be. State is frequently denoted as `x`.\n",
+ "* **[outputs](https://nasa.github.io/progpy/glossary.html#term-output)**: Measured sensor values from a system (e.g., voltage and temperature of a battery). Can be estimated from the system state. Output is frequently denoted by `z`.\n",
+ "* **[event_states](https://nasa.github.io/progpy/glossary.html#term-event-state)**: Progress towards [event](https://nasa.github.io/progpy/glossary.html#term-event) occurring. Defined as a number where an event state of 0 indicates the event has occurred and 1 indicates no progress towards the event (i.e., fully healthy operation for a failure event). For a gradually occurring event (e.g., discharge) the number will progress from 1 to 0 as the event nears. In prognostics, event state is frequently called “State of Health”.\n",
+ "\n",
+ "In this case, times are the start and beginning of the simulation ([0, 3]), since we have not yet told the simulator to save intermediate times. The ThrownObject model doesn't have any way of controlling or loading the object, so there are no inputs. The states are position (`x`) and velocity (`v`). This model assumes that you can measure position, so the outputs are just position (`x`). The two events for this model are `falling` (i.e., if the object is falling towards the earth) and `impact` (i.e., the object has impacted the ground). For a real prognostic model, events might be failure modes or warning thresholds."
+ ]
+ },
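+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Each of these is available as an attribute of the returned results object. As a minimal sketch using the `results` from above:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Inspect each type of information in the simulation results\n",
+ "print('times: ', results.times)\n",
+ "print('inputs: ', results.inputs)\n",
+ "print('states: ', results.states)\n",
+ "print('outputs: ', results.outputs)\n",
+ "print('event states: ', results.event_states)"
+ ]
+ },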
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's inspect the results. First, let's plot the outputs (position)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = results.outputs.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This is a model of an object thrown in the air, so we generally expect its path to follow a parabola, but what we see above is linear. This is because there are only two points, the start (0s) and the end (3s). To see the parabola we need more points. This is where `save_freq` and `save_pts` come into play. \n",
+ "\n",
+ "`save_freq` is an argument in simulation that specifies a frequency at which you would like to save the results (e.g., 1 seconds), while `save_pts` is used to specify specific times that you would like to save the results (e.g., [1.5, 2.5, 3, 5] seconds).\n",
+ "\n",
+ "Now let's repeat the simulation above with a save frequency and plot the results."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to(3, save_freq=0.5)\n",
+ "fig = results.outputs.plot(ylabel='Position (m)')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we can see the start of the parabola path we expected. We dont see the full parabola because we stopped simulation at 3 seconds.\n",
+ "\n",
+ "If you look at results.times, you can see that the results were saved every 0.5 seconds during simulation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(results.times)"
+ ]
+ },
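+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Similarly, `save_pts` can be used to save results at specific times of interest instead of at a fixed frequency. Here is a minimal sketch; the chosen points are arbitrary examples:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Save at specific points of interest rather than at a fixed frequency\n",
+ "results_pts = m.simulate_to(3, save_pts=[1.5, 2.5])\n",
+ "print(results_pts.times)"
+ ]
+ },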
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, let's look at the event_states (i.e., `falling` and `impact`)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = results.event_states.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here we see that the `falling` event state decreased linerally with time, and was approaching 0. This shows that it was nearly falling when we stopped simulation. The `impact` event state remained at 1, indicating that we had not made any progress towards impact. With this model, `impact` event state only starts decreasing as the object falls. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, let's take a look at model states. In this case the two states are position (`x`) and velocity (`v`)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = results.states.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Position will match the output exactly and velocity (`v`) decreases nearly linerally with time due to the constant pull of gravity. The slight non-linerality is due to the effects of drag."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This is a basic example of simulating to a set time. This is useful information for inspecting or analyzing the behavior of a model or the degredation of a system. There are many useful features that allow for complex simulation, described in the upcoming sections. \n",
+ "\n",
+ "Note that this is an example problem. In most cases, the system will have inputs, in which case simulation will require future loading (see Future Loading section, below), and simulation will not be until a time, but until a threshold is met. Simulating to a threshold will be described in the next section."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Simulating to Threshold"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the first section we introduced simulating to a set time. For most applications, users are not interested in the system evolution over a certain time period, but instead in simulating to some event of interest.\n",
+ "\n",
+ "In this section we will introduce the concept of simulating until an event occurs. This section builds upon the concepts introduced in the previous section.\n",
+ "\n",
+ "Just like in the previous section, we will start by preparing the ThrownObject model. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.models import ThrownObject\n",
+ "m = ThrownObject()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If you recall, the ThrownObject model is of an object thrown into the air. The model has two events, `impact` and `falling`. In real prognostic models, these events will likely correspond with some failure, fault, or warning threshold. That said, events can be any event of interest that a user would like to predict. \n",
+ "\n",
+ "Now let's repeat the simulation from the previous example, this time simulating until an event has occured by using the [`simulate_to_threshold`](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel.simulate_to_threshold) method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(save_freq=0.5)\n",
+ "fig = results.outputs.plot(ylabel='Position (m)')\n",
+ "fig = results.event_states.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that simulation continued beyond the 3 seconds used in the first section. Instead simulation stopped at 4 seconds, at which point the `falling` event state reached 0 and the position (`x`) reached the apogee of its path."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "By default, `simulate_to_threshold` simulates until the first event occurs. In this case, that's `falling` (i.e., when the object begins falling). For this model `falling` will always occur before `impact`, but for many models you won't have such a strict ordering of events. \n",
+ "\n",
+ "For users interested in when a specific event is reached, you can indicate which event(s) you'd like to simulate to using the `events` argument. For example,"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(save_freq=0.5, events='impact')\n",
+ "fig = results.outputs.plot(ylabel='Position (m)')\n",
+ "fig = results.event_states.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now the model simulated past the `falling` event until the `impact` event occurred. `events` accepts a single event, or a list of events, so for models with many events you can specify a list of events where any will stop simulation."
+ ]
+ },
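+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a minimal sketch, here both events are passed as a list. With the default behavior, simulation still stops at whichever event is reached first (here, `falling`):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Passing a list of events: simulation stops at the first event reached\n",
+ "results = m.simulate_to_threshold(save_freq=0.5, events=['falling', 'impact'])\n",
+ "print('Simulation stopped at: ', results.times[-1])"
+ ]
+ },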
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Frequently users are interested in simulating to a threshold, only if it occurs within some horizon of interest, like a mission time or planning horizon. This is accomplished with the `horizon` keyword argument. \n",
+ "\n",
+ "For example, if we were only interested in events occuring in the next 7 seconds we could set `horizon` to 7, like below:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(save_freq=0.5, events='impact', horizon=7)\n",
+ "fig = results.outputs.plot(ylabel='Position (m)')\n",
+ "fig = results.event_states.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Notice that now simulation stopped at 7 seconds, even though the event had not yet occured. If we use a horizon after the event, like 10 seconds, then simulation stops at the event."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(save_freq=0.5, events='impact', horizon=10)\n",
+ "fig = results.outputs.plot(ylabel='Position (m)')\n",
+ "fig = results.event_states.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The 7 and 10 second horizon is used as an example. In most cases, the simulation horizon will be much longer. For example, you can imagine a user who's interested in prognostics for a one hour drone flight might set the horizon to a little over an hour. A user who has a month-long maintenance scheduling window might chose a horizon of one month. \n",
+ "\n",
+ "It is good practice to include a horizon with most simulations to prevent simulations continuing indefinitely for the case where the event never happens."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "One final note: you can also use the print and progress options to track progress during long simulations, like below:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(save_freq=0.5, events='impact', print=True, progress=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For most users running this in Jupyter notebook, the output will be truncated, but it gives an idea of what would be shown when selecting these options."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In this example we specified events='impact' to indicate that simulation should stop when the specified event 'impact' is met. By default, the simulation will stop when the first of the specified events occur. If you dont specify any events, all model events will be included (in this case ['falling', 'impact']). This means that without specifying events, execution would have ended early, when the object starts falling, like below:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(save_freq=0.5, dt=0.1)\n",
+ "print('Last timestep: ', results.times[-1])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that simulation stopped at around 3.8seconds, about when the object starts falling. \n",
+ "\n",
+ "Alternately, if we would like to execute until all events have occurred we can use the `event_strategy` argument, like below:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(save_freq=0.5, dt=0.1, event_strategy='all')\n",
+ "print('Last timestep: ', results.times[-1])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Not the simulation stopped at around 7.9 seconds, when the last of the events occured ('impact')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This is a basic example of simulating to an event. However, this is still just an example. Most models will have some form of input or loading. Simulating these models is described in the following section. The remainder of the sections go through various features for customizing simulation further."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Future Loading"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The previous examples feature a simple ThrownObject model, which does not have any inputs. Unlike ThrownObject, most prognostics models have some sort of [input](https://nasa.github.io/progpy/glossary.html#term-input). The input is some sort of control or loading applied to the system being modeled. In this section we will describe how to simulate a model which features an input.\n",
+ "\n",
+ "In this example we will be using the BatteryCircuit model from the models subpackage (see 3. Included Models). This is a simple battery discharge model where the battery is represented by an equivalent circuit.\n",
+ "\n",
+ "Like the past examples, we start by importing and creating the model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.models import BatteryCircuit\n",
+ "m = BatteryCircuit()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can see the battery's inputs, states, and outputs (described above) by accessing these attributes."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print('outputs:', m.outputs)\n",
+ "print('inputs:', m.inputs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Consulting the [model documentation](https://nasa.github.io/progpy/api_ref/prog_models/IncludedModels.html#prog_models.models.BatteryCircuit), we see that the outputs (i.e., measurable values) of the model are temperature (`t`) and voltage (`v`). The model's input is the current (`i`) drawn from the battery.\n",
+ "\n",
+ "If we try to simulate as we do above (without specifying loading), it wouldn't work because the battery discharge is a function of the current (`i`) drawn from the battery. Simulation for a model like this requires that we define the future load. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Piecewise load\n",
+ "\n",
+ "For the first example, we define a piecewise loading profile using the `progpy.loading.Piecewise` class. This is one of the most common loading profiles. First we import the class from the loading subpackage"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.loading import Piecewise"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we define a loading profile. Piecewise loader takes 3 arguments: 1. the model InputContainer, 2. times and 3. loads. Each of these are explained in more detail below.\n",
+ "\n",
+ "The model input container is a class for representing the input for a model. It's a class attribute for every model, and is specific to that model. It can be found at m.InputContainer. For example,"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m.InputContainer"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "InputContainers are initialized with either a dictionary or a column vector, for example:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(m.InputContainer({'i': 3}))\n",
+ "import numpy as np\n",
+ "print(m.InputContainer(np.vstack((2.3, ))))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The second and third arguments for the loading profile are times and loads. Together, the 'times' and 'loads' arguments specify what load is applied to the system at what times throughout simulation. The values in 'times' specify the ending time for each load. For example, if times were [5, 7, 10], then the first load would apply until t=5, then the second load would apply for 2 seconds, following by the third load for 3 more seconds. \n",
+ "\n",
+ "Loads are a dictionary of arrays, where the keys of the dictionary are the inputs to the model (for a battery, just current `i`), and the values in the array are the value at each time in times. If the loads array is one longer than times, then the last value is the \"default load\", i.e., the load that will be applied after the last time has passed.\n",
+ "\n",
+ "For example, we might define this load profile for our battery."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loading = Piecewise(\n",
+ " InputContainer=m.InputContainer,\n",
+ " times=[600, 900, 1800, 3000],\n",
+ " values={'i': [2, 1, 4, 2, 3]})"
+ ]
+ },
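+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Loading objects are callables that return the load at a requested time. As a quick sanity check (a minimal sketch, assuming the callable loading interface used by simulation), we can evaluate the profile at a few example times:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Evaluate the load profile at a few example times\n",
+ "for t in (0, 700, 2000, 4000):\n",
+ " print(f't={t}: {loading(t)}')"
+ ]
+ },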
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In this case, the current drawn (`i`) is 2 amps until t is 600 seconds, then it is 1 for the next 300 seconds (until 900 seconds), etc. The \"default load\" is 3, meaning that after the last time has passed (3000 seconds) a current of 3 will be drawn. \n",
+ "\n",
+ "Now that we have this load profile, let's run a simulation with our model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(loading, save_freq=100)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's take a look at the inputs to the model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = results.inputs.plot(ylabel=\"Current Draw (amps)\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "See above that the load profile is piecewise, matching the profile we defined above.\n",
+ "\n",
+ "Plotting the outputs, you can see jumps in the voltage levels as the current changes."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = results.outputs.plot(compact=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In this example we simulated to threshold, loading the system using a simple piecewise load profile. This is the most common load profile and will probably work for most cases."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Moving Average"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Another common loading scheme is the moving-average load. This loading scheme assumes that the load will continue like it's seen in the past. This is useful when you don't know the exact load, but you expect it to be consistent.\n",
+ "\n",
+ "Like with Piecewise loading, the first step it to import the loading class. In this case, `progpy.loading.MovingAverage`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.loading import MovingAverage"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next we create the moving average loading object, passing in the InputContainer"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loading = MovingAverage(m.InputContainer)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The moving average load estimator requires an additional step, sending the observed load. This is done using the add_load method. Let's load it up with some observed current draws. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "measured_loads = [4, 4.5, 4.0, 4, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2]\n",
+ " \n",
+ "for load in measured_loads:\n",
+ " loading.add_load({'i': load})"
+ ]
+ },
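+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a minimal sketch (again assuming the callable loading interface), we can peek at the resulting estimate. Because it is an average of the recent past, the estimate does not depend on the time requested:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# The moving average estimate is constant in time\n",
+ "print(loading(0))\n",
+ "print(loading(100))"
+ ]
+ },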
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In practice the add_load method should be called whenever there's new input (i.e., load) information. The MovingAverage load estimator averages over a window of elements, configurable at construction using the window argument (e.g., MovingAverage(m.InputContainer, window=12))\n",
+ "\n",
+ "Now the configured load estimator can be used in simulation. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(loading, save_freq=100)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's take a look at the resulting input current."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = results.inputs.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that the loading is a constant around 2, this is because the larger loads (~4 amps) are outside of the averaging window. Here are the resulting outputs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = results.outputs.plot(compact=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The voltage and temperature curves are much cleaner. They don't have the jumps present in the piecewise loading example. This is due to the constant loading.\n",
+ "\n",
+ "In this example we simulated to threshold, loading the system using a constant load profile calculated using the moving average load estimator. This load estimator needs to be updated with the add_load method whenever new loading data is available. "
+ ]
+ },
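+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before moving on, here is a quick sketch of the `window` argument mentioned above (an illustrative addition to this example; the window value is chosen arbitrarily). With a window wide enough to include the early ~4 amp measurements, the average load used in simulation is higher."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: a window covering all 14 measurements includes the ~4 amp loads\n",
+ "loading_wide = MovingAverage(m.InputContainer, window=14)\n",
+ "for load in measured_loads:\n",
+ "    loading_wide.add_load({'i': load})\n",
+ "results_wide = m.simulate_to_threshold(loading_wide, save_freq=100)\n",
+ "fig = results_wide.inputs.plot()"
+ ]
+ },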
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Gaussian Noise in Loading"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Typically, users have an idea of what loading will look like, but there is some uncertainty. Future load estimates are hardly ever known exactly. This is where load wrappers like the `progpy.loading.GaussianNoiseLoadWrapper` come into play. The GaussianNoiseLoadWrapper wraps around another load profile, adding a random amount of noise, sampled from a Gaussian distribution, at each step. This will show some variability in simulation, but this becomes more important in prediction (see 9. Prediction).\n",
+ "\n",
+ "In this example we will repeat the Piecewise load example, this time using the GaussianNoiseLoadWrapper to represent our uncertainty in our future load estimate. \n",
+ "\n",
+ "First we will import the necessary classes and construct the Piecewise load estimation just as in the previous example."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.loading import Piecewise, GaussianNoiseLoadWrapper\n",
+ "loading = Piecewise(\n",
+ " InputContainer=m.InputContainer,\n",
+ " times=[600, 900, 1800, 3000],\n",
+ " values={'i': [2, 1, 4, 2, 3]})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next we will wrap this loading object in our Gaussian noise load wrapper"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loading_with_noise = GaussianNoiseLoadWrapper(loading, 0.2)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In this case we're adding Gaussian noise with a standard deviation of 0.2 to the result of the previous load estimator.\n",
+ "\n",
+ "Now let's simulate and look at the input profile."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n",
+ "fig = results.inputs.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note the loading profile follows the piecewise shape, but with noise. If you run it again, you would get a slightly different result."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n",
+ "fig = results.inputs.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here are the corresponding outputs."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = results.outputs.plot(compact=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that the noise in input can be seen in the resulting output plots.\n",
+ "\n",
+ "The seed can be set in creation of the GaussianNoiseLoadWrapper to ensure repeatable results, for example."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loading_with_noise = GaussianNoiseLoadWrapper(loading, 0.2, seed=2000)\n",
+ "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n",
+ "fig = results.inputs.plot()\n",
+ "\n",
+ "loading_with_noise = GaussianNoiseLoadWrapper(loading, 0.2, seed=2000)\n",
+ "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n",
+ "fig = results.inputs.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The load profiles in the two examples above are identical because they share the same random seed.\n",
+ "\n",
+ "In this section we introduced the concept of NoiseWrappers and how they are used to represent uncertainty in future loading. This concept is especially important when used with prediction (see 9. Prediction). A GaussianNoiseLoadWrapper was used with a Piecewise loading profile to demonstrate it, but NoiseWrappers can be applied to any loading object or function, including the advanced profiles introduced in the next section."
+ ]
+ },
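+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For example, here is a quick sketch (an illustrative addition, reusing the objects defined earlier in this section) applying the same wrapper to the MovingAverage estimator instead of the Piecewise profile."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: NoiseWrappers accept any loading object, not just Piecewise\n",
+ "ma_loading = MovingAverage(m.InputContainer)\n",
+ "for load in measured_loads:\n",
+ "    ma_loading.add_load({'i': load})\n",
+ "noisy_ma_loading = GaussianNoiseLoadWrapper(ma_loading, 0.2)\n",
+ "results = m.simulate_to_threshold(noisy_ma_loading, save_freq=100)\n",
+ "fig = results.inputs.plot()"
+ ]
+ },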
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Custom load profiles"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For most applications, the standard load estimation classes can be used to represent a user's expectation of future loading. However, there are some cases where load is some complex combination of time and state that cannot be represented by these classes. This section briefly describes a few of these cases. \n",
+ "\n",
+ "The first example is similar to the last one, in that there is Gaussian noise added to some underlying load profile. In this case the magnitude of noise increases linearly with time. This is an important example, as it allows us to represent a case where loading further out in time has more uncertainty (i.e., is less well known). This is common for many prognostic use-cases.\n",
+ "\n",
+ "Custom load profiles can be represented either as a function (t, x=None) -> u, where t is time, x is state, and u is input, or as a class which implements the __call__ method with the same profile as the function.\n",
+ "\n",
+ "In this case we will use the first method (i.e., the function). We will define a function that will use a defined slope (derivative of standard deviation with time)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from numpy.random import normal\n",
+ "base_load = 2 # Base load (amps)\n",
+ "std_slope = 1e-4 # Derivative of standard deviation with time\n",
+ "def loading(t, x=None):\n",
+ " std = std_slope * t\n",
+ " return m.InputContainer({'i': normal(base_load, std)})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that the above code is specifically for a battery, but it could be generalized to any system.\n",
+ "\n",
+ "Now let's simulate and look at the input profile."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(loading, save_freq=100)\n",
+ "fig = results.inputs.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note how the noise in the input signal increases with time. Since this is a random process, if you were to run this again you would get a different result.\n",
+ "\n",
+ "Here is the corresponding output. Note you can see the effects of the increasingly erratic input in the voltage curve."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = results.outputs.plot(compact=False)"
+ ]
+ },
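+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As noted above, the same profile could instead be written as a class implementing `__call__`. Here is a minimal sketch (an illustrative addition, equivalent to the function above):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class IncreasingNoiseLoad:\n",
+ "    # Equivalent to the loading function above, written as a class\n",
+ "    def __init__(self, base_load=2, std_slope=1e-4):\n",
+ "        self.base_load = base_load\n",
+ "        self.std_slope = std_slope\n",
+ "\n",
+ "    def __call__(self, t, x=None):\n",
+ "        return m.InputContainer({'i': normal(self.base_load, self.std_slope * t)})\n",
+ "\n",
+ "loading = IncreasingNoiseLoad()"
+ ]
+ },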
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the final example we will define a loading profile that considers state. In this example, we're simulating a scenario where loads are removed (i.e., turned off) when discharge event state (i.e., SOC) reaches 0.25. This emulates a \"low power mode\" often employed in battery-powered electronics.\n",
+ "\n",
+ "For simplicity the underlying load will be constant, but this same approach could be applied to more complex profiles, and noise can be added on top using a NoiseWrapper."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "normal_load = m.InputContainer({'i': 2.7})\n",
+ "low_power_load = m.InputContainer({'i': 1.9})\n",
+ "\n",
+ "def loading(t, x=None):\n",
+ " if x is not None:\n",
+ " # State is provided\n",
+ " soc = m.event_state(x)['EOD']\n",
+ " return normal_load if soc > 0.25 else low_power_load\n",
+ " return normal_load"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that the above example checks if x is not None. For some models, for the first timestep, state may be None (because state hasn't yet been calculated).\n",
+ "\n",
+ "Now let's use this in simulation and take a look at the loading profile."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(loading, save_freq=100)\n",
+ "fig = results.inputs.plot()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here, as expected, load is at normal level for most of the time, then falls to low power mode towards the end of discharge.\n",
+ "\n",
+ "Let's look at the corresponding outputs."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = results.outputs.plot(compact=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Notice the jump in voltage at the point where the load changed. Low power mode extended the life of the battery.\n",
+ "\n",
+ "In this section we show how to make custom loading profiles. Most applications can use the standard load classes, but some may require creating complex custom load profiles using this feature."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Step Size"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The next configurable parameter in simulation is the step size, or `dt`. This is the size of the step taken from one step to the next when simulating. Smaller step sizes will usually more accurately simulate state evolution, but at a computational cost. Conversely, some models can become unstable at large step sizes. Choosing the correct step size is important to the success of a simulation or prediction.\n",
+ "\n",
+ "In this section we will introduce the concept of setting simulation step size (`dt`) and discuss some considerations when selecting step sizes.\n",
+ "\n",
+ "For this section we will use the `progpy.models.ThrownObject model` (see 3. Included models), imported and created below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.models import ThrownObject\n",
+ "m = ThrownObject()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Basic Step Size"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To set step size, set the dt parameter in the `simulate_to` or `simulate_to_threshold` methods. In this example we will use a large and small step size and compare the results.\n",
+ "\n",
+ "First, let's simulate with a large step size, saving the result at every step."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(\n",
+ " events='impact',\n",
+ " dt=2.5,\n",
+ " save_freq=2.5)\n",
+ "fig = results.outputs.plot(ylabel='Position (m)')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that the parabola above is jagged. Also note that the estimated time of impact is around 10 seconds and the maximum height is a little over 120 meters. \n",
+ "\n",
+ "Now let's run the simulation again with a smaller step size."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(\n",
+ " events='impact',\n",
+ " dt=0.25,\n",
+ " save_freq=0.25)\n",
+ "fig = results.outputs.plot(ylabel='Position (m)')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Not only is the curve much smoother with a smaller step size, but the results are significantly different. Now the time of impact is closer to 8 seconds and maximum height closer to 80 meters.\n",
+ "\n",
+ "All simulations are approximations. The example with the larger step size accumulates more error in integration. The second example (with a smaller step size) is more accurate to the actual model behavior.\n",
+ "\n",
+ "Now let's decrease the step size even more"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(\n",
+ " events='impact',\n",
+ " dt=0.05,\n",
+ " save_freq=0.05)\n",
+ "fig = results.outputs.plot(ylabel='Position (m)')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The resulting output is different than the 0.25 second step size run, but not by much. What you see here is the diminishing returns in decreasing step size.\n",
+ "\n",
+ "The smaller the step size, the more computational resources required to simulate it. This doesn't matter as much for simulating this simple model over a short horizon, but becomes very important when performing prediction (see 9. Prediction), using a complex model with a long horizon, or when operating in a computationally constrained environment (e.g., embedded)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Dynamic Step Size"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The last section introduced step size and showed how changing the step size effects the simulation results. In the last example step size (`dt`) and `save_freq` were the same, meaning each point was captured exactly. This is not always the case, for example in the case below. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(\n",
+ " events='impact',\n",
+ " dt=1,\n",
+ " save_freq=1.5)\n",
+ "print('Times saved: ', results.times)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With a `save_freq` of 1.5 seconds you might expect the times saved to be 0, 1.5, 3, 4.5, ..., but that's not the case. This is because the timestep is 1 second, so the simulation never stops near 1.5 seconds to record it. 'auto' stepsize can help with this.\n",
+ "\n",
+ "To use 'auto' stepsize set `dt` to a tuple of ('auto', MAX) where MAX is replaced with the maximum allowable stepsize."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results = m.simulate_to_threshold(\n",
+ " events='impact',\n",
+ " dt=('auto', 1),\n",
+ " save_freq=1.5)\n",
+ "print('Times saved: ', results.times)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We repeated the simulation using automatic step size with a maximum step size of 1. The result was that the times where state was saved matched what was requested exactly. This is important for simulations with large step sizes where there are specific times that must be captured.\n",
+ "\n",
+ "Also note that automatic step size doesn't just adjust for `save_freq`. It will also adjust to meet `save_pts` and any transition points in a Piecewise loading profile."
+ ]
+ },
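+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here is a quick sketch of that behavior with `save_pts` (an illustrative addition; the save points are chosen arbitrarily). Even though 3.2 and 7.7 are not multiples of the step size, automatic step size adjusts so they are captured exactly."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: auto step size also adjusts to hit save_pts exactly\n",
+ "results = m.simulate_to_threshold(\n",
+ "    events='impact',\n",
+ "    dt=('auto', 1),\n",
+ "    save_pts=[3.2, 7.7])\n",
+ "print('Times saved: ', results.times)"
+ ]
+ },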
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Custom Step Size"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "There are times when an advanced user would like more flexibility in selecting step sizes. This can be used to adjust step size dynamically close to events or times of interest. In some models, there are complex behaviors during certain parts of the life of the system that require more precise simulation. For example, the knee point in the voltage profile for a discharged battery. This can be done by providing a function (t, x)->dt instead of a scalar `dt`. \n",
+ "\n",
+ "For example, if a user wanted to reduce the step size closer to impact, they could do so like this:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def next_time(t, x):\n",
+ " # In this example dt is a function of state. Uses a dt of 1 until impact event state 0.25, then 0.25\n",
+ " event_state = m.event_state(x)\n",
+ " if event_state['impact'] < 0.25:\n",
+ " return 0.25\n",
+ " return 1\n",
+ "\n",
+ "results=m.simulate_to_threshold(dt=next_time, save_freq= 0.25, events='impact')\n",
+ "\n",
+ "print(results.times)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that after 8 seconds the step size decreased to 0.25 seconds, as expected."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Parameters"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "All the previous sections used a model with default settings. This is hardly ever the case. A model will have to be configured to represent the actual system. For example, the BatteryCircuit default parameters are for a 18650 battery tested in NASA's SHARP lab. If you're using the model for a system other than that one battery, you will need to update the parameters.\n",
+ "\n",
+ "The parameters available are specific to the system in question. See 3. Included Models for a more detailed description of these. For example, for the BatteryCircuit model, parameters include battery capacity, internal resistance, and other electrical characteristics.\n",
+ "\n",
+ "In this section we will adjust the parameters for the ThrownObject Model, observing how that changes system behavior."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.models import ThrownObject\n",
+ "m = ThrownObject()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Parameters can be accessed using the `parameters` attribute. For a ThrownObject, here are the parameters:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(m.parameters)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Ignoring process and measurement noise for now (that will be described in the next section) and the lumped_parameter (which will be described in 4. Creating new prognostic models), the parameters of interest here are described below:\n",
+ "\n",
+ "* **thrower_height**: The height of the thrower in meters, and therefore the initial height of the thrown object\n",
+ "* **throwing_speed**: The speed at which the ball is thrown vertically (in m/s)\n",
+ "* **g**: Acceleration due to gravity (m/s^2)\n",
+ "* **rho**: Air density (affects drag)\n",
+ "* **A**: Cross-sectional area of the object (affects drag)\n",
+ "* **m**: Mass of the object (affects drag)\n",
+ "* **cd**: Coefficient of drag of the object (affects drag)\n",
+ "\n",
+ "Let's try simulating the path of the object with different throwing speeds."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results1 = m.simulate_to_threshold(events='impact', dt=0.1, save_freq=0.1)\n",
+ "fig = results1.outputs.plot(title='default')\n",
+ "\n",
+ "m['throwing_speed'] = 10\n",
+ "results2 = m.simulate_to_threshold(events='impact', dt=0.1, save_freq=0.1)\n",
+ "fig = results2.outputs.plot(title='slow')\n",
+ "\n",
+ "m['throwing_speed'] = 80\n",
+ "results3 = m.simulate_to_threshold(events='impact', dt=0.1, save_freq=0.1)\n",
+ "fig = results3.outputs.plot(title='fast')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can also set parameters as keyword arguments when instantiating the model, like below. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m_e = ThrownObject(g=-9.81) # Earth gravity\n",
+ "results_earth = m_e.simulate_to_threshold(events='impact', dt=0.1, save_freq=0.1)\n",
+ "fig = results_earth.outputs.plot(title='Earth')\n",
+ "\n",
+ "m_j = ThrownObject(g=-24.79) # Jupiter gravity\n",
+ "results_jupiter = m_j.simulate_to_threshold(events='impact', dt=0.1, save_freq=0.1)\n",
+ "fig = results_jupiter.outputs.plot(title='Jupiter')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Model parameters are used to configure a model to accurately describe the system of interest.\n",
+ "\n",
+ "For a simple system like the ThrownObject, model parameters are simple and measurable. For most systems, there are many parameters that are difficult to estimate. For these, parameter estimation comes into play. See 2. Parameter Estimation for more details"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Noise\n",
+ "**A version of this section will be added in release v1.8**"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Vectorized Simulation\n",
+ "**A version of this section will be added in release v1.8**"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Configuring Simulation\n",
+ "**A version of this notebook will be added in release v1.8, including:**\n",
+ "* t0, x\n",
+ "* integration_method"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3.12.0 64-bit",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.0"
+ },
+ "orig_nbformat": 4,
+ "vscode": {
+ "interpreter": {
+ "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/_downloads/a53af2a664a02d85e1a95ed94e2629b1/noise.py b/docs/_downloads/a53af2a664a02d85e1a95ed94e2629b1/noise.py
index ddc3b9a..2b06be0 100644
--- a/docs/_downloads/a53af2a664a02d85e1a95ed94e2629b1/noise.py
+++ b/docs/_downloads/a53af2a664a02d85e1a95ed94e2629b1/noise.py
@@ -17,7 +17,7 @@ def future_load(t=None, x=None):
# Define configuration for simulation
config = {
- 'threshold_keys': 'impact', # Simulate until the thrown object has impacted the ground
+ 'events': 'impact', # Simulate until the thrown object has impacted the ground
'dt': 0.005, # Time step (s)
'save_freq': 0.5, # Frequency at which results are saved (s)
}
diff --git a/docs/_downloads/b0f882b08ad06c468dcd7cc5ad370a58/sim_battery_eol.py b/docs/_downloads/b0f882b08ad06c468dcd7cc5ad370a58/sim_battery_eol.py
index d1fcaef..ad4de0f 100644
--- a/docs/_downloads/b0f882b08ad06c468dcd7cc5ad370a58/sim_battery_eol.py
+++ b/docs/_downloads/b0f882b08ad06c468dcd7cc5ad370a58/sim_battery_eol.py
@@ -37,7 +37,7 @@ def future_loading(t, x=None):
options = {
'save_freq': 1000, # Frequency at which results are saved
'dt': 2, # Timestep
- 'threshold_keys': ['InsufficientCapacity'], # Simulate to InsufficientCapacity
+ 'events': 'InsufficientCapacity', # Simulate to InsufficientCapacity
'print': True
}
simulated_results = batt.simulate_to_threshold(future_loading, **options)
diff --git a/docs/_downloads/c416c00aa2e02950fa0b0b4547cf49f9/new_model.py b/docs/_downloads/c416c00aa2e02950fa0b0b4547cf49f9/new_model.py
index afd9053..46fbb3f 100644
--- a/docs/_downloads/c416c00aa2e02950fa0b0b4547cf49f9/new_model.py
+++ b/docs/_downloads/c416c00aa2e02950fa0b0b4547cf49f9/new_model.py
@@ -75,7 +75,7 @@ def future_load(t, x=None):
# Step 3: Simulate to impact
event = 'impact'
- simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1, print = True)
+ simulated_results = m.simulate_to_threshold(future_load, events=event, dt=0.005, save_freq=1, print = True)
# Print flight time
print('The object hit the ground in {} seconds'.format(round(simulated_results.times[-1],2)))
@@ -86,16 +86,16 @@ def future_load(t, x=None):
# The first way to change the configuration is to pass in your desired config into construction of the model
m = ThrownObject(g = grav_moon)
- simulated_moon_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1)
+ simulated_moon_results = m.simulate_to_threshold(future_load, events=event, dt=0.005, save_freq=1)
grav_mars = -3.711
# You can also update the parameters after it's constructed
m.parameters['g'] = grav_mars
- simulated_mars_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1)
+ simulated_mars_results = m.simulate_to_threshold(future_load, events=event, dt=0.005, save_freq=1)
grav_venus = -8.87
m.parameters['g'] = grav_venus
- simulated_venus_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1)
+ simulated_venus_results = m.simulate_to_threshold(future_load, events=event, dt=0.005, save_freq=1)
print('Time to hit the ground: ')
print('\tvenus: {}s'.format(round(simulated_venus_results.times[-1],2)))
@@ -103,7 +103,7 @@ def future_load(t, x=None):
print('\tmars: {}s'.format(round(simulated_mars_results.times[-1],2)))
print('\tmoon: {}s'.format(round(simulated_moon_results.times[-1],2)))
- # We can also simulate until any event is met by neglecting the threshold_keys argument
+ # We can also simulate until any event is met by neglecting the events argument
simulated_results = m.simulate_to_threshold(future_load, dt=0.005, save_freq=1)
threshs_met = m.threshold_met(simulated_results.states[-1])
for (key, met) in threshs_met.items():
diff --git a/docs/_downloads/cadc512c2baefeca720f0906f97221af/uav_dynamics_model.py b/docs/_downloads/cadc512c2baefeca720f0906f97221af/uav_dynamics_model.py
index 8109242..3b7fea9 100644
--- a/docs/_downloads/cadc512c2baefeca720f0906f97221af/uav_dynamics_model.py
+++ b/docs/_downloads/cadc512c2baefeca720f0906f97221af/uav_dynamics_model.py
@@ -41,8 +41,8 @@ def run_example():
# Generate trajectory
# =====================
# Generate trajectory object and pass the route (waypoints, ETA) to it
- traj = Trajectory(lat=lat_deg * np.pi/180.0,
- lon=lon_deg * np.pi/180.0,
+ traj = Trajectory(lat=lat_deg,
+ lon=lon_deg,
alt=alt_ft * 0.3048,
etas=time_unix)
@@ -74,8 +74,8 @@ def run_example():
# Generate trajectory object and pass the route (lat/lon/alt, no ETAs)
# and speed information to it
- traj_speed = Trajectory(lat=lat_deg * np.pi/180.0,
- lon=lon_deg * np.pi/180.0,
+ traj_speed = Trajectory(lat=lat_deg,
+ lon=lon_deg,
alt=alt_ft * 0.3048,
cruise_speed=8.0,
ascent_speed=2.0,
diff --git a/docs/_downloads/cb37c25b3e11f6fe8987e558bb775c53/06_Combining Models.ipynb b/docs/_downloads/cb37c25b3e11f6fe8987e558bb775c53/06_Combining Models.ipynb
new file mode 100644
index 0000000..bd30376
--- /dev/null
+++ b/docs/_downloads/cb37c25b3e11f6fe8987e558bb775c53/06_Combining Models.ipynb
@@ -0,0 +1,861 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Combining Prognostic Models"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This section demonstrates how prognostic models can be combined. There are two times in which this is useful: \n",
+ "\n",
+ "1. When combining multiple models of different inter-related systems into one system-of-system model (i.e., [Composite Models](https://nasa.github.io/progpy/api_ref/prog_models/CompositeModel.html)), or\n",
+ "2. Combining multiple models of the same system to be simulated together and aggregated (i.e., [Ensemble Models](https://nasa.github.io/progpy/api_ref/prog_models/EnsembleModel.html) or [Mixture of Expert Models](https://nasa.github.io/progpy/api_ref/progpy/MixtureOfExperts.html)). This is generally done to improve the accuracy of prediction when you have multiple models that each represent part of the behavior or represent a distribution of different behaviors. \n",
+ "\n",
+ "These two methods for combining models are described in the following sections."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Composite Model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A CompositeModel is a PrognosticsModel that is composed of multiple PrognosticsModels. This is a tool for modeling system-of-systems. i.e., interconnected systems, where the behavior and state of one system affects the state of another system. The composite prognostics models are connected using defined connections between the output or state of one model, and the input of another model. The resulting CompositeModel behaves as a single model.\n",
+ "\n",
+ "To illustrate this, we will create a composite model of an aircraft's electric powertrain, combining the DCMotor, ESC, and PropellerLoad models. The Electronic Speed Controller (ESC) converts a commanded duty (i.e., throttle) to signals to the motor. The motor then acts on the signals from the ESC to spin the load, which enacts a torque on the motor (in this case from air resistence).\n",
+ "\n",
+ "First we will import the used models, and the CompositeModel class"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.models import DCMotor, ESC, PropellerLoad\n",
+ "from progpy import CompositeModel"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next we will initiate objects of the individual models that will later create the composite powertrain model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m_motor = DCMotor()\n",
+ "m_esc = ESC()\n",
+ "m_load = PropellerLoad()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next we have to define the connections between the systems. Let's first define the connections from the DCMotor to the propeller load. For this, we'll need to look at the DCMotor states and understand how they influence the PropellerLoad inputs."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print('motor states: ', m_motor.states)\n",
+ "print('load inputs: ', m_load.inputs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Each of the states and inputs are described in the model documentation at [DC Motor Docs](https://nasa.github.io/progpy/api_ref/prog_models/IncludedModels.html#dc-motor) and [Propeller Docs](https://nasa.github.io/progpy/api_ref/prog_models/IncludedModels.html#propellerload)\n",
+ "\n",
+ "From reading the documentation we understand that the propeller's velocity is from the motor, so we can define the first connection:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "connections = [\n",
+ " ('DCMotor.v_rot', 'PropellerLoad.v_rot')\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Connections are defined as couples where the first value is the input for the second value. The connection above tells the composite model to feed the DCMotor's v_rot into the PropellerLoad's input v_rot.\n",
+ "\n",
+ "Next, let's look at the connections the other direction, from the load to the motor."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print('load states: ', m_load.states)\n",
+ "print('motor inputs: ', m_motor.inputs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We know here that the load on the motor is from the propeller load, so we can add that connection. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "connections.append(('PropellerLoad.t_l', 'DCMotor.t_l'))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we will repeat the exercise with the DCMotor and ESC."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print('ESC states: ', m_esc.states)\n",
+ "print('motor inputs: ', m_motor.inputs)\n",
+ "connections.append(('ESC.v_a', 'DCMotor.v_a'))\n",
+ "connections.append(('ESC.v_b', 'DCMotor.v_b'))\n",
+ "connections.append(('ESC.v_c', 'DCMotor.v_c'))\n",
+ "\n",
+ "print('motor states: ', m_motor.states)\n",
+ "print('ESC inputs: ', m_esc.inputs)\n",
+ "connections.append(('DCMotor.theta', 'ESC.theta'))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we are ready to combine the models. We create a composite model with the inidividual models and the defined connections."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m_powertrain = CompositeModel(\n",
+ " (m_esc, m_load, m_motor), \n",
+ " connections=connections)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The resulting model includes two inputs, ESC voltage (from the battery) and duty (i.e., commanded throttle). These are the only two inputs not connected internally from the original three models. The states are a combination of all the states of every system. Finally, the outputs are a combination of all the outputs from each of the individual systems. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print('inputs: ', m_powertrain.inputs)\n",
+ "print('states: ', m_powertrain.states)\n",
+ "print('outputs: ', m_powertrain.outputs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Frequently users only want a subset of the outputs from the original model. For example, in this case you're unlikely to be measuring the individual voltages from the ESC. Outputs can be specified when creating the composite model. For example:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m_powertrain = CompositeModel(\n",
+ " (m_esc, m_load, m_motor), \n",
+ " connections=connections,\n",
+ " outputs={'DCMotor.v_rot', 'DCMotor.theta'})\n",
+ "print('outputs: ', m_powertrain.outputs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now the outputs are only DCMotor angle and velocity.\n",
+ "\n",
+ "The resulting model can be used in simulation, state estimation, and prediction the same way any other model would be, as demonstrated below:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "load = m_powertrain.InputContainer({\n",
+ " 'ESC.duty': 1, # 100% Throttle\n",
+ " 'ESC.v': 23\n",
+ " })\n",
+ "def future_loading(t, x=None):\n",
+ " return load\n",
+ "\n",
+ "simulated_results = m_powertrain.simulate_to(1, future_loading, dt=2.5e-5, save_freq=2e-2)\n",
+ "fig = simulated_results.outputs.plot(compact=False, keys=['DCMotor.v_rot'], ylabel='Velocity')\n",
+ "fig = simulated_results.states.plot(keys=['DCMotor.i_b', 'DCMotor.i_c', 'DCMotor.i_a'], ylabel='ESC Currents')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Parameters in composed models can be updated directly using the model_name.parameter name parameter of the composite model. Like so:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m_powertrain.parameters['PropellerLoad.D'] = 1"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here we updated the propeller diameter to 1, greatly increasing the load on the motor. You can see this in the updated simulation outputs (below). When compared to the original results above you will find that the maximum velocity is lower. This is expected given the larger propeller load."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "simulated_results = m_powertrain.simulate_to(1, future_loading, dt=2.5e-5, save_freq=2e-2)\n",
+ "fig = simulated_results.outputs.plot(compact=False, keys=['DCMotor.v_rot'], ylabel='Velocity')\n",
+ "fig = simulated_results.states.plot(keys=['DCMotor.i_b', 'DCMotor.i_c', 'DCMotor.i_a'], ylabel='ESC Currents')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note: A function can be used to perform simple transitions between models. For example, if you wanted to multiply the torque by 1.1 to represent some gearing or additional load, that could be done by defining a function, as follows"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def torque_multiplier(t_l):\n",
+ " return t_l * 1.1"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The function is referred to as 'function' by the composite model. So we can add the function into the connections as follows. Note that the argument name is used for the input of the function and 'return' is used to signify the function's return value. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "connections = [\n",
+ " ('PropellerLoad.t_l', 'function.t_l'),\n",
+ " ('function.return', 'DCMotor.t_l')\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's add back in the other connections and build the composite model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "connections.extend([\n",
+ " ('ESC.v_a', 'DCMotor.v_a'),\n",
+ " ('ESC.v_b', 'DCMotor.v_b'),\n",
+ " ('ESC.v_c', 'DCMotor.v_c'),\n",
+ " ('DCMotor.theta', 'ESC.theta'),\n",
+ " ('DCMotor.v_rot', 'PropellerLoad.v_rot')\n",
+ "])\n",
+ "m_powertrain = CompositeModel(\n",
+ " (m_esc, m_load, m_motor, torque_multiplier), \n",
+ " connections=connections,\n",
+ " outputs={'DCMotor.v_rot', 'DCMotor.theta'})\n",
+ "simulated_results = m_powertrain.simulate_to(1, future_loading, dt=2.5e-5, save_freq=2e-2)\n",
+ "fig = simulated_results.outputs.plot(compact=False, keys=['DCMotor.v_rot'], ylabel='Velocity')\n",
+ "fig = simulated_results.states.plot(keys=['DCMotor.i_b', 'DCMotor.i_c', 'DCMotor.i_a'], ylabel='ESC Currents')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that you can also have functions with more than one argument. If you dont connect the arguments of the function to some model, it will show up in the inputs of the composite model."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Ensemble Model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "An ensemble model is an approach to modeling where one or more models of the same system are simulated together and then aggregated into a single prediction. This can be multiple versions of the same model with different parameters, or different models of the same system representing different parts of the system's behavior. This is generally done to improve the accuracy of prediction when you have multiple models that each represent part of the behavior or represent a distribution of different behaviors.\n",
+ "\n",
+ "In ensemble models, aggregation occurs in two steps, at state transition and then output, event state, threshold met, or performance metric calculation. At each state transition, the states from each aggregate model are combined based on the defined aggregation method. When calling output, the resulting outputs from each aggregate model are similarily combined. The default method is mean, but the user can also choose to use a custom aggregator.\n",
+ "\n",
+ "![Aggregation](img/aggregation.png)\n",
+ "\n",
+ "To illustrate this, let's create an example where there we have four equivalent circuit models, each with different configuration parameters, below. These represent the range of possible configurations expected for our example system."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.models import BatteryCircuit\n",
+ "m_circuit = BatteryCircuit()\n",
+ "m_circuit_2 = BatteryCircuit(qMax = 7860)\n",
+ "m_circuit_3 = BatteryCircuit(qMax = 6700, Rs = 0.055)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's create an EnsembleModel which combines each of these."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy import EnsembleModel\n",
+ "m_ensemble = EnsembleModel(\n",
+ " models=(m_circuit, m_circuit_2, m_circuit_3))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's evaluate the performance of the combined model using real battery data from NASA's prognostic data repository. See 07. Datasets for more detail on accessing data from this repository"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.datasets import nasa_battery\n",
+ "data = nasa_battery.load_data(batt_id=8)[1]\n",
+ "RUN_ID = 0\n",
+ "test_input = [{'i': i} for i in data[RUN_ID]['current']]\n",
+ "test_time = data[RUN_ID]['relativeTime']"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To evaluate the model we first create a future loading function that uses the loading from the data."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def future_loading(t, x=None):\n",
+ " for i, mission_time in enumerate(test_time):\n",
+ " if mission_time > t:\n",
+ " return m_circuit.InputContainer(test_input[i])\n",
+ " return m_circuit.InputContainer(test_input[-1]) # Default - last load"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next we will simulate the ensemble model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "t_end = test_time.iloc[-1]\n",
+ "results_ensemble = m_ensemble.simulate_to(t_end, future_loading)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, we compare the voltage predicted by the ensemble model with the ground truth from dataset."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from matplotlib import pyplot as plt\n",
+ "fig = plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth')\n",
+ "fig = plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='red', label='ensemble')\n",
+ "plt.xlabel('Time (s)')\n",
+ "plt.ylabel('Voltage')\n",
+ "plt.legend()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The ensemble model actually performs pretty poorly here. This is mostly because there's an outlier model (m_circuit_3). This can be resolved using a different aggregation method. By default, aggregation uses the mean. Let's update the ensemble model to use median and resimulate"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "m_ensemble['aggregation_method'] = np.median\n",
+ "\n",
+ "results_ensemble_median = m_ensemble.simulate_to(t_end, future_loading)\n",
+ "fig = plt.plot(results_ensemble_median.times, [z['v'] for z in results_ensemble_median.outputs], color='orange', label='ensemble -median')\n",
+ "fig = plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth')\n",
+ "fig = plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='red', label='ensemble')\n",
+ "plt.xlabel('Time (s)')\n",
+ "plt.ylabel('Voltage')\n",
+ "plt.legend()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Much better!"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The same ensemble approach can be used with a heterogeneous set of models that have different states.\n",
+ "\n",
+ "Here we will repeat the exercise using the battery electrochemisty and equivalent circuit models. The two models share one state in common (tb), but otherwise are different"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.models import BatteryElectroChemEOD\n",
+ "m_electro = BatteryElectroChemEOD(qMobile=7800)\n",
+ "\n",
+ "print('Electrochem states: ', m_electro.states)\n",
+ "print('Equivalent Circuit States', m_circuit.states)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's create an ensemble model combining these and evaluate it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m_ensemble = EnsembleModel((m_circuit, m_electro))\n",
+ "results_ensemble = m_ensemble.simulate_to(t_end, future_loading)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To compare these results, let's also simulate the two models that comprise the ensemble model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results_circuit1 = m_circuit.simulate_to(t_end, future_loading)\n",
+ "results_electro = m_electro.simulate_to(t_end, future_loading)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The results of each of these are plotted below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.figure()\n",
+ "plt.plot(results_circuit1.times, [z['v'] for z in results_circuit1.outputs], color='blue', label='circuit')\n",
+ "plt.plot(results_electro.times, [z['v'] for z in results_electro.outputs], color='red', label='electro chemistry')\n",
+ "plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='yellow', label='ensemble')\n",
+ "plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth')\n",
+ "plt.legend()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that the result may not be exactly between the other two models. This is because of aggregation is done in 2 steps: at state transition and then at output calculation.\n",
+ "\n",
+ "Ensemble models can be further extended to include an aggregator that selects the best model at any given time. That feature is described in the following section."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Mixture of Experts (MoE)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Mixture of Experts (MoE) models combine multiple models of the same system, similar to Ensemble models. Unlike Ensemble Models, the aggregation is done by selecting the \"best\" model. That is the model that has performed the best over the past. Each model will have a 'score' that is tracked in the state, and this determines which model is best.\n",
+ "\n",
+ "To demonstrate this feature we will repeat the example from the ensemble model section, this time with a mixture of experts model. For this example to work you will have had to have run the ensemble model section example.\n",
+ "\n",
+ "First, let's combine the three battery circuit models into a single mixture of experts model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy import MixtureOfExpertsModel\n",
+ "m = MixtureOfExpertsModel((m_circuit_3, m_circuit_2, m_circuit))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The combined model has the same outputs and events as the circuit model. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(m.outputs)\n",
+ "print(m.events)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Its states contain all of the states of each model, kept separate. Each individual model comprising the MoE model will be simulated separately, so the model keeps track of the states propogated through each model separately. The states also include scores for each model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(m.states)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The MoE model inputs include both the comprised model input, `i` (current) and outputs: `v` (voltage) and `t`(temperature). The comprised model outputs are provided to update the scores of each model when performing state transition. If they are not provided when calling next_state, then scores would not be updated."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(m.inputs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's evaluate the performance of the combined model using real battery data from NASA's prognostic data repository, downloaded in the previous sections. See 07. Datasets for more detail on accessing data from this repository.\n",
+ "\n",
+ "To evaluate the model we first create a future loading function that uses the loading from the data."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results_moe = m.simulate_to(t_end, future_loading)\n",
+ "fig = plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth')\n",
+ "fig = plt.plot(results_moe.times, [z['v'] for z in results_moe.outputs], color='red', label='ensemble')\n",
+ "plt.xlabel('Time (s)')\n",
+ "plt.ylabel('Voltage')\n",
+ "plt.legend()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here the model performs pretty poorly. If you were to look at the state, we see that the three scores are equal. This is because we haven't provided any output information. The future load function doesn't include the output, just the input (`i`). When the three scores are equal like this, the first model is used."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print('Model 1 Score: ', results_moe.states[-1]['BatteryCircuit._score'])\n",
+ "print('Model 2 Score: ', results_moe.states[-1]['BatteryCircuit_2._score'])\n",
+ "print('Model 3 Score: ', results_moe.states[-1]['BatteryCircuit_3._score'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's provide the output for a few steps."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x0 = m.initialize()\n",
+ "x = m.next_state(\n",
+ " x=x0, \n",
+ " u=m.InputContainer({\n",
+ " 'i': test_input[0]['i'],\n",
+ " 'v': data[RUN_ID]['voltage'][0],\n",
+ " 't': data[RUN_ID]['temperature'][0]}),\n",
+ " dt=test_time[1]-test_time[0])\n",
+ "x = m.next_state(\n",
+ " x=x, \n",
+ " u=m.InputContainer({\n",
+ " 'i': test_input[1]['i'],\n",
+ " 'v': data[RUN_ID]['voltage'][1],\n",
+ " 't': data[RUN_ID]['temperature'][1]}),\n",
+ " dt=test_time[1]-test_time[0])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's take a look at the model scores again"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print('Model 1 Score: ', x['BatteryCircuit._score'])\n",
+ "print('Model 2 Score: ', x['BatteryCircuit_2._score'])\n",
+ "print('Model 3 Score: ', x['BatteryCircuit_3._score'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here we see after a few steps the algorithm has determined that model 3 is the better fitting of the models. Now if we were to repeat the simulation, it would use the best model, resulting in a better fit. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "results_moe = m.simulate_to(t_end, future_loading, t0=test_time[1]-test_time[0], x=x)\n",
+ "fig = plt.plot(test_time[2:], data[RUN_ID]['voltage'][2:], color='green', label='ground truth')\n",
+ "fig = plt.plot(results_moe.times[2:], [z['v'] for z in results_moe.outputs][2:], color='red', label='moe')\n",
+ "plt.xlabel('Time (s)')\n",
+ "plt.ylabel('Voltage')\n",
+ "plt.legend()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The fit here is much better. The MoE model learned which of the three models best fit the observed behavior.\n",
+ "\n",
+ "In a prognostic application, the scores will be updated each time you use a state estimator (so long as you provide the output as part of the input). Then when performing a prediction the scores aren't updated, since outputs are not known.\n",
+ "\n",
+ "An example of when this would be useful is for cases where there are three common degradation paths or \"modes\" rather than a single model with uncertainty to represent every mode, the three modes can be represented by three different models. Once enough of the degradation path has been observed the observed mode will be the one reported.\n",
+ "\n",
+ "If the model fit is expected to be stable (that is, the best model is not expected to change anymore). The best model can be extracted and used directly, like demonstrated below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "name, m_best = m.best_model(x)\n",
+ "print(name, \" was the best fit\")"
+ ]
+ },
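+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To illustrate the point about state estimation above, here is a minimal sketch (an illustrative addition, assuming `progpy.state_estimators.ParticleFilter` and reusing the data loaded earlier). Because the measured outputs are included in the input container, each estimate step also updates the model scores."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from progpy.state_estimators import ParticleFilter\n",
+ "\n",
+ "filt = ParticleFilter(m, m.initialize())\n",
+ "u = m.InputContainer({\n",
+ "    'i': test_input[0]['i'],\n",
+ "    'v': data[RUN_ID]['voltage'][0],\n",
+ "    't': data[RUN_ID]['temperature'][0]})\n",
+ "z = m.OutputContainer({\n",
+ "    'v': data[RUN_ID]['voltage'][0],\n",
+ "    't': data[RUN_ID]['temperature'][0]})\n",
+ "filt.estimate(test_time[1] - test_time[0], u, z)  # scores update during this step"
+ ]
+ },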
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Conclusions"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In this section we demonstrated a few methods for treating multiple models as a single model. This is of interest when there are multiple models of different systems which are interdependent (CompositeModel), multiple models of the same system that portray different parts of the behavior or different candidate representations (EnsembleModel), or multiple models of the same system that represent possible degradation modes (MixtureOfExpertModel)."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3.11.0 64-bit",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.0"
+ },
+ "orig_nbformat": 4,
+ "vscode": {
+ "interpreter": {
+ "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/_downloads/dc9f2bf22220701a3b515335cb4d8038/full_lstm_model.py b/docs/_downloads/dc9f2bf22220701a3b515335cb4d8038/full_lstm_model.py
index dfa6829..b8f36b4 100644
--- a/docs/_downloads/dc9f2bf22220701a3b515335cb4d8038/full_lstm_model.py
+++ b/docs/_downloads/dc9f2bf22220701a3b515335cb4d8038/full_lstm_model.py
@@ -30,11 +30,11 @@ def future_loading(t, x=None):
# Step 1: Generate additional data
# We will use data generated above, but we also want data at additional timesteps
print('Generating data...')
- data = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP, dt=TIMESTEP)
- data_half = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2)
- data_quarter = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4)
- data_twice = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2)
- data_four = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4)
+ data = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP, dt=TIMESTEP)
+ data_half = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2)
+ data_quarter = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4)
+ data_twice = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2)
+ data_four = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4)
# Step 2: Data Prep
# We need to add the timestep as a input
@@ -109,8 +109,8 @@ def future_loading3(t, x = None):
# Use new dt, not used in training
# Using a dt not used in training will demonstrate the model's
# ability to handle different timesteps not part of training set
- data = m.simulate_to_threshold(future_loading, threshold_keys='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3)
- results3 = m2.simulate_to_threshold(future_loading3, threshold_keys='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3)
+ data = m.simulate_to_threshold(future_loading, events='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3)
+ results3 = m2.simulate_to_threshold(future_loading3, events='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3)
# Step 6: Compare Results
print('Comparing results...')
diff --git a/docs/_downloads/e7ee62f41f59a26f3cd93b4b0497cc90/lstm_model.py b/docs/_downloads/e7ee62f41f59a26f3cd93b4b0497cc90/lstm_model.py
index e012368..7968caf 100644
--- a/docs/_downloads/e7ee62f41f59a26f3cd93b4b0497cc90/lstm_model.py
+++ b/docs/_downloads/e7ee62f41f59a26f3cd93b4b0497cc90/lstm_model.py
@@ -36,7 +36,7 @@ def run_example():
def future_loading(t, x=None):
return m.InputContainer({}) # No input for thrown object
- data = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP, dt=TIMESTEP)
+ data = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP, dt=TIMESTEP)
# Step 2: Generate model
# We'll use the LSTMStateTransitionModel class to generate a model
@@ -91,10 +91,10 @@ def future_loading2(t, x=None):
# We will use data generated above, but we also want data at additional timesteps
print('\n------------------------------------------\nExample 2...')
print('Generating additional data...')
- data_half = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2)
- data_quarter = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4)
- data_twice = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2)
- data_four = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4)
+ data_half = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2)
+ data_quarter = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4)
+ data_twice = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2)
+ data_four = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4)
# Step 2: Data Prep
# We need to add the timestep as a input
diff --git a/docs/_images/ProgPyComponents.png b/docs/_images/ProgPyComponents.png
new file mode 100644
index 0000000..e3c1656
Binary files /dev/null and b/docs/_images/ProgPyComponents.png differ
diff --git a/docs/_sources/api_ref/progpy.rst.txt b/docs/_sources/api_ref/progpy.rst.txt
new file mode 100644
index 0000000..ea4c6cf
--- /dev/null
+++ b/docs/_sources/api_ref/progpy.rst.txt
@@ -0,0 +1,15 @@
+ProgPy API Reference
+==================================
+
+.. raw:: html
+
+
+
+.. toctree::
+ :glob:
+
+ progpy/*
+
+.. image:: ../images/prog_algs_UML.png
+
+.. image:: ../images/prog_models_UML.png
diff --git a/docs/_sources/api_ref/progpy/CompositeModel.rst.txt b/docs/_sources/api_ref/progpy/CompositeModel.rst.txt
new file mode 100644
index 0000000..625e199
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/CompositeModel.rst.txt
@@ -0,0 +1,5 @@
+CompositeModel
+================
+
+.. autoclass:: progpy.CompositeModel
+ :show-inheritance:
diff --git a/docs/_sources/api_ref/progpy/DataModel.rst.txt b/docs/_sources/api_ref/progpy/DataModel.rst.txt
new file mode 100644
index 0000000..d3a490c
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/DataModel.rst.txt
@@ -0,0 +1,86 @@
+DataModel
+=============
+
+The :py:class:`DataModel` class is the base class for all data-based models. It is a subclass of :py:class:`PrognosticsModel`, allowing it to be used interchangeably with physics-based models.
+
+.. .. contents::
+.. :backlinks: top
+
+Examples:
+
+* :download:`examples.lstm_model <../../../../progpy/examples/lstm_model.py>`
+* :download:`examples.full_lstm_model <../../../../progpy/examples/full_lstm_model.py>`
+* :download:`examples.custom_model <../../../../progpy/examples/custom_model.py>`
+
+Training DataModels
+-----------------------
+There are a few ways to construct a :py:class:`DataModel` object, described below.
+
+From Data
+*************************************************
+This is the most common way to construct a :py:class:`DataModel` object, using the :py:func:`DataModel.from_data` method. It involves using one or more runs of data to train the model. Each DataModel class expects different data from the following set: times, inputs, states, outputs, and event_states. See the documentation for the specific algorithm to see what it expects. Below is an example of its use with the LSTMStateTransitionModel, which expects inputs and outputs.
+
+.. dropdown:: example
+
+ .. code-block:: python
+
+ >>> from progpy.models import LSTMStateTransitionModel
+ >>> input_data = [run1.inputs, run2.inputs, run3.inputs]
+ >>> output_data = [run1.outputs, run2.outputs, run3.outputs]
+ >>> m = LSTMStateTransitionModel.from_data(input_data, output_data)
+
+From Another PrognosticsModel (i.e., Surrogate)
+*************************************************
+Surrogate models are constructed using the :py:func:`DataModel.from_model` class method. These models are trained using data generated from the original model, i.e., as a surrogate for the original model. The original model is not modified. Below is an example of its use. In this example, a surrogate (m2) of the original ThrownObject model (m) is created, which can then be used interchangeably with the original model.
+
+.. dropdown:: example
+
+ .. code-block:: python
+
+ >>> from progpy.models import ThrownObject
+ >>> from progpy.models import LSTMStateTransitionModel
+ >>> m = ThrownObject()
+ >>> def future_loading(t, x=None):
+ >>> return m.InputContainer({}) # No input for thrown object
+ >>> m2 = LSTMStateTransitionModel.from_model(m, future_loading)
+
+.. note::
+
+ Surrogate models are generally less accurate than the original model. This method is used either to create a quicker version of the original model (see :py:class:`DMDModel`) or to test the performance of a :py:class:`DataModel` approach.
+
+.. seealso::
+
+ :py:func:`PrognosticsModel.generate_surrogate`
+
+Using Constructor
+**********************
+This method is the least frequently used, and it is very specific to the :py:class:`DataModel` class being constructed. For example, :py:class:`DMDModel` objects are constructed using the DMD matrix, and :py:class:`LSTMStateTransitionModel` objects are constructed using a trained Keras model.
+
+See example :download:`examples.custom_model <../../../../progpy/examples/custom_model.py>`
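+
+For illustration, below is a minimal sketch of direct construction. Here `keras_model` is a placeholder for a trained Keras model; the exact constructor arguments are specific to the class being constructed (see the class documentation).
+
+.. dropdown:: example
+
+    .. code-block:: python
+
+        >>> # Minimal sketch: keras_model is assumed to be a trained Keras
+        >>> # model compatible with this class; see the class documentation
+        >>> # for the exact constructor arguments.
+        >>> from progpy.data_models import LSTMStateTransitionModel
+        >>> m = LSTMStateTransitionModel(keras_model)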
+
+Included DataModels
+-------------------------
+The following DataModels are included in the package. A new DataModel can be created by subclassing :py:class:`DataModel`, implementing the abstract methods of both :py:class:`DataModel` and :py:class:`PrognosticsModel`.
+
+DMDModel
+**************************
+.. autoclass:: progpy.data_models.DMDModel
+ :members: from_data, from_model
+
+LSTMStateTransitionModel
+**************************
+.. autoclass:: progpy.data_models.LSTMStateTransitionModel
+ :members: from_data, from_model
+
+PolynomialChaosExpansion
+**************************
+.. autoclass:: progpy.data_models.PolynomialChaosExpansion
+ :members: from_data, from_model
+
+DataModel Interface
+---------------------------
+.. autoclass:: progpy.data_models.DataModel
+ :members:
+ :show-inheritance:
+ :inherited-members:
+ :exclude-members: SimulationResults, generate_model, observables, dx, next_state, initialize, output, event_state, threshold_met, apply_process_noise, apply_measurement_noise, apply_limits, performance_metrics
diff --git a/docs/_sources/api_ref/progpy/DataSets.rst.txt b/docs/_sources/api_ref/progpy/DataSets.rst.txt
new file mode 100644
index 0000000..2bb9edb
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/DataSets.rst.txt
@@ -0,0 +1,15 @@
+Datasets
+================================================================
+
+The `progpy` dataset subpackage is used to download labeled prognostics data for use in model building, analysis, or validation. Every dataset comes equipped with a `load_data` function which loads the specified data. Some datasets require a dataset number or id. This indicates the specific data to load from the larger dataset. The format of the data is specific to the dataset downloaded. Details of the specific datasets are summarized below:
+
+.. note:: To use the dataset feature, you must install the requests package.
+
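+For illustration, below is a minimal sketch of loading one dataset. The dataset id (1) is an assumed example, and the exact return format is dataset-specific (see the corresponding `load_data` documentation).
+
+.. code-block:: python
+
+    >>> # Minimal sketch: dataset id 1 is an assumed example; the return
+    >>> # format is specific to the dataset.
+    >>> from progpy.datasets import nasa_battery
+    >>> (desc, data) = nasa_battery.load_data(1)
+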
+Variable Load Battery Data (nasa_battery)
+----------------------------------------------------
+.. autofunction:: progpy.datasets.nasa_battery.load_data
+
+CMAPSS Jet Engine Data (nasa_cmapss)
+----------------------------------------------------
+.. autofunction:: progpy.datasets.nasa_cmapss.load_data
+
\ No newline at end of file
diff --git a/docs/_sources/api_ref/progpy/EnsembleModel.rst.txt b/docs/_sources/api_ref/progpy/EnsembleModel.rst.txt
new file mode 100644
index 0000000..20a0268
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/EnsembleModel.rst.txt
@@ -0,0 +1,5 @@
+EnsembleModel
+================
+
+.. autoclass:: progpy.EnsembleModel
+ :show-inheritance:
diff --git a/docs/_sources/api_ref/progpy/IncludedModels.rst.txt b/docs/_sources/api_ref/progpy/IncludedModels.rst.txt
new file mode 100644
index 0000000..1b6c4e2
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/IncludedModels.rst.txt
@@ -0,0 +1,99 @@
+Included Models
+===================
+The progpy package is distributed with a few pre-constructed models that can be used in simulation or prognostics. These models are summarized in the following sections.
+
+.. .. contents::
+.. :backlinks: top
+
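+For illustration, below is a minimal sketch of simulating one of these models (the BatteryCircuit model, described below) to an event. The constant 2 A load and the 'EOD' event name are assumptions for this sketch.
+
+.. code-block:: python
+
+    >>> from progpy.models import BatteryCircuit
+    >>> m = BatteryCircuit()
+    >>> def future_loading(t, x=None):
+    >>>     return m.InputContainer({'i': 2})  # assumed constant 2 A draw
+    >>> results = m.simulate_to_threshold(future_loading, events='EOD', save_freq=10)
+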
+Battery Model
+-------------------------------------------------------------
+
+.. tabs::
+
+ .. tab:: ElectroChem (EOD)
+
+ .. autoclass:: progpy.models.BatteryElectroChemEOD
+
+ .. tab:: ElectroChem (EOL)
+
+ .. autoclass:: progpy.models.BatteryElectroChemEOL
+
+ .. tab:: ElectroChem (Combo)
+
+ .. autoclass:: progpy.models.BatteryElectroChem
+
+ .. autoclass:: progpy.models.BatteryElectroChemEODEOL
+
+ .. tab:: Circuit
+
+ .. autoclass:: progpy.models.BatteryCircuit
+
+
+Pump Model
+-------------------------------------------------------------
+
+There are two variants of the pump model, based on whether the wear parameters are estimated as part of the state. The models are described below.
+
+.. tabs::
+
+ .. tab:: Base Model
+
+ .. autoclass:: progpy.models.CentrifugalPumpBase
+
+ .. tab:: With Wear As State
+
+ .. autoclass:: progpy.models.CentrifugalPump
+
+ .. autoclass:: progpy.models.CentrifugalPumpWithWear
+
+Pneumatic Valve
+-------------------------------------------------------------
+
+There are two variants of the valve model, based on whether the wear parameters are estimated as part of the state. The models are described below.
+
+.. tabs::
+
+ .. tab:: Base Model
+
+ .. autoclass:: progpy.models.PneumaticValveBase
+
+ .. tab:: With Wear As State
+
+ .. autoclass:: progpy.models.PneumaticValve
+
+ .. autoclass:: progpy.models.PneumaticValveWithWear
+
+DC Motor
+-------------------------------------------------------------
+
+.. tabs::
+
+ .. tab:: Single Phase
+
+ .. autoclass:: progpy.models.DCMotorSP
+
+ .. tab:: Triple Phase
+
+ .. autoclass:: progpy.models.DCMotor
+
+ESC
+-------------------------------------------------------------
+.. autoclass:: progpy.models.ESC
+
+Powertrain
+-------------------------------------------------------------
+.. autoclass:: progpy.models.Powertrain
+
+PropellerLoad
+-------------------------------------------------------------
+.. autoclass:: progpy.models.PropellerLoad
+
+Aircraft Models
+-------------------------------------------------------------
+Aircraft models simulate the flight of an aircraft. All aircraft models inherit from :py:class:`progpy.models.aircraft_model.AircraftModel`. Included models are listed below:
+
+.. autoclass:: progpy.models.aircraft_model.SmallRotorcraft
+
+ThrownObject
+-------------------------------------------------------------
+.. autoclass:: progpy.models.ThrownObject
diff --git a/docs/_sources/api_ref/progpy/LinearModel.rst.txt b/docs/_sources/api_ref/progpy/LinearModel.rst.txt
new file mode 100644
index 0000000..dffa2bc
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/LinearModel.rst.txt
@@ -0,0 +1,8 @@
+LinearModel
+=================
+
+.. autoclass:: progpy.LinearModel
+ :members:
+ :inherited-members:
+ :exclude-members: SimulationResults, generate_model, observables
+ :show-inheritance:
diff --git a/docs/_sources/api_ref/progpy/Loading.rst b/docs/_sources/api_ref/progpy/Loading.rst
index bcfeffc..33622de 100644
--- a/docs/_sources/api_ref/progpy/Loading.rst
+++ b/docs/_sources/api_ref/progpy/Loading.rst
@@ -1,7 +1,7 @@
Loading
=========
-The loading subpackage includes some classes for complex load estimation algorithms. See :download:`examples.future_loading <../../../../progpy/examples/future_loading.py>` for more details.
+The loading subpackage includes some classes for complex load estimation algorithms.
Load Estimator Class interface
------------------------------
diff --git a/docs/_sources/api_ref/progpy/Loading.rst.txt b/docs/_sources/api_ref/progpy/Loading.rst.txt
new file mode 100644
index 0000000..bcfeffc
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/Loading.rst.txt
@@ -0,0 +1,46 @@
+Loading
+=========
+
+The loading subpackage includes some classes for complex load estimation algorithms. See :download:`examples.future_loading <../../../../progpy/examples/future_loading.py>` for more details.
+
+Load Estimator Class interface
+------------------------------
+The key aspect of a load estimator is that it needs to be able to be called with either time or time and state. The most common way of accomplishing this is with a function, described in the dropdown below.
+
+.. dropdown:: Functional Load Estimator
+
+ .. code-block:: python
+
+ >>> def load_estimator(t, x = None):
+ >>> # Calculate loading as function of time (t) and state (x)
+ >>> return load
+
+The second approach for load estimators is a load estimation class. This is used to represent complex behavior. The interface for this is described in the dropdown below.
+
+.. dropdown:: Class Load Estimator
+
+ .. code-block:: python
+
+ >>> class LoadEstimator:
+ >>> def __init__(self, *args, **kwargs):
+ >>> # Initialize the load estimator
+ >>> pass
+ >>> def __call__(self, t, x = None):
+ >>> # Calculate loading as function of time (t) and state (x)
+ >>> return load
+
+Load Estimator Classes
+----------------------
+
+.. autoclass:: progpy.loading.Piecewise
+
+.. autoclass:: progpy.loading.MovingAverage
+
+.. autoclass:: progpy.loading.GaussianNoiseWrapper
+
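+For illustration, below is a minimal sketch of the Piecewise load estimator. The model `m`, the input key 'i', and the times/values used are assumptions for this sketch; see the class documentation for the exact argument semantics.
+
+.. dropdown:: example
+
+    .. code-block:: python
+
+        >>> from progpy.loading import Piecewise
+        >>> # Assumed: load is 2 until t=600, 1 until t=900, then 4 after
+        >>> future_load = Piecewise(m.InputContainer, [600, 900], {'i': [2, 1, 4]})
+        >>> load_now = future_load(100)
+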
+Controllers
+------------------
+
+.. autoclass:: progpy.loading.controllers.LQR
+
+.. autoclass:: progpy.loading.controllers.LQR_I
diff --git a/docs/_sources/api_ref/progpy/MixtureOfExperts.rst.txt b/docs/_sources/api_ref/progpy/MixtureOfExperts.rst.txt
new file mode 100644
index 0000000..6b6a2b1
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/MixtureOfExperts.rst.txt
@@ -0,0 +1,5 @@
+MixtureOfExperts
+================
+
+.. autoclass:: progpy.MixtureOfExpertsModel
+ :show-inheritance:
diff --git a/docs/_sources/api_ref/progpy/Prediction.rst.txt b/docs/_sources/api_ref/progpy/Prediction.rst.txt
new file mode 100644
index 0000000..b3eba37
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/Prediction.rst.txt
@@ -0,0 +1,20 @@
+Prediction
+=======================
+
+Predictions store the result of a prediction (i.e., what is returned by the predict method of a predictor). They store values (with uncertainty) at future times: states, inputs, outputs, performance metrics, and event states at each savepoint.
+
+Two types of predictions are distributed with this package: `Prediction` and `UnweightedSamplesPrediction`, described below. `UnweightedSamplesPrediction` extends `Prediction` to allow some operations specific to cases where each prediction is represented by an UnweightedSamples object (e.g., accessing SimResult for a single sample).
+
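+For illustration, below is a minimal sketch of working with a prediction. Here `states` is assumed to be a Prediction returned by a predictor's predict method, and `snapshot` is assumed to return the values (as UncertainData) at the given savepoint.
+
+.. code-block:: python
+
+    >>> # Minimal sketch: states is assumed to be a Prediction from a
+    >>> # predictor; snapshot(-1) gives the values at the last savepoint.
+    >>> final_states = states.snapshot(-1)
+    >>> print(final_states.mean)
+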
+.. tabs::
+ .. tab:: Prediction
+
+ .. autoclass:: progpy.predictors.Prediction
+ :members:
+ :inherited-members:
+
+ .. tab:: UnweightedSamplesPrediction
+
+ .. autoclass:: progpy.predictors.UnweightedSamplesPrediction
+ :members:
+ :inherited-members:
+ :exclude-members: append, extend, clear, pop, remove, reverse, insert
diff --git a/docs/_sources/api_ref/progpy/Predictor.rst.txt b/docs/_sources/api_ref/progpy/Predictor.rst.txt
new file mode 100644
index 0000000..af87404
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/Predictor.rst.txt
@@ -0,0 +1,43 @@
+Predictors
+===========================
+
+The :py:class:`Predictor` uses a state estimate (type :py:class:`UncertainData` subclass, output of a :py:class:`StateEstimator`), information about expected future loading, and a :py:class:`PrognosticsModel` to predict both future states (also outputs, performance metrics, event_states) at predefined points and the time that an event will occur (Time of Event, ToE) with uncertainty.
+
+Here's an example of its use. In this example we use the :py:class:`ThrownObject` model and the :py:class:`MonteCarlo` predictor, and we save the state every 1s. We also use a scalar first state (i.e., no uncertainty).
+
+.. code-block:: python
+
+ >>> from progpy.models import ThrownObject
+ >>> from progpy.predictors import MonteCarlo
+ >>> from progpy.uncertain_data import ScalarData
+ >>>
+ >>> m = ThrownObject()
+ >>> pred = MonteCarlo(m)
+ >>> first_state = ScalarData({'x': 1.7, 'v': 20}) # Initial state for prediction
+ >>> def future_loading(t, x):
+ >>> return {} # ThrownObject doesn't have a way of loading it
+ >>>
+ >>> pred_results = pred.predict(first_state, future_loading, save_freq=1)
+ >>> pred_results.time_of_event.plot_hist(events='impact') # Plot a histogram of when the impact event occurred
+
+See tutorial and examples for more information and additional features.
+
+Included Predictors
+-----------------------
+The following predictors are included with this package. A new predictor can be created by subclassing :py:class:`Predictor`. See also: `predictor_template.py`
+
+.. tabs::
+
+ .. tab:: Monte Carlo Predictor
+
+ .. autoclass:: progpy.predictors.MonteCarlo
+
+ .. tab:: Unscented Transform Predictor
+
+ .. autoclass:: progpy.predictors.UnscentedTransformPredictor
+
+Predictor Interface
+-----------------------
+.. autoclass:: progpy.predictors.Predictor
+ :members:
+ :inherited-members:
diff --git a/docs/_sources/api_ref/progpy/PrognosticModel.rst.txt b/docs/_sources/api_ref/progpy/PrognosticModel.rst.txt
new file mode 100644
index 0000000..7565e4f
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/PrognosticModel.rst.txt
@@ -0,0 +1,7 @@
+PrognosticsModel
+====================
+
+.. autoclass:: progpy.PrognosticsModel
+ :members:
+ :inherited-members:
+ :exclude-members: SimulationResults, generate_model, observables
diff --git a/docs/_sources/api_ref/progpy/SimResult.rst.txt b/docs/_sources/api_ref/progpy/SimResult.rst.txt
new file mode 100644
index 0000000..fee59c9
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/SimResult.rst.txt
@@ -0,0 +1,7 @@
+SimResult
+================================
+
+.. autoclass:: progpy.sim_result.SimResult
+ :members:
+ :inherited-members:
+ :exclude-members: append, reverse, count, insert
diff --git a/docs/_sources/api_ref/progpy/StateEstimator.rst.txt b/docs/_sources/api_ref/progpy/StateEstimator.rst.txt
new file mode 100644
index 0000000..852d459
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/StateEstimator.rst.txt
@@ -0,0 +1,46 @@
+State Estimators
+===========================
+The State Estimator uses sensor information and a Prognostics Model to produce an estimate of system state (which can be used to estimate outputs, event_states, and performance metrics). This state estimate can either be used by itself or as input to a `Predictor `__. A state estimator is typically run each time new information is available.
+
+Here's an example of its use. In this example we use the Unscented Kalman Filter state estimator and the ThrownObject model.
+
+.. code-block:: python
+
+ >>> from progpy.models import ThrownObject
+ >>> from progpy.state_estimators import UnscentedKalmanFilter
+ >>>
+ >>> m = ThrownObject()
+ >>> initial_state = m.initialize()
+ >>> filt = UnscentedKalmanFilter(m, initial_state)
+ >>>
+ >>> load = {} # No load for ThrownObject
+ >>> new_data = {'x': 1.8} # Observed state
+ >>> print('Prior: ', filt.x.mean)
+ >>> filt.estimate(0.1, load, new_data)
+ >>> print('Posterior: ', filt.x.mean)
+
+See tutorial and examples for more information and additional features.
+
+Included State Estimators
+-------------------------
+The following state estimators are included with this package. A new state estimator can be created by subclassing `progpy.state_estimators.StateEstimator`. See also: `state_estimator_template.py`
+
+.. tabs::
+
+ .. tab:: Particle Filter
+
+ .. autoclass:: progpy.state_estimators.ParticleFilter
+
+ .. tab:: Unscented Kalman Filter
+
+ .. autoclass:: progpy.state_estimators.UnscentedKalmanFilter
+
+ .. tab:: Kalman Filter
+
+ .. autoclass:: progpy.state_estimators.KalmanFilter
+
+State Estimator Interface
+-------------------------
+.. autoclass:: progpy.state_estimators.StateEstimator
+ :members:
+ :inherited-members:
diff --git a/docs/_sources/api_ref/progpy/ToEPredictionProfile.rst.txt b/docs/_sources/api_ref/progpy/ToEPredictionProfile.rst.txt
new file mode 100644
index 0000000..6fd141f
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/ToEPredictionProfile.rst.txt
@@ -0,0 +1,6 @@
+ToEPredictionProfile
+----------------------
+.. autoclass:: progpy.predictors.ToEPredictionProfile
+ :members:
+ :inherited-members:
+ :exclude-members: setdefault
diff --git a/docs/_sources/api_ref/progpy/UncertainData.rst.txt b/docs/_sources/api_ref/progpy/UncertainData.rst.txt
new file mode 100644
index 0000000..863cd13
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/UncertainData.rst.txt
@@ -0,0 +1,28 @@
+Uncertain Data
+=======================
+
+The `progpy.uncertain_data` package includes classes for representing data with uncertainty. All types of UncertainData can be operated on using `the interface <#interface>`__. Individual classes for representing uncertain data of different kinds are described below, in `Implemented UncertainData Types <#implemented-uncertaindata-types>`__.
+
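+For illustration, below is a minimal sketch constructing two UncertainData types; both then expose the same interface (e.g., `mean`).
+
+.. code-block:: python
+
+    >>> from progpy.uncertain_data import ScalarData, UnweightedSamples
+    >>> x = ScalarData({'a': 1.2, 'b': 2.5})  # no uncertainty
+    >>> samples = UnweightedSamples([{'a': 1.1, 'b': 2.4}, {'a': 1.3, 'b': 2.6}])
+    >>> print(samples.mean)
+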
+Interface
+------------------------
+.. autoclass:: progpy.uncertain_data.UncertainData
+ :members:
+ :inherited-members:
+
+Implemented UncertainData Types
+--------------------------------
+
+.. tabs::
+
+ .. tab:: Unweighted Samples
+
+ .. autoclass:: progpy.uncertain_data.UnweightedSamples
+ :members: key
+
+ .. tab:: Multivariate Normal Distribution
+
+ .. autoclass:: progpy.uncertain_data.MultivariateNormalDist
+
+ .. tab:: Scalar
+
+ .. autoclass:: progpy.uncertain_data.ScalarData
diff --git a/docs/_sources/api_ref/progpy/Utils.rst.txt b/docs/_sources/api_ref/progpy/Utils.rst.txt
new file mode 100644
index 0000000..ca7757d
--- /dev/null
+++ b/docs/_sources/api_ref/progpy/Utils.rst.txt
@@ -0,0 +1,12 @@
+Utils
+================================================================
+
+There are a number of support functions and classes included in progpy.utils. These utilities are used throughout the package, but are also available for use in your own code.
+
+.. .. contents::
+.. :backlinks: top
+
+Trajectory
+----------------------------------------------------------------
+.. autoclass:: progpy.utils.traj_gen.Trajectory
+ :members: ref_traj, generate
diff --git a/docs/_sources/dev_guide.rst.txt b/docs/_sources/dev_guide.rst.txt
index 1aac98f..2ade525 100644
--- a/docs/_sources/dev_guide.rst.txt
+++ b/docs/_sources/dev_guide.rst.txt
@@ -8,7 +8,7 @@ Developers Guide & Project Plan
npr7150
-This document includes some details relevant for developers working on any of the Python Prognostics Packages Tools (prog_models, prog_algs, prog_server)
+This document includes some details relevant for developers working on any of the Python Prognostics Packages Tools (progpy, prog_server)
Installing from a Branch
------------------------
@@ -17,11 +17,21 @@ To install the package package from a specific branch. First clone the repositor
This command installs the package using the checked-out version.
+Running Tests
+------------------------
+To run the progpy tests, first clone the repository, check out the branch, and install the package using the command above. Then navigate into the repository directory and install the dependencies required for testing using the following commands:
+ `pip install notebook`
+ `pip install testbook`
+ `pip install requests`
+
+Then run the tests using the following command:
+ `python -m tests`
+
Contributing
---------------
-New external (non-NASA or NASA contractor) developers must complete either the `organizational or individual Contributor License Agreement (CLA) `__.
+New external (non-NASA or NASA contractor) developers must complete either the `organizational or individual Contributor License Agreement (CLA) `__.
-Curious about what needs to be done? Have an idea for a new feature? Find a bug? Check out open issues `prog_models `__, `prog_algs `__, `prog_server `__.
+Curious about what needs to be done? Have an idea for a new feature? Find a bug? Check out open issues `progpy `__, `prog_server `__.
Project Roles
--------------------
@@ -29,11 +39,11 @@ Project Roles
* Software Assurance Officer: Christopher Teubert
* Deputy Software Lead: Katelyn Jarvis
* Software Management Team: Software Lead, Software Assurance Officer, and Deputy Software Lead
-* Developers: See `prog_models developers `__, `prog_algs developers `__, `prog_server developers `_
+* Developers: See `progpy developers `__, `prog_server developers `_
Branching Strategy
------------------
-Our project is following the git strategy described `here `__. Release branches are not required. Details specific to each branch are described below.
+Our project is following the git strategy described `here `__. Release branches are not required. Details specific to each branch are described below. We recommend that developers from within NASA watch `this video `__ on git strategies and best practices.
`master`: Every merge into the master branch is done using a pull request (never commiting directly), is assigned a release number, and must complete the release checklist. The release checklist is a software assurance tool.
@@ -75,9 +85,8 @@ A release is the merging of a PR where the target is the master branch.
* Check that each new feature has corresponding tests
* [Complete - checked automatically in PRs to dev] Confirm that every page has the copyright notice
* Confirm added dependencies are at the following:
- * requirements.txt,
* setup.py,
- * the bottom of dev_guide.rst (this document)
+ * the bottom of npr7150.rst
* Confirm that all issues associated with the release have been closed (i.e., requirements have been met) or assigned to another release
* Run unit tests `python -m tests` on the following computer types:
* Apple Silicon Mac
@@ -128,7 +137,12 @@ Post-Release Checklist
* For prog_server: Update openapi specs on `SwaggerHub `__
* Send notes to Software Release Office (SRO) of updated version number
* Publish to PyPi
-* Send Highlight
+* Tag release with DOI
+* Setup release in GitHub
+* Post release in GitHub Discussions
+* Merge doc changes
+* Merge back into dev to get post-release changes
+* Send Highlights - Division, Known Users, LinkedIn, etc.
Notes for Developers
--------------------
@@ -138,14 +152,15 @@ Notes for Developers
* When supplied by or to the user, values with names (e.g., inputs, states, outputs, event_states, event occurrence, etc.) should be supplied as dictionaries (or dict-like objects) where they can be referred to by name.
* subpackages shall be independent (i.e., not have any dependencies with the wider package or other subpackages) when possible
* Whenever possible Models, UncertainData types, State Estimators, and Predictors should be interchangeable with any other class of the same type (e.g., any model should be interchangeable with any other model)
-* Demonstrate common use cases as an example.
+* Demonstrate common use cases as an example.
+* Use collections.abc instead of typing
* Python code should comply with `PEP 8: Python Style Guide `__, where appropriate
* See also: `Writing Clean and Pythonic Code (JPL) `__
* Code should be compliant with the recommendations of `LGTM `__, whenever appropriate
* Every feature should be demonstrated in an example
* The most commonly used features should be demonstrated in the tutorial
* Except in the most extreme cases, maintain backwards compatibility for the convenience of existing users
- * If a feature is to be removed, mark it as depreciated for at least 2 releases before removing
+ * If a feature is to be removed, mark it as deprecated (using DeprecationWarning) for at least 1 release before removing, unless marked experimental
* Examples are included in the examples/ directory.
* Examples should cover the major use cases and features. If a major new feature is added, make sure there's an example demonstrating the feature.
* For new examples- add to examples __all__ and example tests (tests/test_examples).
@@ -155,7 +170,7 @@ Notes for Developers
* Each new feature should have a test. Check this in each PR review.
* Check test coverage to improve completeness, automatically reported by bot in each PR.
* For tests- make sure test are quality. They should cover expected input ranges, error handling.
- * There are some example models in prog_models.models.test_models which are useful for testing
+ * There are some example models in progpy.models.test_models which are useful for testing
* Documentation
* Documentation is autogenerated using sphinx from progpy repository
* Configuration is in sphinx_config.
@@ -165,7 +180,7 @@ Notes for Developers
* Automated tests are defined in the .github/ directory.
* Adding tests to the set required to pass for each PR must be done by the repository administrator.
* Template
- * An empty template of a prognostics model is maintained at `prog_models/prog_model_template.py`.
- * An empty template of a state estimator and predictor is maintained at `prog_algs/state_estimator_template.py` and `prog_algs/predictor_template.py`.
+ * An empty template of a prognostics model is maintained at `progpy/prog_model_template.py`.
+ * An empty template of a state estimator and predictor is maintained at `progpy/state_estimator_template.py` and `progpy/predictor_template.py`.
* Any changes to the basic model setup should be documented there.
* A tutorial is included in tutorial.ipynb. This requires Jupyter Notebook. All major features should be illustrated here.
diff --git a/docs/_sources/glossary.rst.txt b/docs/_sources/glossary.rst.txt
index a5275ab..bf32b49 100644
--- a/docs/_sources/glossary.rst.txt
+++ b/docs/_sources/glossary.rst.txt
@@ -4,6 +4,12 @@ Glossary
.. glossary::
:sorted:
+ controller
+ A closed loop future loading method. Calculates future loading as a function of state, like the :py:class:`progpy.loading.controllers.LQR` controller used by the :py:class:`progpy.models.aircraft_model.SmallRotorcraft` model.
+
+ trajectory
+ Path a vehicle takes through space, represented by a set of 4-dimensional points (position + time), represented by the :py:class:`progpy.utils.traj_gen.Trajectory` class.
+
event
Something that can be predicted (e.g., system failure). An event has either occurred or not. See also: :term:`threshold`
@@ -17,7 +23,7 @@ Glossary
Measured sensor values from a system (e.g., voltage and temperature of a battery). Output is frequently denoted by z.
future load
- :term:`input` (i.e., loading) expected to be applied to a system at future times
+ :term:`input` (i.e., loading) expected to be applied to a system at future times. In ProgPy, future load is typically provided as a function of time and state, f(time, state) -> load
performance metric
Performance characteristics of a system that are a function of system state, but are not directly measured.
@@ -29,19 +35,19 @@ Glossary
:term:`state` that is not directly measurable
state estimator
- An algorithm that is used to estimate the :term:`state` of the system, given measurements and a model, defined in the :py:mod:`prog_algs.state_estimators` subpackage (e.g., :py:class:`prog_algs.state_estimators.UnscentedKalmanFilter`).
+ An algorithm that is used to estimate the :term:`state` of the system, given measurements and a model, defined in the :py:mod:`progpy.state_estimators` subpackage (e.g., :py:class:`progpy.state_estimators.UnscentedKalmanFilter`).
predictor
- An algorithm that is used to predict future states, given the initial state, a model, and an estimate of :term:`future load`. E.g., :py:class:`prog_algs.predictors.MonteCarlo`.
+ An algorithm that is used to predict future states, given the initial state, a model, and an estimate of :term:`future load`. E.g., :py:class:`progpy.predictors.MonteCarlo`.
prediction
- A prediction of something (e.g., :term:`input`, :term:`state`, :term:`output`, :term:`event state`, etc.), with uncertainty, at one or more future times, as a result of a :term:`predictor` prediction step (:py:func:`prog_algs.predictors.Predictor.predict`). For example- a prediction of the future :term:`state` of a system at certain specified savepoints, returned from prediction using a :py:class:`prog_algs.predictors.MonteCarlo` predictor.
+ A prediction of something (e.g., :term:`input`, :term:`state`, :term:`output`, :term:`event state`, etc.), with uncertainty, at one or more future times, as a result of a :term:`predictor` prediction step (:py:func:`progpy.predictors.Predictor.predict`). For example- a prediction of the future :term:`state` of a system at certain specified savepoints, returned from prediction using a :py:class:`progpy.predictors.MonteCarlo` predictor.
surrogate
- A model that approximates the behavior of another model. Often used to generate a faster version of a model (e.g., for resource-constrained applications or to be used in optimization) or to test a data model. Generated using :py:func:`prog_models.PrognosticsModel.generate_surrogate` method.
+ A model that approximates the behavior of another model. Often used to generate a faster version of a model (e.g., for resource-constrained applications or to be used in optimization) or to test a data model. Generated using :py:func:`progpy.PrognosticsModel.generate_surrogate` method.
model
- A subclass of :py:class:`prog_models.PrognosticsModel` the describes the behavior of a system. Models are typically physics-based, data-driven (i.e., subclasses of :py:class:`prog_models.data_models.DataModel`), or some hybrid approach (e.g., physics informed machine learning).
+ A subclass of :py:class:`progpy.PrognosticsModel` that describes the behavior of a system. Models are typically physics-based, data-driven (i.e., subclasses of :py:class:`progpy.data_models.DataModel`), or some hybrid approach (e.g., physics-informed machine learning).
threshold
The conditions under which an :term:`event` is considered to have occurred.
@@ -50,7 +56,7 @@ Glossary
Prediction of (a) future performance and/or (b) the time at which one or more events of interest occur, for a system or a system of systems
data-driven model
- A model where the behavior is learned from data. In ProgPy, data-driven models derive from the parent class :py:class:`prog_models.data_models.DataModel`. A common example of data-driven models is models using neural networks (e.g., :py:class:`prog_models.data_models.LSTMStateTransitionModel`).
+ A model where the behavior is learned from data. In ProgPy, data-driven models derive from the parent class :py:class:`progpy.data_models.DataModel`. A common example of data-driven models is models using neural networks (e.g., :py:class:`progpy.data_models.LSTMStateTransitionModel`).
physics-based model
A model where behavior is described by the physics of the system. Physics-based models are typically :term:`parameterized`, so that exact behavior of the system can be configured or learned (through parameter estimation).
@@ -68,13 +74,13 @@ Glossary
Noise applied in the user provided :term:`future load` function. This is used to represent uncertainty in how the system is loaded in the future.
state estimation
- State estimation is the process from which the internal model :term:`state` (x) is estimated using :term:`input` (i.e., loading) and :term:`output` (i.e., sensor data). State estimation is necessary for cases where model state isn't directly measurable (i.e., `hidden state`) or where there is sensor noise. Most state estimators estimate the state with some representation of uncertainty. An algorithm that performs state estimation is called a :term:`state estimator` and is included in the prog_algs.state_estimators package
+ State estimation is the process from which the internal model :term:`state` (x) is estimated using :term:`input` (i.e., loading) and :term:`output` (i.e., sensor data). State estimation is necessary for cases where model state isn't directly measurable (i.e., `hidden state`) or where there is sensor noise. Most state estimators estimate the state with some representation of uncertainty. An algorithm that performs state estimation is called a :term:`state estimator` and is included in the progpy.state_estimators package
time of event
The time at which an :term:`event` is predicted to occur (i.e., when :term:`threshold` is reached). Sometimes abbreviated as ToE. When the event of interest is failure, this is frequently referred to as End of Life (EOL).
time to event
- The time remaining until :term:`time of event`. Sometimes abbreviated as TtE. When the :term:`event`` of interest is failure, this is frequently referred to as Remaining Useful Life (RUL). :math:`TtE = ToE - t` where :math:`t` is the current time. Sometimes abbreviated as TtE.
+ The time remaining until :term:`time of event`. Sometimes abbreviated as TtE. When the :term:`event` of interest is failure, this is frequently referred to as Remaining Useful Life (RUL). :math:`TtE = ToE - t` where :math:`t` is the current time. Sometimes abbreviated as TtE.
time of prediction
The time at which a prediction is performed. Sometimes abbreviated as ToP or :math:`t_p`.
@@ -83,13 +89,13 @@ Glossary
The time at which the last measurement was performed that was used for state estimation. Sometimes abbreviated as ToM or :math:`t_m`.
direct-prediction model
- A model where the :term:`time of event` is directly estimated from the current state and/or :term:`future load`, instead of predicted through simulation to threshold. These are implemented using the :py:meth:`prog_models.PrognosticsModel.time_to_event` method.
+ A model where the :term:`time of event` is directly estimated from the current state and/or :term:`future load`, instead of predicted through simulation to threshold. These are implemented using the :py:meth:`progpy.PrognosticsModel.time_to_event` method.
state-transition model
A model where the :term:`time of event` is predicted through simulation to threshold. Most prognostic models are state-transition models.
composite model
- A model consisting of multiple inter-related Prognostics Models, where the :term:`input` of one :term:`model` is a function of the :term:`output` or :term:`state` of another. This is a tool for representing system-of-systems. Composite models are implemented using the :py:class:`prog_models.CompositeModel` class.
+ A model consisting of multiple inter-related Prognostics Models, where the :term:`input` of one :term:`model` is a function of the :term:`output` or :term:`state` of another. This is a tool for representing system-of-systems. Composite models are implemented using the :py:class:`progpy.CompositeModel` class.
system-of-systems
- A system consisting of multiple inter-related systems, where one system affects the others. In ProgPy, system-of-systems are reporsented using :term:`composite models `. Composite models are implemented using the :py:class:`prog_models.CompositeModel` class.
+ A system consisting of multiple inter-related systems, where one system affects the others. In ProgPy, system-of-systems are represented using :term:`composite models `. Composite models are implemented using the :py:class:`progpy.CompositeModel` class.
diff --git a/docs/_sources/guide.rst b/docs/_sources/guide.rst
index ae57686..6a6e575 100644
--- a/docs/_sources/guide.rst
+++ b/docs/_sources/guide.rst
@@ -10,6 +10,24 @@ ProgPy Guide
prog_algs_guide
prog_server_guide
+The ProgPy framework consists of three key components that combine to create a flexible and extensible prognostics architecture.
+
+.. image:: images/ProgPyComponents.png
+ :align: center
+
+1.
+ The **Prognostics Models** are the backbone of the ProgPy architecture. Models describe the specific system that prognostics will be applied to and how the system will evolve with time. Everything else within ProgPy (e.g., simulation capabilities and prognostics tools) is built on top of a model.
+
+ ProgPy supports models that are physics-based, data-driven, or hybrid. ProgPy includes some built-in models (see examples below) but is also written in an easily adaptable way so users can implement models specific to their use-cases.
+
+2.
+ The **Prognostics Engine** encapsulates the complex logic of prognostics in a way that is modular and extensible. It includes the necessary tools to perform prognostics on the model, including state estimation, prediction, and uncertainty management. The modularity of the framework allows these capabilities to work with any model (built-in or user-defined), and the extensibility of the architecture allows users to additionally create their own methodologies.
+
+3.
+ The **Prognostics Support Tools** are a collection of capabilities to help users build new functionalities or understand prognostics results.
+
+These three key components come together to create the comprehensive framework that is ProgPy. More details will be shared in the coming pages.
+
This page is a general guide for ProgPy. To access a guide specific to the features you're using, select it in the menu below.
.. panels::
diff --git a/docs/_sources/guide.rst.txt b/docs/_sources/guide.rst.txt
index 895078e..ae57686 100644
--- a/docs/_sources/guide.rst.txt
+++ b/docs/_sources/guide.rst.txt
@@ -10,7 +10,7 @@ ProgPy Guide
prog_algs_guide
prog_server_guide
-This page is a general guide for ProgPy. ProgPy consists of three packages: prog_models, prog_algs, prog_server. To access a guide specific to the package you're using, select it in the menu below.
+This page is a general guide for ProgPy. To access a guide specific to the features you're using, select it in the menu below.
.. panels::
:img-top-cls: pt-2, pb-2
@@ -21,17 +21,17 @@ This page is a general guide for ProgPy. ProgPy consists of three packages: prog
---
:img-top: images/cube.png
- .. link-button:: prog_models Guide
+ .. link-button:: Modeling and Sim Guide
:type: ref
- :text: prog_models
+ :text: Modeling and Simulation
:classes: stretched-link btn-outline-primary btn-block
---
:img-top: images/Gear-icon.png
- .. link-button:: prog_algs Guide
+ .. link-button:: State Estimation and Prediction Guide
:type: ref
- :text: prog_algs
+ :text: State Estimation and Prediction
:classes: stretched-link btn-outline-primary btn-block
---
@@ -51,7 +51,7 @@ ProgPy uses the following definition for :term:`prognostics`:
Prediction of (a) future performance and/or (b) the time at which one or more events of interest occur, for a system or a system of systems
-This is similar to those described in [#Goebel2017]_. This approach is intended to be generic, capable of describing system behavior based on physical principles (i.e., physics-based), learning from data (i.e., data-based), or hybrid approaches (e.g., Physics-Informed Machine Learning).
+This is similar to definitions from [#Goebel2017]_. This approach is intended to be generic, capable of describing system behavior based on physical principles (i.e., physics-based), learning from data (i.e., data-based), or hybrid approaches (e.g., Physics-Informed Machine Learning).
In general, the ProgPy prognostic approach is illustrated below.
@@ -59,18 +59,18 @@ In general, the ProgPy prognostic approach is illustrated below.
The foundation of prognostics is a :term:`model`. Models describe the behavior of a system or system of systems. A prognostics model specifically describes how the state of the system evolves with time. Prognostic models typically come in one of 4 categories: knowledge-based, :term:`physics-based`, :term:`data-driven`, or some combination of those three (i.e., hybrid).
-Functionality for creation, simulation, and analysis of models can be found in the :ref:`prog_models` package. That package also includes some example models and tools to access relevant data for model creation. For more information see the :ref:`prog_models Guide`.
+Details on functionality for creation, simulation, and analysis of models can be found in the :ref:`Modeling and Simulation Guide `. ProgPy also includes some example models and tools to access relevant data for model creation.
ProgPy divides the prognostic process into two steps: :term:`state estimation` and :term:`prediction`. State estimation is the process of determining the current system state (x), with some uncertainty, given the system parameters (:math:`\Theta`), system loading (u) and measurements (z). There are various methods used for this, such as Kalman Filters and Particle Filters. These methods utilize a prognostics model, comparing measurements (z) with those predicted from the system output equation.
In the prediction step, the state estimate at the prediction time and system model are used together to estimate system degradation with time. This is most commonly done using a variant of the Monte Carlo method with the model state transition equation. Prediction is often computationally expensive, especially for sample-based approaches with strict precision requirements (which therefore require large number of samples). ProgPy provides some potential solutions to combat this, such as :term:`surrogate` models, vectorization, and model configuration options.
-Algorithms for :term:`state estimation` and :term:`prediction` along with tools analyzing and visualizing results of state estimation and prediction, managing uncertainty, and creating new state estimators or predictors can be found in the :ref:`prog_algs` package. For more information see the :ref:`prog_algs Guide`.
+For algorithms for :term:`state estimation` and :term:`prediction`, along with tools for analyzing and visualizing results of state estimation and prediction, managing uncertainty, and creating new state estimators or predictors, see the :ref:`State Estimation and Prediction Guide`.
More information
------------------------------
-For more information, see the inidividual pages for each of the three ProgPy Packages
+For more information, see the individual guides.
.. panels::
:img-top-cls: pt-2, pb-2
@@ -81,17 +81,17 @@ For more information, see the inidividual pages for each of the three ProgPy Pac
---
:img-top: images/cube.png
- .. link-button:: prog_models Guide
+ .. link-button:: Modeling and Sim Guide
:type: ref
- :text: prog_models
+ :text: Modeling and Simulation
:classes: stretched-link btn-outline-primary btn-block
---
:img-top: images/Gear-icon.png
- .. link-button:: prog_algs Guide
+ .. link-button:: State Estimation and Prediction Guide
:type: ref
- :text: prog_algs
+ :text: State Estimation and Prediction
:classes: stretched-link btn-outline-primary btn-block
---
diff --git a/docs/_sources/index.rst b/docs/_sources/index.rst
index b44daf4..e345eef 100644
--- a/docs/_sources/index.rst
+++ b/docs/_sources/index.rst
@@ -25,29 +25,7 @@ ProgPy documentation is split into three senctions described below.
glossary
dev_guide
-Installing progpy
------------------------
-
-.. tabs::
-
- .. tab:: Stable Version (Recommended)
-
- The latest stable release of ProgPy is hosted on PyPi. For most users, this version will be adequate. To install via the command line, use the following command:
-
- .. code-block:: console
-
- $ pip install progpy
-
- .. tab:: Pre-Release
-
- Users who would like to contribute to ProgPy or would like to use pre-release features can do so using the `ProgPy GitHub repo `__. This isn't recommended for most users as this version may be unstable. To do this, use the following commands:
-
- .. code-block:: console
-
- $ git clone https://github.com/nasa/progpy
- $ cd progpy
- $ git checkout dev
- $ pip install -e .
+.. include:: installing.rst
Citing This Repository
-----------------------
@@ -56,16 +34,16 @@ Use the following to cite this repository:
@misc{2023_nasa_progpy,
| author = {Christopher Teubert and Katelyn Jarvis Griffith and Matteo Corbetta and Chetan Kulkarni and Portia Banerjee and Jason Watkins and Matthew Daigle},
| title = {{ProgPy Python Prognostics Packages}},
- | month = Oct,
- | year = 2023,
- | version = {1.6},
+ | month = May,
+ | year = 2024,
+ | version = {1.7},
| url = {https://nasa.github.io/progpy}
| doi = {10.5281/ZENODO.8097013}
| }
The corresponding reference should look like this:
-C. Teubert, K. Jarvis Griffith, M. Corbetta, C. Kulkarni, P. Banerjee, J. Watkins, M. Daigle, ProgPy Python Prognostics Packages, v1.6, Oct 2023. URL https://github.com/nasa/progpy.
+C. Teubert, K. Jarvis Griffith, M. Corbetta, C. Kulkarni, P. Banerjee, J. Watkins, M. Daigle, ProgPy Python Prognostics Packages, v1.7, May 2024. URL https://github.com/nasa/progpy.
Contributing and Partnering
-----------------------------
diff --git a/docs/_sources/index.rst.txt b/docs/_sources/index.rst.txt
index 0342f5a..9075a31 100644
--- a/docs/_sources/index.rst.txt
+++ b/docs/_sources/index.rst.txt
@@ -3,15 +3,15 @@ ProgPy Prognostics Python Packages
.. raw:: html
-
+
-The NASA Prognostics Python Packages (ProgPy) are a set of open-sourced python packages supporting research and development of prognostics and health management and predictive maintenance tools. They implement architectures and common functionality of prognostics, supporting researchers and practitioners.
+NASA's ProgPy is an open-source Python package supporting research and development of prognostics, health management, and predictive maintenance tools. It implements architectures and common functionality of prognostics, supporting researchers and practitioners. The ProgPy package is a combination of the original prog_models and prog_algs packages.
-ProgPy consists of a set of packages, described below. See the documentation specific to each package for more information.
+ProgPy documentation is split into three sections, described below.
-* :ref:`prog_models` : Tools for defining, building, using, and testing models for prognostics
-* :ref:`prog_algs` : Tools for performing and benchmarking prognostics and state estimation
+* :ref:`Modeling and Simulation` : defining, building, using, and testing models for prognostics
+* :ref:`State Estimation and Prediction` : performing and benchmarking prognostics and state estimation
* :ref:`prog_server` and :ref:`prog_client` : A simplified implementation of a Service-Oriented Architecture (SOA) for performing prognostics and associated client
.. toctree::
@@ -25,23 +25,47 @@ ProgPy consists of a set of packages, described below. See the documentation spe
glossary
dev_guide
+Installing progpy
+-----------------------
+
+.. tabs::
+
+ .. tab:: Stable Version (Recommended)
+
+ The latest stable release of ProgPy is hosted on PyPi. For most users, this version will be adequate. To install via the command line, use the following command:
+
+ .. code-block:: console
+
+ $ pip install progpy
+
+ .. tab:: Pre-Release
+
+ Users who would like to contribute to ProgPy or would like to use pre-release features can do so using the `ProgPy GitHub repo `__. This isn't recommended for most users as this version may be unstable. To do this, use the following commands:
+
+ .. code-block:: console
+
+ $ git clone https://github.com/nasa/progpy
+ $ cd progpy
+ $ git checkout dev
+ $ pip install -e .
Citing This Repository
-----------------------
Use the following to cite this repository:
-@misc{2022_nasa_progpy,
- | author = {Christopher Teubert and Katelyn Jarvis and Matteo Corbetta and Chetan Kulkarni and Matthew Daigle},
+@misc{2023_nasa_progpy,
+ | author = {Christopher Teubert and Katelyn Jarvis Griffith and Matteo Corbetta and Chetan Kulkarni and Portia Banerjee and Jason Watkins and Matthew Daigle},
| title = {{ProgPy Python Prognostics Packages}},
| month = May,
- | year = 2023,
- | version = {1.5},
+ | year = 2024,
+ | version = {1.7},
| url = {https://nasa.github.io/progpy}
+ | doi = {10.5281/ZENODO.8097013}
| }
The corresponding reference should look like this:
-C. Teubert, K. Jarvis, M. Corbetta, C. Kulkarni, M. Daigle, ProgPy Python Prognostics Packages, v1.5, May 2022. URL https://github.com/nasa/progpy.
+C. Teubert, K. Jarvis Griffith, M. Corbetta, C. Kulkarni, P. Banerjee, J. Watkins, M. Daigle, ProgPy Python Prognostics Packages, v1.7, May 2024. URL https://github.com/nasa/progpy.
Contributing and Partnering
-----------------------------
diff --git a/docs/_sources/installing.rst b/docs/_sources/installing.rst
new file mode 100644
index 0000000..17bc478
--- /dev/null
+++ b/docs/_sources/installing.rst
@@ -0,0 +1,35 @@
+Installing ProgPy
+-----------------------
+
+.. tabs::
+
+ .. tab:: Stable Version (Recommended)
+
+ The latest stable release of ProgPy is hosted on PyPi. For most users, this version will be adequate. To install via the command line, use the following command:
+
+ .. code-block:: console
+
+ $ pip install progpy
+
+ If you will be using the datadriven tools (e.g., LSTM model), install the datadriven dependencies as well using the following command:
+
+ .. code-block:: console
+
+ $ pip install progpy[datadriven]
+
+ .. tab:: Pre-Release
+
+ Users who would like to contribute to ProgPy or would like to use pre-release features can do so using the `ProgPy GitHub repo `__. This isn't recommended for most users as this version may be unstable. To do this, use the following commands:
+
+ .. code-block:: console
+
+ $ git clone https://github.com/nasa/progpy
+ $ cd progpy
+ $ git checkout dev
+ $ pip install -e .
+
+ If you will be using the datadriven tools (e.g., LSTM model), install the datadriven dependencies as well using the following command:
+
+ .. code-block:: console
+
+ $ pip install -e '.[datadriven]'
diff --git a/docs/_sources/npr7150.rst.txt b/docs/_sources/npr7150.rst.txt
index ea47977..7854c6f 100644
--- a/docs/_sources/npr7150.rst.txt
+++ b/docs/_sources/npr7150.rst.txt
@@ -64,9 +64,7 @@ Life Cycle Management
+-------+----------------------------------+------------+-----------------------------------------------------------------+
| 036 | Software Processes | FC | See notes below |
+-------+----------------------------------+------------+-----------------------------------------------------------------+
-| 037M | Document Milestones | FC | `Milestones `__ |
-+-------+----------------------------------+------------+-----------------------------------------------------------------+
-| 037A | Document Milestones | FC | `Milestones `__ |
+| 037M | Document Milestones | FC | `Milestones `__ |
+-------+----------------------------------+------------+-----------------------------------------------------------------+
| 037S | Document Milestones | FC | `Milestones `__ |
+-------+----------------------------------+------------+-----------------------------------------------------------------+
@@ -80,9 +78,7 @@ Life Cycle Management
+-------+----------------------------------+------------+-----------------------------------------------------------------+
| 039e | Software Reviews | FC | Software Assurance Officer gives final approval after reviews |
+-------+----------------------------------+------------+-----------------------------------------------------------------+
-| 040aM | Products | FC | Kept in `Repo `__ |
-+-------+----------------------------------+------------+-----------------------------------------------------------------+
-| 040aA | Products | FC | Kept in `Repo `__ |
+| 040aM | Products | FC | Kept in `Repo `__ |
+-------+----------------------------------+------------+-----------------------------------------------------------------+
| 040aS | Products | FC | Kept in `Repo `__ |
+-------+----------------------------------+------------+-----------------------------------------------------------------+
@@ -90,9 +86,7 @@ Life Cycle Management
+-------+----------------------------------+------------+-----------------------------------------------------------------+
| 040c | Non-conformances | FC | See github issues |
+-------+----------------------------------+------------+-----------------------------------------------------------------+
-| 040dM | Change tracking | FC | See `Commits `__ |
-+-------+----------------------------------+------------+-----------------------------------------------------------------+
-| 040dA | Change tracking | FC | See `Commits `__ |
+| 040dM | Change tracking | FC | See `Commits `__ |
+-------+----------------------------------+------------+-----------------------------------------------------------------+
| 040dS | Change tracking | FC | See `Commits `__ |
+-------+----------------------------------+------------+-----------------------------------------------------------------+
@@ -143,9 +137,7 @@ Schedules
+-------+----------------------------------+------------+-----------------------------------------------------------------+
| SWE # | Description | Compliance | Evidence |
+=======+==================================+============+=================================================================+
-| 016M | Schedule Requirements | FC | `Milestones `__ |
-+-------+----------------------------------+------------+-----------------------------------------------------------------+
-| 016A | Schedule Requirements | FC | `Milestones `__ |
+| 016M | Schedule Requirements | FC | `Milestones `__ |
+-------+----------------------------------+------------+-----------------------------------------------------------------+
| 016S | Schedule Requirements | FC | `Milestones `__ |
+-------+----------------------------------+------------+-----------------------------------------------------------------+
@@ -288,9 +280,7 @@ Requirements
+-------+----------------------------------+------------+--------------------------------------------------------------------------------+
| SWE # | Description | Compliance | Evidence |
+=======+==================================+============+================================================================================+
-| 050M | Software Requirements | FC | `Enhancement Issues `__|
-+-------+----------------------------------+------------+--------------------------------------------------------------------------------+
-| 050A | Software Requirements | FC | `Enhancement Issues `__ |
+| 050M | Software Requirements | FC | `Enhancement Issues `__ |
+-------+----------------------------------+------------+--------------------------------------------------------------------------------+
| 050S | Software Requirements | FC | `Enhancement Issues `__|
+-------+----------------------------------+------------+--------------------------------------------------------------------------------+
@@ -320,18 +310,16 @@ Implementation
+-------+----------------------------------+------------+--------------------------------------------------------------------------------+
| 186 | Unit Test Repeatability | FC | Unit tests are created with each enhancement, run automatically with each PR. |
+-------+----------------------------------+------------+--------------------------------------------------------------------------------+
-| 063M | Software Version Description | FC | `See here `__ |
-+-------+----------------------------------+------------+--------------------------------------------------------------------------------+
-| 063A | Software Version Description | FC | `See here `__ |
+| 063M | Software Version Description | FC | `See here `__ |
+-------+----------------------------------+------------+--------------------------------------------------------------------------------+
| 063S | Software Version Description | FC | `See here `__ |
+-------+----------------------------------+------------+--------------------------------------------------------------------------------+
Static Analysis Methods Used:
-* CodeFactor.io (`prog_models `__, `prog_algs `__, `prog_server `__): Runs automatically in each PR. If issues are detected, they are noted in the PR chat.
-* LGTM (`prog_models `__, `prog_algs `__, `prog_server `__): Runs automatically in each PR. If issues are detected, they are noted in the PR chat.
-* Codecov (`prog_models `__, `prog_algs `__, `prog_server `__): Runs automatically in each PR. If issues are detected, they are noted in the PR chat.
+* CodeFactor.io (`progpy `__, `prog_server `__): Runs automatically in each PR. If issues are detected, they are noted in the PR chat.
+* LGTM (`progpy `__, `prog_server `__): Runs automatically in each PR. If issues are detected, they are noted in the PR chat.
+* Codecov (`progpy `__, `prog_server `__): Runs automatically in each PR. If issues are detected, they are noted in the PR chat.
* CodeQL Scanning: Runs automatically in each PR. If issues are detected, they are noted in the PR chat.
* Github Dependabot Alerts: Tracks dependencies and raises alerts for any issues.
@@ -343,21 +331,15 @@ Testing
+=======+==================================+============+======================================================================================================+
| 065a | Test Plan | FC | See this document. |
+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
-| 065bM | Test Procedures | FC | See `GitHub Actions Workflows `__.|
-+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
-| 065bA | Test Procedures | FC | See `GitHub Actions Workflows `__. |
+| 065bM | Test Procedures | FC | See `GitHub Actions Workflows `__. |
+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
| 065bS | Test Procedures | FC | See `GitHub Actions Workflows `__.|
+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
-| 065cM | Tests | FC | See `tests directory `__. |
-+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
-| 065cA | Tests | FC | See `tests directory `__. |
+| 065cM | Tests | FC | See `tests directory `__. |
+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
| 065cS | Tests | FC | See `tests directory `__. |
+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
-| 065dM | Test Reports | FC | See `Github Actions Results `__. |
-+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
-| 065dA | Test Reports | FC | See `Github Actions Results `__. |
+| 065dM | Test Reports | FC | See `Github Actions Results `__. |
+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
| 065dS | Test Reports | FC | See `Github Actions Results `__. |
+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
@@ -367,9 +349,7 @@ Testing
+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
| 071 | Update Test Plans | FC | Workflow, tests, and this document are updated as requirements change |
+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
-| 186M | Code Coverage | FC | See `Codecov `__ |
-+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
-| 186A | Code Coverage | FC | See `Codecov `__ |
+| 186M | Code Coverage | FC | See `Codecov `__ |
+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
| 186S | Code Coverage | FC | See `Codecov `__ |
+-------+----------------------------------+------------+------------------------------------------------------------------------------------------------------+
@@ -431,9 +411,7 @@ Configuration Management
+=======+==================================+============+===================================================================+
| 079 | Configuration Management Plan | FC | See this document |
+-------+----------------------------------+------------+-------------------------------------------------------------------+
-| 080M | Evaluate Sotware Product Changes | FC | See `PRs `__ |
-+-------+----------------------------------+------------+-------------------------------------------------------------------+
-| 080A | Evaluate Sotware Product Changes | FC | See `PRs `__ |
+| 080M | Evaluate Software Product Changes | FC | See `PRs `__ |
+-------+----------------------------------+------------+-------------------------------------------------------------------+
| 080S | Evaluate Software Product Changes | FC | See `PRs `__ |
+-------+----------------------------------+------------+-------------------------------------------------------------------+
@@ -445,9 +423,7 @@ Configuration Management
+-------+----------------------------------+------------+-------------------------------------------------------------------+
| 082c | Authorization Authority | FC | See this document |
+-------+----------------------------------+------------+-------------------------------------------------------------------+
-| 083M | Configuration Status | FC | See `Branches `__ |
-+-------+----------------------------------+------------+-------------------------------------------------------------------+
-| 083A | Configuration Status | FC | See `Branches `__ |
+| 083M | Configuration Status | FC | See `Branches `__ |
+-------+----------------------------------+------------+-------------------------------------------------------------------+
| 083S | Configuration Status | FC | See `Branches `__ |
+-------+----------------------------------+------------+-------------------------------------------------------------------+
@@ -460,21 +436,21 @@ Note on SWE084- Configuration Audits: Configuration audits are conducted in part
* To a large degree- configuration audits are performed automatically by GitHub actions and branch restrictions. These check the following:
- * That tests were run (they are automatically run), passed, and results are recorded
- * That files conform with copyright rules
- * That required code reviews were performed (Requirement for branch merging)
+ * That tests were run (they are automatically run), passed, and results are recorded
+ * That files conform with copyright rules
+ * That required code reviews were performed (Requirement for branch merging)
* In performing a code review, the reviewing user confirms:
- * That the change is linked to a requirement (i.e., feature issue) and the requirement was met or that it is linked to another issue (e.g., bug report)
- * That appropriate tests exist
+ * That the change is linked to a requirement (i.e., feature issue) and the requirement was met or that it is linked to another issue (e.g., bug report)
+ * That appropriate tests exist
* In performing a release review, the project manager confirms:
- * That all issues are completed
- * That all tests pass, have proper documentation
- * That documentation has been updated and matches the code
- * That a schedule exists for the next release and is in the proper place
+ * That all issues are completed
+ * That all tests pass and have proper documentation
+ * That documentation has been updated and matches the code
+ * That a schedule exists for the next release and is in the proper place
Non-Conformances
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -482,9 +458,7 @@ Non-Conformances
+-------+----------------------------------+------------+-------------------------------------------------------------------+
| SWE # | Description | Compliance | Evidence |
+=======+==================================+============+===================================================================+
-| 201M | Track non-conformances | FC | See `Github Issues `__|
-+-------+----------------------------------+------------+-------------------------------------------------------------------+
-| 201A | Track non-conformances | FC | See `Github Issues `__ |
+| 201M | Track non-conformances | FC | See `Github Issues `__ |
+-------+----------------------------------+------------+-------------------------------------------------------------------+
| 201S | Track non-conformances | FC | See `Github Issues `__|
+-------+----------------------------------+------------+-------------------------------------------------------------------+
@@ -503,7 +477,7 @@ Assessed, there are some existing prognostics tools but no general Python packag
Requirement Tracking
********************
-Requirements are tracked as issues with the "Enhancement" label (See `prog_models `__, `prog_algs `__, `prog_server `__ Enhancement Issues). An issue template is used to ensure that the requirement has the desired information. Issues are closed to indicate the requirement has been met. Closing a requirement issue is done with a pull request, which is linked to the relevant requirement, for tracability. Closing the requirement issue requires a code review (see above for details), and requires implementation of passing tests that test the requirement (i.e., verification tests). The tests are reviewed with the code implementing the requirement. Issues are assigned to a milestone (i.e., release) indicating the requirements for that release. Github automatically tracks any changes to the issues (i.e., requirements)
+Requirements are tracked as issues with the "Enhancement" label (See `progpy `__, `prog_server `__ Enhancement Issues). An issue template is used to ensure that the requirement has the desired information. Issues are closed to indicate the requirement has been met. Closing a requirement issue is done with a pull request, which is linked to the relevant requirement, for traceability. Closing the requirement issue requires a code review (see above for details), and requires implementation of passing tests that test the requirement (i.e., verification tests). The tests are reviewed with the code implementing the requirement. Issues are assigned to a milestone (i.e., release) indicating the requirements for that release. GitHub automatically tracks any changes to the issues (i.e., requirements).
Dependencies
**************
@@ -511,73 +485,81 @@ The following dependencies are used in the project:
* `numpy `__
- * Requirements met: Various mathematical and array functions
- * Documentation: https://numpy.org
- * Usage Rights: Released under the BSD 3-Clause License
- * Future Support: expected- Numpy is a common tool still under development and actively supported
+ * Requirements met: Various mathematical and array functions
+ * Documentation: https://numpy.org
+ * Usage Rights: Released under the BSD 3-Clause License
+ * Future Support: expected- Numpy is a common tool still under development and actively supported
* `scipy `__
- * Requirements met: Various mathematical and array functions
- * Documentation: https://www.scipy.org
- * Usage Rights: Released under the BSD 3-Clause License
- * Future Support: expected- Scipy is a common tool still under development and actively supported
+ * Requirements met: Various mathematical and array functions
+ * Documentation: https://www.scipy.org
+ * Usage Rights: Released under the BSD 3-Clause License
+ * Future Support: expected- Scipy is a common tool still under development and actively supported
* `matplotlib `__
- * Requirements met: Various figure generation methods
- * Documentation: https://matplotlib.org
- * Usage Rights: Released under the BSD 3-Clause License
- * Future Support: expected- Matplotlib is a common tool still under development and actively supported
+ * Requirements met: Various figure generation methods
+ * Documentation: https://matplotlib.org
+ * Usage Rights: Released under the BSD 3-Clause License
+ * Future Support: expected- Matplotlib is a common tool still under development and actively supported
* `pandas `__
- * Requirements met: Various data analysis methods (especially for datasets)
- * Documentation: https://pandas.pydata.org
- * Usage Rights: Released under the BSD 3-Clause License
- * Future Support: expected- Pandas is a common tool still under development and actively supported
+ * Requirements met: Various data analysis methods (especially for datasets)
+ * Documentation: https://pandas.pydata.org
+ * Usage Rights: Released under the BSD 3-Clause License
+ * Future Support: expected- Pandas is a common tool still under development and actively supported
* `tensorflow `__
- * Requirements met: Machine learning algorithms
- * Documentation: https://www.tensorflow.org
- * Usage Rights: Released under the Apache License 2.0
- * Future Support: expected- Tensorflow is a common tool still under development and actively supported
+ * Requirements met: Machine learning algorithms
+ * Documentation: https://www.tensorflow.org
+ * Usage Rights: Released under the Apache License 2.0
+ * Future Support: expected- Tensorflow is a common tool still under development and actively supported
* `chaospy `__
- * Requirements met: Uncertainty quantification and polynomial chaos expansion logic
- * Documentation: http://chaospy.readthedocs.io
- * Usage Rights: Released under the MIT License
- * Future Support: expected- Chaospy is a common tool still under development and actively supported
+ * Requirements met: Uncertainty quantification and polynomial chaos expansion logic
+ * Documentation: http://chaospy.readthedocs.io
+ * Usage Rights: Released under the MIT License
+ * Future Support: expected- Chaospy is a common tool still under development and actively supported
-* `FilterPy `__ (prog_algs only)
+* `FilterPy `__
- * Requirements met: Algorithms for state estimators and predictors
- * Documentation: https://filterpy.readthedocs.io
- * Usage Rights: Released under the MIT License
- * Future Support: expected- FilterPy is a common tool still under development and actively supported
+ * Requirements met: Algorithms for state estimators and predictors
+ * Documentation: https://filterpy.readthedocs.io
+ * Usage Rights: Released under the MIT License
+ * Future Support: expected- FilterPy is a common tool still under development and actively supported
* `Requests `__ (prog_server only)
- * Requirements met: HTTP requests
- * Documentation: https://requests.readthedocs.io
- * Usage Rights: Released under the Apache License
- * Future Support: expected- Requests is a common tool still under development and actively supported
+ * Requirements met: HTTP requests
+ * Documentation: https://requests.readthedocs.io
+ * Usage Rights: Released under the Apache License
+ * Future Support: expected- Requests is a common tool still under development and actively supported
* `Flask `__ (prog_server only)
- * Requirements met: Web server
- * Documentation: https://flask.palletsprojects.com/en/1.1.x
- * Usage Rights: Released under the BSD 3-Clause License
- * Future Support: expected- Flask is a common tool still under development and actively supported
+ * Requirements met: Web server
+ * Documentation: https://flask.palletsprojects.com/en/1.1.x
+ * Usage Rights: Released under the BSD 3-Clause License
+ * Future Support: expected- Flask is a common tool still under development and actively supported
* `urllib3 `__ (prog_server only)
- * Requirements met: HTTP requests
- * Documentation: https://urllib3.readthedocs.org
- * Usage Rights: Released under the MIT License
- * Future Support: expected- urllib3 is a common tool still under development and actively supported
+ * Requirements met: HTTP requests
+ * Documentation: https://urllib3.readthedocs.org
+ * Usage Rights: Released under the MIT License
+ * Future Support: expected- urllib3 is a common tool still under development and actively supported
+
+* `fastdtw `__
+
+ * Requirements met: Dynamic time warping (for dtw metric used in calc_error)
+ * Documentation: https://github.com/slaypni/fastdtw
+ * Usage Rights: Released under the MIT License
+ * Future Support: Unknown- at the time of investigation, the last release was 3.5 years ago and the branch was last updated 2 years ago. However, the code is simple and the tool is still used by many projects.
+ * Other notes: validated by comparing the output of fastdtw to dtaidistance and the algorithm from the 'Towards Data Science' page (https://towardsdatascience.com/dynamic-time-warping-3933f25fcdd). Additionally, the team inspected the output of the package and it seemed reasonable. Finally, a review of open issues on their GitHub repository and a search for known vulnerabilities yielded no concerns. Its only dependency is numpy, which is a trusted package.
Notes for all:
@@ -586,7 +568,7 @@ Notes for all:
Tracability Notes
*****************
-Hazards and non-conformances are tracked as issues with the label bug (See `prog_models `__, `prog_algs `__, `prog_server `__). In the template for a bug report, there is a section asking for relevant enhancement issues (i.e., requirements). This linking establishes tracability from hazards/non-conformances to the underlying requirement. These linkings are automatically marked by the github system in the requirement issue. Additionally, to close an enhancement issue (i.e., requirement), passing verification tests must be created and checked in. The PR where these tests are created and the implementation is completed is linked to the issue establishing tracability from requirement -> verification test. These tests run automatically at every change/PR.
+Hazards and non-conformances are tracked as issues with the label bug (See `progpy `__, `prog_server `__). In the template for a bug report, there is a section asking for relevant enhancement issues (i.e., requirements). This linking establishes traceability from hazards/non-conformances to the underlying requirement. These links are automatically marked by the GitHub system in the requirement issue. Additionally, to close an enhancement issue (i.e., requirement), passing verification tests must be created and checked in. The PR where these tests are created and the implementation is completed is linked to the issue, establishing traceability from requirement -> verification test. These tests run automatically at every change/PR.
Additionally, requirements are assigned to milestones/releases, establishing bi-directional traceability to these as well.
@@ -597,3 +579,5 @@ Summary: The following tracabilities are maintained:
* Requirement <-> Verification Test & Results
* Requirement <-> Implementation
* Release/Milestone <-> Requirement
+
+For past bugs, enhancements, pull requests, etc., look at the previously used prog_models and prog_algs repositories.
diff --git a/docs/_sources/prog_algs_guide.rst b/docs/_sources/prog_algs_guide.rst
index c811a75..b609b87 100644
--- a/docs/_sources/prog_algs_guide.rst
+++ b/docs/_sources/prog_algs_guide.rst
@@ -13,29 +13,7 @@ State Estimation and Prediction Guide
The Prognostic Python Package (progpy) is a Python framework for prognostics (computation of remaining useful life or future states) of engineering systems. The package provides an extendable set of algorithms for state estimation and prediction, including uncertainty propagation. The package also includes metrics, visualization, and analysis tools needed to measure prognostic performance. The algorithms use prognostic models (from :ref:`Modeling and Simulation Guide`) to perform estimation and prediction functions. The package enables the rapid development of prognostics solutions for given models of components and systems. Different algorithms can be easily swapped to do comparative studies and evaluations to select the best algorithm for the application at hand.
-Installing progpy
------------------------
-
-.. tabs::
-
- .. tab:: Stable Version (Recommended)
-
- The latest stable release of ProgPy is hosted on PyPi. For most users, this version will be adequate. To install via the command line, use the following command:
-
- .. code-block:: console
-
- $ pip install progpy
-
- .. tab:: Pre-Release
-
- Users who would like to contribute to progpy or would like to use pre-release features can do so using the `progpy GitHub repo `__. This isn't recommended for most users as this version may be unstable. To do this, use the following commands:
-
- .. code-block:: console
-
- $ git clone https://github.com/nasa/progpy
- $ cd progpy
- $ git checkout dev
- $ pip install -e .
+.. include:: installing.rst
Summary
---------
@@ -107,8 +85,8 @@ The internal state is stored in the estimators x property as a UncertainData sub
>>> filt.estimate(0.1, load, new_data)
>>> print('Posterior: ', filt.x.mean)
-Extending
-************
+Extending State Estimators
+****************************
A new :term:`state estimator` is created by extending the :class:`progpy.state_estimators.StateEstimator` class.
@@ -164,8 +142,8 @@ The stepsize and times at which results are saved can be defined like in a simul
.. autoclass:: progpy.predictors.MonteCarloPredictor
-Extending
-*************
+Extending Predictors
+**********************
A new :term:`predictor` is created by extending the :class:`progpy.predictors.Predictor` class.
@@ -293,12 +271,6 @@ The best way to learn how to use `progpy` is through the `tutorial `
.. automodule:: basic_example
-* :download:`examples.basic_example_battery <../../progpy/examples/basic_example_battery.py>`
- .. automodule:: basic_example_battery
-
-.. * :download:`examples.benchmarking_example <../../progpy/examples/benchmarking_example.py>`
-.. .. automodule:: benchmarking_example
-
* :download:`examples.eol_event <../../progpy/examples/eol_event.py>`
.. automodule:: eol_event
@@ -308,9 +280,6 @@ The best way to learn how to use `progpy` is through the `tutorial `
.. automodule:: horizon
-* :download:`examples.kalman_filter <../../progpy/examples/kalman_filter.py>`
- .. automodule:: kalman_filter
-
* :download:`examples.measurement_eqn_example <../../progpy/examples/measurement_eqn_example.py>`
.. automodule:: measurement_eqn_example
diff --git a/docs/_sources/prog_algs_guide.rst.txt b/docs/_sources/prog_algs_guide.rst.txt
index ba8650a..57a81af 100644
--- a/docs/_sources/prog_algs_guide.rst.txt
+++ b/docs/_sources/prog_algs_guide.rst.txt
@@ -1,4 +1,4 @@
-prog_algs Guide
+State Estimation and Prediction Guide
===================================================
.. role:: pythoncode(code)
@@ -6,34 +6,34 @@ prog_algs Guide
.. raw:: html
-
+
.. image:: https://mybinder.org/badge_logo.svg
- :target: https://mybinder.org/v2/gh/nasa/prog_algs/master?labpath=tutorial.ipynb
+ :target: https://mybinder.org/v2/gh/nasa/progpy/master?labpath=tutorial.ipynb
-The Prognostic Algorithms Package is a python framework for prognostics (computation of remaining useful life or future states) of engineering systems. The package provides an extendable set of algorithms for state estimation and prediction, including uncertainty propagation. The package also include metrics, visualization, and analysis tools needed to measure the prognostic performance. The algorithms use prognostic models (from :ref:`prog_models`) to perform estimation and prediction functions. The package enables the rapid development of prognostics solutions for given models of components and systems. Different algorithms can be easily swapped to do comparative studies and evaluations of different algorithms to select the best for the application at hand.
+The Prognostic Python Package (progpy) is a Python framework for prognostics (computation of remaining useful life or future states) of engineering systems. The package provides an extendable set of algorithms for state estimation and prediction, including uncertainty propagation. The package also includes metrics, visualization, and analysis tools needed to measure prognostic performance. The algorithms use prognostic models (from :ref:`Modeling and Simulation Guide`) to perform estimation and prediction functions. The package enables the rapid development of prognostics solutions for given models of components and systems. Different algorithms can be easily swapped to do comparative studies and evaluations to select the best algorithm for the application at hand.
-Installing prog_algs
+Installing progpy
-----------------------
.. tabs::
.. tab:: Stable Version (Recommended)
- The latest stable release of prog_algs is hosted on PyPi. For most users (unless you want to contribute to the development of `prog_algs`), this version will be adequate. To install from the command line, use the following command:
+ The latest stable release of ProgPy is hosted on PyPI. For most users, this version will be adequate. To install via the command line, use the following command:
.. code-block:: console
- $ pip install prog_algs
+ $ pip install progpy
.. tab:: Pre-Release
- Users who would like to contribute to prog_algs or would like to use pre-release features can do so using the `prog_algs GitHub repo `__. This isn't recommended for most users as this version may be unstable. To do this, use the following commands:
+ Users who would like to contribute to progpy or would like to use pre-release features can do so using the `progpy GitHub repo `__. This isn't recommended for most users as this version may be unstable. To do this, use the following commands:
.. code-block:: console
- $ git clone https://github.com/nasa/prog_algs
- $ cd prog_algs
+ $ git clone https://github.com/nasa/progpy
+ $ cd progpy
$ git checkout dev
$ pip install -e .
@@ -44,16 +44,16 @@ The structure of the packages is illustrated below:
.. image:: images/package_structure.png
-Prognostics is performed using `State Estimators `__ and `Predictors `__. State Estimators are resposible for estimating the current state of the modeled system using sensor data and a prognostics model (see: `prog_models package `__). The state estimator then produces an estimate of the system state with uncertainty in the form of an `uncertain data object `__. This state estimate is used by the predictor to predict when events will occur (Time of Event, ToE - returned as an `uncertain data object `__), and future system states (returned as a `Prediction object `__).
+Prognostics is performed using :ref:`State Estimators ` and :ref:`Predictors `. State Estimators are responsible for estimating the current state of the modeled system using sensor data and a prognostics model (see: :ref:`Modeling and Simulation Guide `). The state estimator then produces an estimate of the system state with uncertainty in the form of an :ref:`uncertain data object `. This state estimate is used by the predictor to predict when events will occur (Time of Event, ToE - returned as an :ref:`uncertain data object `), and future system states (returned as a :ref:`Prediction object `).
Data Structures
***************
A few custom data structures are available for storing and manipulating prognostics data of various forms. These structures are listed below and described on their respective pages:
- * :py:class:`prog_models.sim_result.SimResult` : The result of a single simulation (without uncertainty). Can be used to store inputs, outputs, states, event_states, observables, etc. Is returned by the model.simulate_to* methods.
- * :py:class:`prog_algs.uncertain_data.UncertainData`: Used throughout the package to represent data with uncertainty. There are a variety of subclasses of UncertainData to represent data with uncertainty in different forms (e.g., :py:class:`prog_algs.uncertain_data.ScalarData`, :py:class:`prog_algs.uncertain_data.MultivariateNormalDist`, :py:class:`prog_algs.uncertain_data.UnweightedSamples`). Notably, this is used to represent the output of a StateEstimator's `estimate` method, individual snapshots of a prediction, and the time of event estimate from a predictor's `predict` method.
- * :py:class:`prog_algs.predictors.Prediction`: Prediction of future values (with uncertainty) of some variable (e.g., :term:`input`, :term:`state`, :term:`output`, :term:`event state`, etc.). The `predict` method of predictors return this.
- * :py:class:`prog_algs.predictors.ToEPredictionProfile` : The time of prediction estimates from multiple predictions. This data structure can be treated as a dictionary of time of prediction to toe prediction.
+ * :py:class:`progpy.sim_result.SimResult` : The result of a single simulation (without uncertainty). Can be used to store inputs, outputs, states, event_states, observables, etc. Is returned by the model.simulate_to* methods.
+ * :py:class:`progpy.uncertain_data.UncertainData`: Used throughout the package to represent data with uncertainty. There are a variety of subclasses of UncertainData to represent data with uncertainty in different forms (e.g., :py:class:`progpy.uncertain_data.ScalarData`, :py:class:`progpy.uncertain_data.MultivariateNormalDist`, :py:class:`progpy.uncertain_data.UnweightedSamples`). Notably, this is used to represent the output of a StateEstimator's `estimate` method, individual snapshots of a prediction, and the time of event estimate from a predictor's `predict` method.
+ * :py:class:`progpy.predictors.Prediction`: Prediction of future values (with uncertainty) of some variable (e.g., :term:`input`, :term:`state`, :term:`output`, :term:`event state`, etc.). The `predict` method of predictors returns this.
+ * :py:class:`progpy.predictors.ToEPredictionProfile` : The time of prediction estimates from multiple predictions. This data structure can be treated as a dictionary of time of prediction to ToE prediction.
State Estimation
-----------------
@@ -66,27 +66,27 @@ The foundation of state estimators is the estimate method. The estimate method i
>>> estimator.estimate(time, inputs, outputs)
-The internal state is stored in the estimators x property as a UncertainData subclass (see `UncertainData `__). State is accessed like so :pythoncode:`x_est = estimator.x`.
+The internal state is stored in the estimator's x property as an UncertainData subclass (see `UncertainData `__). State is accessed like so :pythoncode:`x_est = estimator.x`.
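
As a minimal sketch (assuming the BatteryCircuit model distributed with ProgPy, a hypothetical constant 2 A load, and the model's own noise-free output standing in for a measurement), a full state estimation step might look like:

>>> from progpy.models import BatteryCircuit
>>> from progpy.state_estimators import UnscentedKalmanFilter
>>> m = BatteryCircuit()
>>> x0 = m.initialize()
>>> filt = UnscentedKalmanFilter(m, x0)
>>> z = m.output(x0)  # stand-in measurement for illustration
>>> filt.estimate(0.1, m.InputContainer({'i': 2.0}), z)  # time, input, measurement
>>> x_est = filt.x  # UncertainData subclass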
.. dropdown:: Included State Estimators
- ProgPy includes a number of state estimators in the *prog_algs.state_estimators* package. The most commonly used of these are highlighted below. See `State Estimators `__ for a full list of supported state estimators.
+ ProgPy includes a number of state estimators in the *progpy.state_estimators* package. The most commonly used of these are highlighted below. See `State Estimators `__ for a full list of supported state estimators.
- * **Unscented Kalman Filter (UKF)**: A type of kalman filter for non-linear models where the state distribution is represented by a set of sigma points, calculated by an unscented tranform. Sigma points are propogated forward and then compared with the measurement to update the distribution. The resulting state is represented by a :py:class:`prog_algs.uncertain_data.MultivariateNormalDist`. By it's nature, UKFs are much faster than Particle Filters, but they fit the data to a normal distribution, resulting in some loss of information.
- * **Particle Filter (PF)**: A sample-based state estimation algorithm, where the distribution of likely states is represented by a set of unweighted samples. These samples are propagated forward and then weighted according to the likelihood of the measurement (given those samples) to update the distribution. The resulting state is represented by a :py:class:`prog_algs.uncertain_data.UnweightedSamples`. By its nature, PF is more accurate than a UKF, but much slower. Full accuracy of PF can be adjusted by increasing or decreasing the number of samples
- * **Kalman Filter (KF)**: A Simple efficient Kalman Filter for linear systems where state is represented by a mean and covariance matrix. The resulting state is represented by a :py:class:`prog_algs.uncertain_data.MultivariateNormalDist`. Only works with Prognostic Models inheriting from :py:class:`prog_models.LinearModel`.
+ * **Unscented Kalman Filter (UKF)**: A type of Kalman filter for non-linear models where the state distribution is represented by a set of sigma points, calculated by an unscented transform. Sigma points are propagated forward and then compared with the measurement to update the distribution. The resulting state is represented by a :py:class:`progpy.uncertain_data.MultivariateNormalDist`. By its nature, UKFs are much faster than Particle Filters, but they fit the data to a normal distribution, resulting in some loss of information.
+ * **Particle Filter (PF)**: A sample-based state estimation algorithm, where the distribution of likely states is represented by a set of unweighted samples. These samples are propagated forward and then weighted according to the likelihood of the measurement (given those samples) to update the distribution. The resulting state is represented by a :py:class:`progpy.uncertain_data.UnweightedSamples`. By its nature, PF is more accurate than a UKF, but much slower. The accuracy of a PF can be adjusted by increasing or decreasing the number of samples.
+ * **Kalman Filter (KF)**: A simple, efficient Kalman filter for linear systems where state is represented by a mean and covariance matrix. The resulting state is represented by a :py:class:`progpy.uncertain_data.MultivariateNormalDist`. Only works with Prognostic Models inheriting from :py:class:`progpy.LinearModel`.
.. dropdown:: UKF Details
- .. autoclass:: prog_algs.state_estimators.UnscentedKalmanFilter
+ .. autoclass:: progpy.state_estimators.UnscentedKalmanFilter
.. dropdown:: PF Details
- .. autoclass:: prog_algs.state_estimators.ParticleFilter
+ .. autoclass:: progpy.state_estimators.ParticleFilter
.. dropdown:: KF Details
- .. autoclass:: prog_algs.state_estimators.ParticleFilter
+ .. autoclass:: progpy.state_estimators.KalmanFilter
.. dropdown:: Example
@@ -110,14 +110,14 @@ The internal state is stored in the estimators x property as a UncertainData sub
Extending
************
-New :term:`state estimator` are created by extending the :class:`prog_algs.state_estimators.StateEstimator` class.
+A new :term:`state estimator` is created by extending the :class:`progpy.state_estimators.StateEstimator` class.
-See :download:`examples.new_state_estimator_example <../../prog_algs/examples/new_state_estimator_example.py>` for an example of this approach.
+See :download:`examples.new_state_estimator_example <../../progpy/examples/new_state_estimator_example.py>` for an example of this approach.
Example
^^^^^^^^^^^
-* :download:`examples.new_state_estimator_example <../../prog_algs/examples/new_state_estimator_example.py>`
+* :download:`examples.new_state_estimator_example <../../progpy/examples/new_state_estimator_example.py>`
.. automodule:: new_state_estimator_example
Prediction
@@ -135,39 +135,39 @@ A predictors ``predict`` method is used to perform prediction, generally defined
result = predictor.predict(x0, future_loading, **config)
-Where x0 is the initial state as an UncertainData object (often the output of state estimation), future_loading is a function defining future loading as a function of state and time, and config is a dictionary of any additional configuration parameters, specific to the predictor being used. See `Predictors `__ for options available for each predictor
+Where x0 is the initial state as an UncertainData object (often the output of state estimation), future_loading is a function defining future loading as a function of state and time, and config is a dictionary of any additional configuration parameters, specific to the predictor being used. See `Predictors `__ for options available for each predictor.
The result of the predict method is a named tuple with the following members:
* **times**: array of times for each savepoint such that times[i] corresponds to inputs.snapshot(i)
-* **inputs**: :py:class:`prog_algs.predictors.Prediction` object containing inputs used to perform prediction such that inputs.snapshot(i) corresponds to times[i]
-* **outputs**: :py:class:`prog_algs.predictors.Prediction` object containing predicted outputs at each savepoint such that outputs.snapshot(i) corresponds to times[i]
-* **event_states**: :py:class:`prog_algs.predictors.Prediction` object containing predicted event states at each savepoint such that event_states.snapshot(i) corresponds to times[i]
-* **time_of_event**: :py:class:`prog_algs.uncertain_data.UncertainData` object containing the predicted Time of Event (ToE) for each event. Additionally, final state at time of event is saved at time_of_event.final_state -> :py:class:`prog_algs.uncertain_data.UncertainData` for each event
+* **inputs**: :py:class:`progpy.predictors.Prediction` object containing inputs used to perform prediction such that inputs.snapshot(i) corresponds to times[i]
+* **outputs**: :py:class:`progpy.predictors.Prediction` object containing predicted outputs at each savepoint such that outputs.snapshot(i) corresponds to times[i]
+* **event_states**: :py:class:`progpy.predictors.Prediction` object containing predicted event states at each savepoint such that event_states.snapshot(i) corresponds to times[i]
+* **time_of_event**: :py:class:`progpy.uncertain_data.UncertainData` object containing the predicted Time of Event (ToE) for each event. Additionally, final state at time of event is saved at time_of_event.final_state -> :py:class:`progpy.uncertain_data.UncertainData` for each event
The step size and times at which results are saved can be defined like in a simulation. See `Simulation `__.
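
As an illustrative sketch (assuming the model ``m`` and state estimate ``filt.x`` from the state estimation example above; the configuration values here are arbitrary), a Monte Carlo prediction might look like:

>>> from progpy.predictors import MonteCarlo
>>> def future_loading(t, x=None):
...     return m.InputContainer({'i': 2.0})  # hypothetical constant 2 A draw
>>> mc = MonteCarlo(m)
>>> result = mc.predict(filt.x, future_loading, dt=0.1, save_freq=10, n_samples=50)
>>> print('Time of event:', result.time_of_event.mean)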
.. dropdown:: Included Predictors
- ProgPy includes a number of predictors in the *prog_algs.predictors* package. The most commonly used of these are highlighted below. See `Predictors `__ for a full list of supported predictors.
+ ProgPy includes a number of predictors in the *progpy.predictors* package. The most commonly used of these are highlighted below. See `Predictors `__ for a full list of supported predictors.
- * **Unscented Transform (UT)**: A type of predictor for non-linear models where the state distribution is represented by a set of sigma points, calculated by an unscented tranform. Sigma points are propogated forward with time until the pass the threshold. The times at which each sigma point passes the threshold are converted to a distribution of time of event. The predicted future states and time of event are represented by a :py:class:`prog_algs.uncertain_data.MultivariateNormalDist`. By it's nature, UTs are much faster than MCs, but they fit the data to a normal distribution, resulting in some loss of information.
- * **Monte Carlo (MC)**: A sample-based prediction algorithm, where the distribution of likely states is represented by a set of unweighted samples. These samples are propagated forward with time. By its nature, MC is more accurate than a PF, but much slower. The predicted future states and time of event are represented by a :py:class:`prog_algs.uncertain_data.UnweightedSamples`. Full accuracy of MC can be adjusted by increasing or decreasing the number of samples
+ * **Unscented Transform (UT)**: A type of predictor for non-linear models where the state distribution is represented by a set of sigma points, calculated by an unscented transform. Sigma points are propagated forward with time until they pass the threshold. The times at which each sigma point passes the threshold are converted to a distribution of time of event. The predicted future states and time of event are represented by a :py:class:`progpy.uncertain_data.MultivariateNormalDist`. By its nature, UTs are much faster than MCs, but they fit the data to a normal distribution, resulting in some loss of information.
+ * **Monte Carlo (MC)**: A sample-based prediction algorithm, where the distribution of likely states is represented by a set of unweighted samples. These samples are propagated forward with time. By its nature, MC is more accurate than a UT, but much slower. The predicted future states and time of event are represented by a :py:class:`progpy.uncertain_data.UnweightedSamples`. The accuracy of MC can be adjusted by increasing or decreasing the number of samples.
.. dropdown:: UT Details
- .. autoclass:: prog_algs.predictors.UnscentedTransformPredictor
+ .. autoclass:: progpy.predictors.UnscentedTransformPredictor
.. dropdown:: MC Details
- .. autoclass:: prog_algs.predictors.MonteCarlo
+ .. autoclass:: progpy.predictors.MonteCarlo
- .. autoclass:: prog_algs.predictors.MonteCarloPredictor
+ .. autoclass:: progpy.predictors.MonteCarloPredictor
Extending
*************
-New :term:`predictor` are created by extending the :class:`prog_algs.predictors.Predictor` class.
+A new :term:`predictor` is created by extending the :class:`progpy.predictors.Predictor` class.
Analyzing Results
@@ -176,7 +176,7 @@ Analyzing Results
State Estimation
*******************
-The results of the state estimation are stored in an object of type :class:`prog_algs.uncertain_data.UncertainData`. This class contains a number of methods for analyzing a state estimate. This includes methods for obtaining statistics about the distribution, including the following:
+The results of the state estimation are stored in an object of type :class:`progpy.uncertain_data.UncertainData`. This class contains a number of methods for analyzing a state estimate, including methods for obtaining statistics about the distribution, such as the following:
* **mean**: The mean value of the state estimate distribution.
* **median**: The median value of the state estimate distribution.
@@ -233,17 +233,19 @@ There are also a number of figures available to describe a state estimate, descr
.. image:: images/histogram.png
+ :width: 70 %
+ :align: center
.. raw:: html
-See :class:`prog_algs.uncertain_data.UncertainData` documentation for more details.
+See :class:`progpy.uncertain_data.UncertainData` documentation for more details.
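
For instance, a brief sketch continuing the hypothetical ``filt`` state estimator from above:

>>> est = filt.x
>>> print('mean:', est.mean)
>>> print('median:', est.median)
>>> samples = est.sample(100)  # 100 unweighted samples from the distribution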
Predicted Future States
**************************
-Predicted future states, inputs, outputs, and event states come in the form of a :class:`prog_algs.predictors.Prediction` object. Predictions store distributions of predicted future values at multiple future times. Predictions contain a number of tools for analyzing the results, some of which are described below:
+Predicted future states, inputs, outputs, and event states come in the form of a :class:`progpy.predictors.Prediction` object. Predictions store distributions of predicted future values at multiple future times. Predictions contain a number of tools for analyzing the results, some of which are described below:
* **mean**: Estimate the mean value at each time. The result is a list of dictionaries such that prediction.mean[i] corresponds to times[i]
* **monotonicity**: Given a single prediction, for each event, go through all predicted states and compare each to the next.
@@ -253,7 +255,7 @@ Predicted future states, inputs, outputs, and event states come in the form of a
Time of Event (ToE)
**************************
-Time of Event is also stored as an object of type :class:`prog_algs.uncertain_data.UncertainData`, so the analysis functions described in :ref:`State Estimation` are also available for a ToE estimate. See :ref:`State Estimation` or :class:`prog_algs.uncertain_data.UncertainData` documentation for details.
+Time of Event is also stored as an object of type :class:`progpy.uncertain_data.UncertainData`, so the analysis functions described in :ref:`State Estimation` are also available for a ToE estimate. See :ref:`State Estimation` or :class:`progpy.uncertain_data.UncertainData` documentation for details.
In addition to these standard UncertainData metrics, Probability of Success (PoS) is an important metric for prognostics. Probability of Success is the probability that an event will not occur before a defined time. For example, in aeronautics, PoS might be the probability that no failure will occur before the end of the mission.
@@ -267,7 +269,7 @@ Below is an example calculating probability of success:
ToE Prediction Profile
**************************
-A :class:`prog_algs.predictors.ToEPredictionProfile` contains Time of Event (ToE) predictions performed at multiple points. ToEPredictionProfile is frequently used to evaluate the prognostic quality for a given prognostic solution. It contains a number of methods to help with this, including:
+A :class:`progpy.predictors.ToEPredictionProfile` contains Time of Event (ToE) predictions performed at multiple points. ToEPredictionProfile is frequently used to evaluate the prognostic quality for a given prognostic solution. It contains a number of methods to help with this, including:
* **alpha_lambda**: Whether the prediction falls within specified limits at particular times with respect to a performance measure [#Goebel2017]_ [#Saxena2010]_
* **cumulate_relative_accuracy**: The sum of the relative accuracies of each prediction, given a ground truth
@@ -284,41 +286,38 @@ Examples
----------
.. image:: https://mybinder.org/badge_logo.svg
- :target: https://mybinder.org/v2/gh/nasa/prog_algs/master?labpath=tutorial.ipynb
+ :target: https://mybinder.org/v2/gh/nasa/progpy/master?labpath=tutorial.ipynb
-The best way to learn how to use `prog_algs` is through the `tutorial `__. There are also a number of examples which show different aspects of the package, summarized and linked below:
+The best way to learn how to use `progpy` is through the `tutorial `__. There are also a number of examples which show different aspects of the package, summarized and linked below:
-* :download:`examples.basic_example <../../prog_algs/examples/basic_example.py>`
- .. automodule:: basic_example
-
-* :download:`examples.basic_example_battery <../../prog_algs/examples/basic_example_battery.py>`
+* :download:`examples.basic_example_battery <../../progpy/examples/basic_example_battery.py>`
.. automodule:: basic_example_battery
-.. * :download:`examples.benchmarking_example <../../prog_algs/examples/benchmarking_example.py>`
+.. * :download:`examples.benchmarking_example <../../progpy/examples/benchmarking_example.py>`
.. .. automodule:: benchmarking_example
-* :download:`examples.eol_event <../../prog_algs/examples/eol_event.py>`
+* :download:`examples.eol_event <../../progpy/examples/eol_event.py>`
.. automodule:: eol_event
-* :download:`examples.new_state_estimator_example <../../prog_algs/examples/new_state_estimator_example.py>`
+* :download:`examples.new_state_estimator_example <../../progpy/examples/new_state_estimator_example.py>`
.. automodule:: new_state_estimator_example
-* :download:`examples.horizon <../../prog_algs/examples/horizon.py>`
+* :download:`examples.horizon <../../progpy/examples/horizon.py>`
.. automodule:: horizon
-* :download:`examples.kalman_filter <../../prog_algs/examples/kalman_filter.py>`
+* :download:`examples.kalman_filter <../../progpy/examples/kalman_filter.py>`
.. automodule:: kalman_filter
-* :download:`examples.measurement_eqn_example <../../prog_algs/examples/measurement_eqn_example.py>`
+* :download:`examples.measurement_eqn_example <../../progpy/examples/measurement_eqn_example.py>`
.. automodule:: measurement_eqn_example
-* :download:`examples.playback <../../prog_algs/examples/playback.py>`
+* :download:`examples.playback <../../progpy/examples/playback.py>`
.. automodule:: playback
-* :download:`examples.predict_specific_event <../../prog_algs/examples/predict_specific_event.py>`
+* :download:`examples.predict_specific_event <../../progpy/examples/predict_specific_event.py>`
.. automodule:: predict_specific_event
-* :download:`examples.particle_filter_battery_example <../../prog_algs/examples/particle_filter_battery_example.py>`
+* :download:`examples.particle_filter_battery_example <../../progpy/examples/particle_filter_battery_example.py>`
.. automodule:: particle_filter_battery_example
References
diff --git a/docs/_sources/prog_models_guide.rst b/docs/_sources/prog_models_guide.rst
index b0e8ef2..57625ee 100644
--- a/docs/_sources/prog_models_guide.rst
+++ b/docs/_sources/prog_models_guide.rst
@@ -7,29 +7,7 @@ Modeling and Sim Guide
The Prognostics Python Package (ProgPy) includes tools for defining, building, using, and testing models for :term:`prognostics` of engineering systems. It also provides a set of prognostics models for select components developed within this framework, suitable for use in prognostics applications for these components. These models can be used in conjunction with the state estimation and prediction features (see :ref:`State Estimation and Prediction Guide`) to perform research in prognostics methods.
-Installing progpy
------------------------
-
-.. tabs::
-
- .. tab:: Stable Version (Recommended)
-
- The latest stable release of ProgPy is hosted on PyPi. For most users, this version will be adequate. To install via the command line, use the following command:
-
- .. code-block:: console
-
- $ pip install progpy
-
- .. tab:: Pre-Release
-
- Users who would like to contribute to progpy or would like to use pre-release features can do so using the `progpy GitHub repo `__. This isn't recommended for most users as this version may be unstable. To do this, use the following commands:
-
- .. code-block:: console
-
- $ git clone https://github.com/nasa/progpy
- $ cd progpy
- $ git checkout dev
- $ pip install -e .
+.. include:: installing.rst
Getting Started
------------------
@@ -63,6 +41,7 @@ States are transitioned forward in time using the state transition equation.
.. raw:: html
+
:math:`x(t+dt) = f(t, x(t), u(t), dt, \Theta)`
.. raw:: html
@@ -119,6 +98,7 @@ Outputs are a function of only the system state (x) and :term:`parameters` (:mat
.. raw:: html
+
:math:`z(t) = f(x(t), \Theta)`
.. raw:: html
@@ -233,8 +213,7 @@ The specific parameters are very specific to the system being modeled. For examp
Sometimes users would like to specify parameters as a function of other parameters. This feature is called "derived parameters". See example below for more details on this feature.
- * :download:`examples.derived_params <../../progpy/examples/derived_params.py>`
- .. automodule:: derived_params
+ * :download:`04 New Models <../../progpy/examples/04_New Models.ipynb>`
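
As a brief sketch (a hypothetical model, assuming the ``param_callbacks`` mechanism demonstrated in ProgPy's derived parameters example), a derived parameter is recomputed whenever a parameter it depends on is set:

.. code-block:: python

    from progpy import PrognosticsModel

    def update_g_per_m(params):
        # Hypothetical derived parameter, recalculated when 'g' or 'm' changes
        return {'g_per_m': params['g'] / params['m']}

    class MyModel(PrognosticsModel):
        # ... inputs, states, outputs, events, dx, output, etc.
        param_callbacks = {
            'g': [update_g_per_m],
            'm': [update_g_per_m],
        }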
Noise
^^^^^^^^^^^^^^^^^^^^^^^
@@ -262,10 +241,10 @@ See example below for details on how to configure proccess and measurement noise
* :download:`examples.noise <../../progpy/examples/noise.py>`
.. automodule:: noise
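
For instance (a minimal sketch with illustrative values, using the BatteryCircuit model), noise can be configured when a model is constructed, either as a scalar applied to every key or as a per-key dictionary:

.. code-block:: python

    from progpy.models import BatteryCircuit

    m = BatteryCircuit(
        process_noise=0.25,  # scalar: applied to every state
        measurement_noise={'v': 0.02, 't': 0.5},  # dict: per-output noise
    )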
-:term:`Future Loading `
+Future Loading
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Future loading is an essential part of prediction and simulation. In order to simulate forward in time, you must have an estimate of how the system will be used (i.e., loaded) during the window of time that the system is simulated. Future load is essentially expected :ref:`Inputs` at future times.
+:term:`Future Loading ` is an essential part of prediction and simulation. In order to simulate forward in time, you must have an estimate of how the system will be used (i.e., loaded) during the window of time that the system is simulated. Future load is essentially expected :ref:`Inputs` at future times.
Future loading is provided by the user either using the predefined loading classes in `progpy.loading`, or as a function of time and optional state. For example:
@@ -277,13 +256,12 @@ Future loading is provided by the user either using the predifined loading class
See example below for details on how to provide future loading information in ProgPy.
-* :download:`examples.future_loading <../../progpy/examples/future_loading.py>`
- .. automodule:: future_loading
+* :download:`01. Simulation <../../progpy/examples/01_Simulation.ipynb>`
General Notes
^^^^^^^^^^^^^^^^
-Users of ProgPy will need a model describing the behavior of the system of interest. Users will likely either use one of the models distribued with ProgPy (see `Included Models `__), configuring it to their own system using parameter estimation (see :download:`examples.param_est <../../progpy/examples/param_est.ipynb>`), use a :term:`data-driven model` class to learn system behavior from data, or build their own model (see `Building New Models`_ section, below).
+Users of ProgPy will need a model describing the behavior of the system of interest. Users will likely either use one of the models distributed with ProgPy (see `Included Models `__), configuring it to their own system using parameter estimation (see :download:`02 Parameter Estimation <../../progpy/examples/02_Parameter Estimation.ipynb>`), use a :term:`data-driven model` class to learn system behavior from data, or build their own model (see `Building New Models`_ section, below).
Building New Models
----------------------
@@ -301,21 +279,7 @@ State-transition Models
For simple linear models, users can choose to subclass the simpler :py:class:`progpy.LinearModel` class, as illustrated in the second example. Some methods and algorithms only function on linear models.
- * :download:`examples.new_model <../../progpy/examples/new_model.py>`
- .. automodule:: new_model
-
- * :download:`examples.linear_model <../../progpy/examples/linear_model.ipynb>`
-
- .. dropdown:: Advanced features in model building
-
- * :download:`examples.derived_params <../../progpy/examples/derived_params.py>`
- .. automodule:: derived_params
-
- * :download:`examples.state_limits <../../progpy/examples/state_limits.py>`
- .. automodule:: state_limits
-
- * :download:`examples.events <../../progpy/examples/events.py>`
- .. automodule:: events
+ * :download:`04. New Models <../../progpy/examples/04_New Models.ipynb>`
.. tab:: data-driven
@@ -323,6 +287,15 @@ State-transition Models
The :py:func:`progpy.data_models.DataModel.from_data` and :py:func:`progpy.data_models.DataModel.from_model` methods are used to construct new models from data or an existing model (i.e., :term:`surrogate`), respectively. The use of these is demonstrated in the following examples.
+ .. note::
+ To use a data-driven model distributed with progpy you need to install the data-driven dependencies.
+
+ .. code-block:: console
+
+ $ pip install progpy[datadriven]
+
+ * :download:`05_Data Driven <../../progpy/examples/05_Data Driven.ipynb>`
+
* :download:`examples.lstm_model <../../progpy/examples/lstm_model.py>`
.. automodule:: lstm_model
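+
+      For illustration, a hedged sketch of training an LSTM state-transition model from run data (assumes ``inputs`` and ``outputs`` were collected from runs of the real system; the ``window`` and ``epochs`` values are placeholders):
+
+      .. code-block:: python
+
+         >>> from progpy.data_models import LSTMStateTransitionModel
+         >>> m = LSTMStateTransitionModel.from_data(
+         ...     inputs=[inputs], outputs=[outputs],
+         ...     window=12, epochs=30)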
@@ -432,8 +405,7 @@ One of the most basic of functions using a model is simulation. Simulation is th
For more details on dynamic step sizes, see the following example:
- * :download:`examples.dynamic_step_size <../../progpy/examples/dynamic_step_size.py>`
- .. automodule:: dynamic_step_size
+ * :download:`01 Simulation <../../progpy/examples/01_Simulation.ipynb>`
.. dropdown:: Integration Methods
@@ -475,9 +447,6 @@ Use of simulation is described further in the following examples:
* :download:`examples.noise <../../progpy/examples/noise.py>`
.. automodule:: noise
-* :download:`examples.future_loading <../../progpy/examples/future_loading.py>`
- .. automodule:: future_loading
-
Parameter Estimation
----------------------------
@@ -494,8 +463,6 @@ Generally, parameter estimation is done by tuning the parameters of the model so
See the example below for more details
-* :download:`examples.param_est <../../progpy/examples/param_est.ipynb>`
-
.. admonition:: Note
:class: tip
@@ -519,6 +486,10 @@ Combination Models
There are three methods in progpy through which multiple models can be combined and used together: composite models, ensemble models, and mixture of experts models, described below.
+For more details, see:
+
+ * :download:`06. Combining Models <../../progpy/examples/06_Combining Models.ipynb>`
+
.. tabs::
.. tab:: Composite models
@@ -535,10 +506,6 @@ There are two methods in progpy through which multiple models can be combined an
>>> ]
>>> )
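+
+      A fuller hedged sketch (assumes two interdependent models ``m1`` and ``m2``, with ``m1``'s output ``z1`` wired to ``m2``'s input ``u1``; the names are placeholders):
+
+      .. code-block:: python
+
+         >>> from progpy import CompositeModel
+         >>> m_composite = CompositeModel(
+         ...     [m1, m2],
+         ...     connections=[('m1.z1', 'm2.u1')])
+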
- For more information, see the example below:
-
- * :download:`examples.composite_model <../../progpy/examples/composite_model.py>`
-
.. tab:: Ensemble models
Unlike composite models, which model a system of systems, ensemble models are used to combine the logic of multiple models that describe the same system. This is used when there are multiple models representing different system behaviors or conditions. The results of each model are aggregated in a way that can be defined by the user. For example,
@@ -550,10 +517,6 @@ There are two methods in progpy through which multiple models can be combined an
>>> aggregator = np.mean
>>> )
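+
+      A fuller hedged sketch (assumes two models of the same system, ``m1`` and ``m2``, and the ``aggregator`` keyword shown in the fragment above):
+
+      .. code-block:: python
+
+         >>> import numpy as np
+         >>> from progpy import EnsembleModel
+         >>> m_ensemble = EnsembleModel(
+         ...     models=[m1, m2],
+         ...     aggregator=np.mean)
+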
- For more information, see the example below:
-
- * :download:`examples.ensemble <../../progpy/examples/ensemble.py>`
-
.. tab:: MixtureOfExperts models
Mixture of Experts (MoE) models combine multiple models of the same system, similar to Ensemble models. Unlike Ensemble Models, the aggregation is done by selecting the "best" model, that is, the model that has performed best in the past. Each model has a 'score' that is tracked in the state, and this score determines which model is best.
@@ -562,10 +525,6 @@ There are two methods in progpy through which multiple models can be combined an
>>> m = MixtureOfExpertsModel([model1, model2])
- For more information, see the example below:
-
- * :download:`examples.mixture_of_experts <../../progpy/examples/mixture_of_experts.py>`
-
Other Examples
----------------------------
diff --git a/docs/_sources/prog_models_guide.rst.txt b/docs/_sources/prog_models_guide.rst.txt
index fd9152c..b0e8ef2 100644
--- a/docs/_sources/prog_models_guide.rst.txt
+++ b/docs/_sources/prog_models_guide.rst.txt
@@ -1,33 +1,33 @@
-prog_models Guide
+Modeling and Sim Guide
===================================================
.. raw:: html
-
+
-The Prognostics Models Package (prog_models) is a Python framework for defining, building, using, and testing models for :term:`prognostics` of engineering systems. It also provides a set of prognostics models for select components developed within this framework, suitable for use in prognostics applications for these components and can be used in conjunction with the Prognostics Algorithms Package (:ref:`prog_algs`) to perform research in prognostics methods.
+The Prognostics Python Package (ProgPy) includes tools for defining, building, using, and testing models for :term:`prognostics` of engineering systems. It also provides a set of prognostics models for select components developed within this framework, suitable for use in prognostics applications for these components, and can be used in conjunction with the state estimation and prediction features (see :ref:`State Estimation and Prediction Guide`) to perform research in prognostics methods.
-Installing prog_models
+Installing progpy
-----------------------
.. tabs::
.. tab:: Stable Version (Recommended)
- The latest stable release of prog_models is hosted on PyPi. For most users (unless you want to contribute to the development of prog_models), the version on PyPi will be adequate. To install from the command line, use the following command:
+ The latest stable release of ProgPy is hosted on PyPi. For most users, this version will be adequate. To install via the command line, use the following command:
.. code-block:: console
- $ pip install prog_models
+ $ pip install progpy
.. tab:: Pre-Release
- Users who would like to contribute to prog_models or would like to use pre-release features can do so using the `prog_models GitHub repo `__. This isn't recommended for most users as this version may be unstable. To do this, use the following commands:
+ Users who would like to contribute to progpy or would like to use pre-release features can do so using the `progpy GitHub repo `__. This isn't recommended for most users as this version may be unstable. To do this, use the following commands:
.. code-block:: console
- $ git clone https://github.com/nasa/prog_models
- $ cd prog_models
+ $ git clone https://github.com/nasa/progpy
+ $ cd progpy
$ git checkout dev
$ pip install -e .
@@ -35,9 +35,9 @@ Getting Started
------------------
.. image:: https://mybinder.org/badge_logo.svg
- :target: https://mybinder.org/v2/gh/nasa/prog_models/master?labpath=tutorial.ipynb
+ :target: https://mybinder.org/v2/gh/nasa/progpy/master?labpath=tutorial.ipynb
-The best way to learn how to use prog_models is through the `tutorial `__. There are also a number of examples that show different aspects of the package, summarized and linked in the below sections
+The best way to learn how to use progpy is through the `tutorial `__. There are also a number of examples that show different aspects of the package, summarized and linked in the sections below.
ProgPy Prognostic Model Format
----------------------------------
@@ -49,7 +49,7 @@ Inputs
Prognostic model :term:`inputs` are how a system is loaded. These are things that can be controlled, and affect how the system state evolves. The expected inputs for a model are defined by its *inputs* property. For example, a battery is loaded by applying a current, so the only input is *i*, the applied current. Inputs are also sometimes environmental conditions, such as ambient temperature or pressure.
-Inputs are one of the inputs to the state transition model, described in :ref:`States`
+Inputs are one of the inputs to the state transition model, described in :ref:`States` .
States
^^^^^^^^^^^^^^^^^^^^
@@ -69,13 +69,17 @@ States are transitioned forward in time using the state transition equation.
-where :math:`x(t)` is :term:`state`, at time :math:`t`, :math:`u(t)` is :term:`input` at time :math:`t`, :math:`dt` is the stepsize, and :math:`\Theta` are the model :term:`parameters`.
+where :math:`x(t)` is :term:`state` at time :math:`t`, :math:`u(t)` is :term:`input` at time :math:`t` , :math:`dt` is the stepsize, and :math:`\Theta` are the model :term:`parameters` .
-In a ProgPy model, this state transition can be represented one of two ways, either discrete or continuous, depending on the nature of state transition. In the case of continuous models, state transition behavior is defined by defining the first derivative, using the :py:func:`prog_models.PrognosticsModel.dx` method. For discrete models, state transition behavior is defined using the :py:func:`prog_models.PrognosticsModel.next_state` method. The continuous state transition behavior is recommended, because defining the first derivative enables some approaches that rely on that information.
+In a ProgPy model, this state transition can be represented in one of two ways, either discrete or continuous, depending on the nature of the state transition. In the case of continuous models, state transition behavior is defined by defining the first derivative, using the :py:func:`progpy.PrognosticsModel.dx` method. For discrete models, state transition behavior is defined using the :py:func:`progpy.PrognosticsModel.next_state` method. The continuous state transition behavior is recommended, because defining the first derivative enables some approaches that rely on that information.
.. image:: images/next_state.png
+ :width: 70 %
+ :align: center
.. image:: images/dx.png
+ :width: 70 %
+ :align: center
.. dropdown:: State transition equation example
@@ -106,9 +110,11 @@ Output (Measurements)
The next important part of a prognostic model is the outputs. Outputs are measurable quantities of a system that are a function of system state. When applied in prognostics, generally the outputs are what is being measured or observed in some way. State estimators use the difference between predicted and measured values of these outputs to estimate the system state.
-Outputs are a function of only the system state (x) and :term:`parameters` (:math:`\Theta`), as described below. The expected outputs for a model are defined by its *outputs* property. The logic of calculating outputs from system state is provided by the user in the model :py:func:`prog_models.PrognosticsModel.output` method.
+Outputs are a function of only the system state (x) and :term:`parameters` (:math:`\Theta`), as described below. The expected outputs for a model are defined by its *outputs* property. The logic of calculating outputs from system state is provided by the user in the model :py:func:`progpy.PrognosticsModel.output` method.
.. image:: images/output.png
+ :width: 70 %
+ :align: center
.. raw:: html
@@ -137,12 +143,13 @@ Traditionally users may have heard the prognostic problem as estimating the Rema
Additionally, events can be used to predict other events of interest beyond failure, such as special system states or warning thresholds. For example, the above battery model might also have a warning event for when battery capacity reaches 50% of the original capacity because of battery aging with use.
-The expected events for a model are defined by its *events* property. The logic of events can be defined in two methods: :py:func:`prog_models.PrognosticsModel.threshold_met` and :py:func:`prog_models.PrognosticsModel.event_state`.
+The expected events for a model are defined by its *events* property. The logic of events can be defined in two methods: :py:func:`progpy.PrognosticsModel.threshold_met` and :py:func:`progpy.PrognosticsModel.event_state`.
-:term:`Thresholds` are the conditions under which an event occurs. The logic of the threshold is defined in the :py:func:`prog_models.PrognosticsModel.threshold_met` method. This method returns boolean for each event specifying if the event has occured.
+:term:`Thresholds` are the conditions under which an event occurs. The logic of the threshold is defined in the :py:func:`progpy.PrognosticsModel.threshold_met` method. This method returns a boolean for each event, specifying whether the event has occurred.
.. image:: images/threshold_met.png
-
+ :width: 70 %
+ :align: center
.. raw:: html
@@ -154,9 +161,11 @@ The expected events for a model are defined by its *events* property. The logic
-:term:`Event states` are an estimate of the progress towards a threshold. Where thresholds are boolean, event states are a number between 0 and 1, where 0 means the event has occured, 1 means no progress towards an event. Event states are a generalization of State of Health (SOH) for systems with multiple events and non-failure events. The logic of the event states is defined in the :py:func:`prog_models.PrognosticsModel.event_state` method.
+:term:`Event states` are an estimate of the progress towards a threshold. Where thresholds are boolean, event states are a number between 0 and 1, where 0 means the event has occurred and 1 means no progress towards the event. Event states are a generalization of State of Health (SOH) for systems with multiple events and non-failure events. The logic of the event states is defined in :py:func:`progpy.PrognosticsModel.event_state`.
.. image:: images/event_state.png
+ :width: 70 %
+ :align: center
.. raw:: html
@@ -205,17 +214,17 @@ Parameters
Parameters are used to configure the behavior of a model. For parameterized :term:`physics-based` models, parameters are used to configure the general system to match the behavior of the specific system. For example, parameters of the general battery model can be used to configure the model to describe the behavior of a specific battery.
-Models define a ``default_parameters`` property, that are the default parameters for that model. After construction, the parameters for a specific model can be accessed using the *parameters* property. For example, for a model `m`
+Models define a ``default_parameters`` property: the default parameters for that model. After construction, the parameters for a specific model can be accessed using the *parameters* property. For example, for a model `m`
.. code-block:: python
>>> print(m.parameters)
-Parameters can be set one of three ways: in model construction, using the *parameters* property after construction, or using Parameter Estimation feature (See :ref:`Parameter Estimation`). The first two are illustrated below:
+Parameters can be set in model construction, using the *parameters* property after construction, or using the Parameter Estimation feature (see :ref:`Parameter Estimation`). The first two are illustrated below:
.. code-block:: python
- >>> m = SomeModel(some_parameter = 10.2, some_other_parameter = 2.5)
+ >>> m = SomeModel(some_parameter=10.2, some_other_parameter=2.5)
>>> m.parameters['some_parameter'] = 11.2 # Overriding parameter
Parameters are very specific to the system being modeled. For example, a battery might have parameters for the capacity and internal resistance. When using provided models, see the documentation for that model for details on the parameters supported.
@@ -224,13 +233,13 @@ The specific parameters are very specific to the system being modeled. For examp
Sometimes users would like to specify parameters as a function of other parameters. This feature is called "derived parameters". See example below for more details on this feature.
- * :download:`examples.derived_params <../../prog_models/examples/derived_params.py>`
- .. automodule:: derived_params
+ * :download:`examples.derived_params <../../progpy/examples/derived_params.py>`
+ .. automodule:: derived_params
Noise
-^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^
-In practice, it is impossible to have absolute knowledge of future states due to uncertainties in the system. There is uncertainty in the estimates of the present state, future inputs, models, and prediction methods [#Goebel2017]_. This model-based prognostic approach incorporates this uncertainty in four forms: initial state uncertainty (:math:`x_0`), :term:`process noise`, :term:`measurement noise`, and :term:`future loading noise`.
+In practice, it is impossible to have absolute knowledge of future states due to uncertainties in the system. There is uncertainty in the estimates of the present state, future inputs, models, and prediction methods [Goebel2017]_. This model-based prognostic approach incorporates this uncertainty in four forms: initial state uncertainty (:math:`x_0`), :term:`process noise`, :term:`measurement noise`, and :term:`future loading noise`.
.. dropdown:: Process Noise
@@ -250,15 +259,15 @@ In practice, it is impossible to have absolute knowledge of future states due to
See example below for details on how to configure process and measurement noise in ProgPy
-* :download:`examples.noise <../../prog_models/examples/noise.py>`
+* :download:`examples.noise <../../progpy/examples/noise.py>`
.. automodule:: noise
-Future Loading
-^^^^^^^^^^^^^^^^^^
+:term:`Future Loading `
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Future loading is an essential part of prediction and simulation. In order to simulate forward in time, you must have an estimate of how the system will be used (i.e., loaded) during the window of time that the system is simulated. Future load is essentially expected :term:`inputs` (see :ref:`Inputs`) at future times.
+Future loading is an essential part of prediction and simulation. In order to simulate forward in time, you must have an estimate of how the system will be used (i.e., loaded) during the window of time that the system is simulated. Future load is essentially expected :ref:`Inputs` at future times.
-Future loading is provided to the user as a function of time and optional state. For example:
+Future loading is provided by the user either using the predefined loading classes in `progpy.loading`, or as a function of time and optional state. For example:
.. code-block:: python
@@ -268,13 +277,13 @@ Future loading is provided to the user as a function of time and optional state.
See example below for details on how to provide future loading information in ProgPy.
-* :download:`examples.future_loading <../../prog_models/examples/future_loading.py>`
+* :download:`examples.future_loading <../../progpy/examples/future_loading.py>`
.. automodule:: future_loading
General Notes
^^^^^^^^^^^^^^^^
-Users of ProgPy will need a model describing the behavior of the system of interest. Users will likely either use one of the models distribued with ProgPy (see `Included Models `__), configuring it to their own system using parameter estimation (see :download:`examples.param_est <../../prog_models/examples/param_est.py>`), use a :term:`data-driven model` class to learn system behavior from data, or build their own model (see `Building New Models`_ section, below).
+Users of ProgPy will need a model describing the behavior of the system of interest. Users will likely either use one of the models distributed with ProgPy (see `Included Models `__), configuring it to their own system using parameter estimation (see :download:`examples.param_est <../../progpy/examples/param_est.ipynb>`), use a :term:`data-driven model` class to learn system behavior from data, or build their own model (see `Building New Models`_ section, below).
Building New Models
----------------------
@@ -288,53 +297,55 @@ State-transition Models
.. tab:: physics-based
- New :term:`physics-based models` are constructed by subclassing :py:class:`prog_models.PrognosticsModel` as illustrated in the first example. To generate a new model, create a new class for your model that inherits from this class. Alternatively, you can copy the template :download:`prog_model_template.ProgModelTemplate <../../prog_models/prog_model_template.py>`, replacing the methods with logic defining your specific model. The analysis and simulation tools defined in :class:`prog_models.PrognosticsModel` will then work with your new model.
+ New :term:`physics-based models` are constructed by subclassing :py:class:`progpy.PrognosticsModel` as illustrated in the first example. To generate a new model, create a new class for your model that inherits from this class. Alternatively, you can copy the template :download:`prog_model_template.ProgModelTemplate <../../progpy/prog_model_template.py>`, replacing the methods with logic defining your specific model. The analysis and simulation tools defined in :class:`progpy.PrognosticsModel` will then work with your new model.
- For simple linear models, users can choose to subclass the simpler :py:class:`prog_models.LinearModel` class, as illustrated in the second example. Some methods and algorithms only function on linear models.
+ For simple linear models, users can choose to subclass the simpler :py:class:`progpy.LinearModel` class, as illustrated in the second example. Some methods and algorithms only function on linear models.
- * :download:`examples.new_model <../../prog_models/examples/new_model.py>`
+ * :download:`examples.new_model <../../progpy/examples/new_model.py>`
.. automodule:: new_model
- * :download:`examples.linear_model <../../prog_models/examples/linear_model.py>`
- .. automodule:: linear_model
+ * :download:`examples.linear_model <../../progpy/examples/linear_model.ipynb>`
.. dropdown:: Advanced features in model building
- * :download:`examples.derived_params <../../prog_models/examples/derived_params.py>`
+ * :download:`examples.derived_params <../../progpy/examples/derived_params.py>`
.. automodule:: derived_params
- * :download:`examples.state_limits <../../prog_models/examples/state_limits.py>`
+ * :download:`examples.state_limits <../../progpy/examples/state_limits.py>`
.. automodule:: state_limits
- * :download:`examples.events <../../prog_models/examples/events.py>`
+ * :download:`examples.events <../../progpy/examples/events.py>`
.. automodule:: events
.. tab:: data-driven
- New :term:`data-driven models`, such as those using neural networks, are created by subclassing the :py:class:`prog_models.data_models.DataModel` class, overriding the ``from_data`` method.
+ New :term:`data-driven models`, such as those using neural networks, are created by subclassing the :py:class:`progpy.data_models.DataModel` class, overriding the ``from_data`` method.
- The :py:func:`prog_models.data_models.DataModel.from_data` and :py:func:`prog_models.data_models.DataModel.from_model` methods are used to construct new models from data or an existing model (i.e., :term:`surrogate`), respectively. The use of these is demonstrated in the following examples.
+ The :py:func:`progpy.data_models.DataModel.from_data` and :py:func:`progpy.data_models.DataModel.from_model` methods are used to construct new models from data or an existing model (i.e., :term:`surrogate`), respectively. The use of these is demonstrated in the following examples.
- * :download:`examples.lstm_model <../../prog_models/examples/lstm_model.py>`
+ * :download:`examples.lstm_model <../../progpy/examples/lstm_model.py>`
.. automodule:: lstm_model
- * :download:`examples.full_lstm_model <../../prog_models/examples/full_lstm_model.py>`
+ * :download:`examples.full_lstm_model <../../progpy/examples/full_lstm_model.py>`
.. automodule:: full_lstm_model
+
+ * :download:`examples.pce <../../progpy/examples/pce.py>`
+ .. automodule:: pce
- * :download:`examples.generate_surrogate <../../prog_models/examples/generate_surrogate.py>`
+ * :download:`examples.generate_surrogate <../../progpy/examples/generate_surrogate.py>`
.. automodule:: generate_surrogate
.. dropdown:: Advanced features in data models
- * :download:`examples.custom_model <../../prog_models/examples/custom_model.py>`
+ * :download:`examples.custom_model <../../progpy/examples/custom_model.py>`
.. automodule:: custom_model
Direct-prediction models
^^^^^^^^^^^^^^^^^^^^^^^^^^^
-:term:`Direct-prediction models` are models that estimate :term:`time of event` directly from the current state and :term:`future load`, instead of being predicted through state transition. When models are pure direct-prediction models, future states cannot be predicted. See example below for more information.
+:term:`Direct-prediction models` are models that estimate :term:`time of event` directly from the current state and :term:`future load`, instead of being predicted through state transition. When models are pure direct-prediction models, future states cannot be predicted. See example below for more information.
-* :download:`examples.direct_model <../../prog_models/examples/direct_model.py>`
+* :download:`examples.direct_model <../../progpy/examples/direct_model.py>`
.. automodule:: direct_model
Using Data
@@ -342,9 +353,9 @@ Using Data
Whether you're using :term:`data-driven`, :term:`physics-based`, expert knowledge, or some hybrid approach, building and validating a model requires data. In the case of data-driven approaches, data is used to train and validate the model. In the case of physics-based models, data is used to estimate parameters (see `Parameter Estimation`) and validate the model.
-ProgPy includes some example datasets. See `ProgPy Datasets `_ and the example below for details.
+ProgPy includes some example datasets. See `ProgPy Datasets `_ and the example below for details.
-* :download:`examples.dataset <../../prog_models/examples/dataset.py>`
+* :download:`examples.dataset <../../progpy/examples/dataset.py>`
.. automodule:: dataset
.. note:: To use the dataset feature, you must install the requests package.
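+
+A hedged sketch of programmatic dataset access (assumes the ``nasa_battery`` dataset module and network access for the first download):
+
+.. code-block:: python
+
+    >>> from progpy.datasets import nasa_battery
+    >>> desc, data = nasa_battery.load_data(1)  # metadata and run data for battery 1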
@@ -352,39 +363,42 @@ ProgPy includes some example datasets. See `ProgPy Datasets `__. The examples below illustrate use of some of the models provided in the :py:mod:`prog_models.models` module.
+For details on the included models see `Included Models `__. The examples below illustrate use of some of the models provided in the :py:mod:`progpy.models` module.
-* :download:`examples.sim <../../prog_models/examples/sim.py>`
+* :download:`examples.sim <../../progpy/examples/sim.py>`
.. automodule:: sim
-* :download:`examples.sim_battery_eol <../../prog_models/examples/sim_battery_eol.py>`
+* :download:`examples.sim_battery_eol <../../progpy/examples/sim_battery_eol.py>`
.. automodule:: sim_battery_eol
-* :download:`examples.sim_pump <../../prog_models/examples/sim_pump.py>`
+* :download:`examples.sim_pump <../../progpy/examples/sim_pump.py>`
.. automodule:: sim_pump
-* :download:`examples.sim_valve <../../prog_models/examples/sim_valve.py>`
+* :download:`examples.sim_valve <../../progpy/examples/sim_valve.py>`
.. automodule:: sim_valve
-* :download:`examples.sim_powertrain <../../prog_models/examples/sim_powertrain.py>`
+* :download:`examples.sim_powertrain <../../progpy/examples/sim_powertrain.py>`
.. automodule:: sim_powertrain
-* :download:`examples.sim_dcmotor_singlephase <../../prog_models/examples/sim_dcmotor_singlephase.py>`
+* :download:`examples.sim_dcmotor_singlephase <../../progpy/examples/sim_dcmotor_singlephase.py>`
.. automodule:: sim_dcmotor_singlephase
+* :download:`examples.uav_dynamics_model <../../progpy/examples/uav_dynamics_model.py>`
+ .. automodule:: uav_dynamics_model
+
Simulation
----------------------------
-One of the most basic of functions using a model is simulation. Simulation is the process of predicting the evolution of system :term:`state` with time, given a specific :term:`future load` profile. Unlike full prognostics, simulation does not include uncertainty in the state and other product (e.g., :term:`output`) representation. For a prognostics model, simulation is done using the :py:meth:`prog_models.PrognosticsModel.simulate_to` and :py:meth:`prog_models.PrognosticsModel.simulate_to_threshold` methods.
+One of the most basic uses of a model is simulation. Simulation is the process of predicting the evolution of system :term:`state` with time, given a specific :term:`future load` profile. Unlike full prognostics, simulation does not include uncertainty in the state and other product (e.g., :term:`output`) representation. For a prognostics model, simulation is done using the :py:meth:`progpy.PrognosticsModel.simulate_to` and :py:meth:`progpy.PrognosticsModel.simulate_to_threshold` methods.
.. role:: pythoncode(code)
:language: python
.. dropdown:: Saving results
- :py:meth:`prog_models.PrognosticsModel.simulate_to` and :py:meth:`prog_models.PrognosticsModel.simulate_to_threshold` return the inputs, states, outputs, and event states at various points in the simulation. Returning these values for every timestep would require a lot of memory, and is not necessary for most use cases, so ProgPy provides an ability for users to specify what data to save.
+    :py:meth:`progpy.PrognosticsModel.simulate_to` and :py:meth:`progpy.PrognosticsModel.simulate_to_threshold` return the inputs, states, outputs, and event states at various points in the simulation. Returning these values for every timestep would require a lot of memory, and is not necessary for most use cases, so ProgPy provides the ability for users to specify what data to save.
There are two formats to specify what data to save: the ``save_freq`` and ``save_pts`` arguments, described below
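+
+    For example, a hedged sketch (assumes a model ``m`` and a loading function ``future_loading``):
+
+    .. code-block:: python
+
+        >>> # Save every 10 seconds, and also exactly at t=15 and t=25
+        >>> results = m.simulate_to_threshold(future_loading, save_freq=10, save_pts=[15, 25])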
@@ -416,9 +430,14 @@ One of the most basic of functions using a model is simulation. Simulation is th
* *Bounded Automatic Dynamic Step Size*: Step size is adjusted automatically to hit each save_pt and save_freq exactly, with a maximum step size. Example, :pythoncode:`m.simulate_to_threshold(..., dt=('auto', 0.5))`
* *Functional Dynamic Step Size*: Step size is provided as a function of time and state. This is the most flexible approach. Example, :pythoncode:`m.simulate_to_threshold(..., dt= lambda t, x : max(0.75 - t*0.01, 0.25))`
+ For more details on dynamic step sizes, see the following example:
+
+ * :download:`examples.dynamic_step_size <../../progpy/examples/dynamic_step_size.py>`
+ .. automodule:: dynamic_step_size
+
.. dropdown:: Integration Methods
- Simulation is essentially the process of integrating the model forward with time. By default, simple euler integration is used to propogate the model forward. Advanced users can change the numerical integration method to affect the simulation quality and runtime. This is done using the ``integration_method`` argument in :py:meth:`prog_models.PrognosticsModel.simulate_to_threshold` and :py:meth:`prog_models.PrognosticsModel.simulate_to`.
+    Simulation is essentially the process of integrating the model forward with time. By default, simple Euler integration is used to propagate the model forward. Advanced users can change the numerical integration method to affect the simulation quality and runtime. This is done using the ``integration_method`` argument in :py:meth:`progpy.PrognosticsModel.simulate_to_threshold` and :py:meth:`progpy.PrognosticsModel.simulate_to`.
For example, users can use the commonly-used Runge Kutta 4 numerical integration method using the following method call for model m:
@@ -426,20 +445,39 @@ One of the most basic of functions using a model is simulation. Simulation is th
>>> m.simulate_to_threshold(future_loading, integration_method = 'rk4')
+.. dropdown:: Eval Points
+
+    Sometimes users would like to ensure that simulation hits a specific point exactly, regardless of the step size (``dt``). This can be done using the ``eval_pts`` argument in :py:meth:`progpy.PrognosticsModel.simulate_to_threshold` and :py:meth:`progpy.PrognosticsModel.simulate_to`. This argument takes a list of times that the simulation should include. For example, for the simulation to evaluate at 10 and 20 seconds, use the following method call for model m:
+
+ .. code-block:: python
+
+        >>> m.simulate_to_threshold(future_loading, eval_pts=[10, 20])
+
+    This feature is especially important for use cases where loading changes dramatically at a specific time. For example, if loading is 10 for the first 5 seconds and 20 afterwards, and you have a ``dt`` of 4 seconds, here is the loading the simulation would see:
+
+ * 0-4 seconds: 10
+ * 4-8 seconds: 10
+ * 8-12 seconds: 20
+
+    That means the load of 10 was applied 3 seconds longer than it was supposed to. Adding an eval point of 5 would apply this load:
+
+ * 0-4 seconds: 10
+ * 4-5 seconds: 10
+ * 5-9 seconds: 20
+
+ Now loading is applied correctly.
+
Use of simulation is described further in the following examples:
-* :download:`examples.sim <../../prog_models/examples/sim.py>`
+* :download:`examples.sim <../../progpy/examples/sim.py>`
.. automodule:: sim
-* :download:`examples.noise <../../prog_models/examples/noise.py>`
+* :download:`examples.noise <../../progpy/examples/noise.py>`
.. automodule:: noise
-* :download:`examples.future_loading <../../prog_models/examples/future_loading.py>`
+* :download:`examples.future_loading <../../progpy/examples/future_loading.py>`
.. automodule:: future_loading
-* :download:`examples.dynamic_step_size <../../prog_models/examples/dynamic_step_size.py>`
- .. automodule:: dynamic_step_size
-
Parameter Estimation
----------------------------
@@ -447,7 +485,7 @@ Parameter estimation is an important step in prognostics. Parameter estimation i
Sometimes model parameters are directly measurable (e.g., dimensions of blades on rotor). For these parameters, estimating them is a simple act of direct measurement. For parameters that cannot be directly measured, they're typically estimated using observed data.
-Generally, parameter estimation is done by tuning the parameters of the model so that simulation best matches the behavior observed in some available data. In ProgPy, this is done using the :py:meth:`prog_models.PrognosticsModel.estimate_params` method. This method takes :term:`input` and :term:`output` data from one or more runs, and uses scipy.optimize.minimize function to estimate the parameters of the model.
+Generally, parameter estimation is done by tuning the parameters of the model so that simulation best matches the behavior observed in some available data. In ProgPy, this is done using the :py:meth:`progpy.PrognosticsModel.estimate_params` method. This method takes :term:`input` and :term:`output` data from one or more runs, and uses the scipy.optimize.minimize function to estimate the parameters of the model.
.. code-block:: python
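+
+    >>> # A hedged sketch (the original example body is elided in this diff);
+    >>> # 'param1' is a placeholder for a real parameter name
+    >>> m.estimate_params(times=times, inputs=inputs, outputs=outputs, keys=['param1'])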
@@ -456,8 +494,7 @@ Generally, parameter estimation is done by tuning the parameters of the model so
See the example below for more details
-* :download:`examples.param_est <../../prog_models/examples/param_est.py>`
- .. automodule:: param_est
+* :download:`examples.param_est <../../progpy/examples/param_est.ipynb>`
.. admonition:: Note
:class: tip
@@ -475,12 +512,12 @@ Results of a simulation can be visualized using the plot method. For example:
>>> results.outputs.plot()
>>> results.states.plot()
-See :py:meth:`prog_models.sim_result.SimResult.plot` for more details on plotting capabilities
+See :py:meth:`progpy.sim_result.SimResult.plot` for more details on plotting capabilities
-Multiple Models
+Combination Models
----------------------------
-There are two methods in prog_models through which multiple models can be used together: composite models and ensemble models, described below.
+There are three methods in progpy through which multiple models can be combined and used together: composite models, ensemble models, and mixture of experts models, described below.
.. tabs::
@@ -500,7 +537,7 @@ There are two methods in prog_models through which multiple models can be used t
For more information, see the example below:
- * :download:`examples.composite_model <../../prog_models/examples/composite_model.py>`
+ * :download:`examples.composite_model <../../progpy/examples/composite_model.py>`
.. tab:: Ensemble models
@@ -515,18 +552,30 @@ There are two methods in prog_models through which multiple models can be used t
For more information, see the example below:
- * :download:`examples.ensemble <../../prog_models/examples/ensemble.py>`
+ * :download:`examples.ensemble <../../progpy/examples/ensemble.py>`
+
+ .. tab:: MixtureOfExperts models
+
+    Mixture of Experts (MoE) models combine multiple models of the same system, similar to Ensemble models. Unlike Ensemble Models, the aggregation is done by selecting the "best" model, that is, the model that has performed best in the past. Each model has a 'score' that is tracked in the state, and this score determines which model is best.
+
+ .. code-block:: python
+
+        >>> m = MixtureOfExpertsModel([model1, model2])
+
+ For more information, see the example below:
+
+ * :download:`examples.mixture_of_experts <../../progpy/examples/mixture_of_experts.py>`
Other Examples
----------------------------
-* :download:`examples.benchmarking <../../prog_models/examples/benchmarking.py>`
+* :download:`examples.benchmarking <../../progpy/examples/benchmarking.py>`
.. automodule:: benchmarking
-* :download:`examples.sensitivity <../../prog_models/examples/sensitivity.py>`
+* :download:`examples.sensitivity <../../progpy/examples/sensitivity.py>`
.. automodule:: sensitivity
-* :download:`examples.serialization <../../prog_models/examples/serialization.py>`
+* :download:`examples.serialization <../../progpy/examples/serialization.py>`
.. automodule:: serialization
Tips
@@ -538,8 +587,8 @@ Tips
References
----------------------------
-.. [#Goebel2017] Kai Goebel, Matthew John Daigle, Abhinav Saxena, Indranil Roychoudhury, Shankar Sankararaman, and José R Celaya. Prognostics: The science of making predictions. 2017
+.. [Goebel2017] Kai Goebel, Matthew John Daigle, Abhinav Saxena, Indranil Roychoudhury, Shankar Sankararaman, and José R Celaya. Prognostics: The science of making predictions. 2017
-.. [#Celaya2012] J Celaya, A Saxena, and K Goebel. Uncertainty representation and interpretation in model-based prognostics algorithms based on Kalman filter estimation. Annual Conference of the Prognostics and Health Management Society, 2012.
+.. [Celaya2012] J Celaya, A Saxena, and K Goebel. Uncertainty representation and interpretation in model-based prognostics algorithms based on Kalman filter estimation. Annual Conference of the Prognostics and Health Management Society, 2012.
-.. [#Sankararaman2011] S Sankararaman, Y Ling, C Shantz, and S Mahadevan. Uncertainty quantification in fatigue crack growth prognosis. International Journal of Prognostics and Health Management, vol. 2, no. 1, 2011.
+.. [Sankararaman2011] S Sankararaman, Y Ling, C Shantz, and S Mahadevan. Uncertainty quantification in fatigue crack growth prognosis. International Journal of Prognostics and Health Management, vol. 2, no. 1, 2011.
diff --git a/docs/_sources/prog_server_guide.rst.txt b/docs/_sources/prog_server_guide.rst.txt
index 7226810..b807e55 100644
--- a/docs/_sources/prog_server_guide.rst.txt
+++ b/docs/_sources/prog_server_guide.rst.txt
@@ -8,11 +8,11 @@ prog_server Guide
-The Prognostics As-A-Service (PaaS) Sandbox (a.k.a., prog_server) is a simplified implementation of a Service-Oriented Architecture (SOA) for performing prognostics (estimation of time until events and future system states) of engineering systems. The PaaS Sandbox is a wrapper around the :ref:`Prognostics Algorithms Package `__ and :ref:`Prognostics Models Package `, allowing one or more users to access the features of these packages through a REST API. The package is intended to be used as a research tool to prototype and benchmark Prognostics As-A-Service (PaaS) architectures and work on the challenges facing such architectures, including Generality, Communication, Security, Environmental Complexity, Utility, and Trust.
+The Prognostics As-A-Service (PaaS) Sandbox (a.k.a., prog_server) is a simplified implementation of a Service-Oriented Architecture (SOA) for performing prognostics (estimation of time until events and future system states) of engineering systems. The PaaS Sandbox is a wrapper around the ProgPy package, allowing one or more users to access the features of the package through a REST API. The package is intended to be used as a research tool to prototype and benchmark Prognostics As-A-Service (PaaS) architectures and work on the challenges facing such architectures, including Generality, Communication, Security, Environmental Complexity, Utility, and Trust.
The PaaS Sandbox is actually two packages, prog_server and prog_client. The prog_server package is a prognostics server that provides the REST API. The prog_client package is a python client that provides functions to interact with the server via the REST API.
-prog_server uses the :ref:`Prognostics Algorithms Package ` and :ref:`Prognostics Models Package `.
+prog_server uses ProgPy. See the :ref:`State Estimation and Prediction Guide ` and :ref:`Modeling and Simulation Guide `.
The PaaS Sandbox is a simplified version of the Prognostics As-A-Service Architecture implemented as the PaaS/SWS Safety Service software by the NASA System Wide Safety (SWS) project, building upon the original work of the Convergent Aeronautics Solutions (CAS) project. This implementation is a research tool, and is therefore missing important features that should be present in a full implementation of the PaaS architecture, such as authentication and persistent state management.
@@ -23,7 +23,7 @@ Installing prog_server
.. tab:: Stable Version (Recommended)
- The latest stable release of `prog_server` is hosted on PyPi. For most users (unless you want to contribute to the development of `prog_server`), the version on PyPi will be adequate. To install from the command line, use the following command:
+ The latest stable release of `prog_server` is hosted on PyPi. For most users, this version will be adequate. To install from the command line, use the following command:
.. code-block:: console
@@ -43,9 +43,9 @@ Installing prog_server
About
---------
-`prog_server` uses the :ref:`Prognostics Algorithms Package ` and :ref:`Prognostics Models Package `. The best way to learn how to use prog_server is to first learn how to use these packages. See :ref:`Prognostics Algorithms Package Docs ` and :ref:`Prognostics Models Package Docs ` for more details.
+`prog_server` uses ProgPy. The best way to learn how to use prog_server is to first learn how to use that package. See :ref:`State Estimation and Prediction Guide ` and :ref:`Modeling and Simulation Guide ` for more details.
-The PaaS Sandbox is actually two packages, ``prog_server`` and ``prog_client``. The ``prog_server`` package is the server that provides the REST API. The ``prog_client`` package is a python client that uses the REST API (see `prog_client `__). The ``prog_server`` package is the PaaS Sandbox Server. Once started the server can accept requests from one or more applications requesting prognostics, using its REST API (described in `prog_server_api`).
+The PaaS Sandbox is actually two packages, ``prog_server`` and ``prog_client``. The ``prog_server`` package is the PaaS Sandbox Server, which provides the REST API; once started, it can accept requests from one or more applications requesting prognostics (the API is described in :ref:`prog_server API Reference`). The ``prog_client`` package is a Python client that uses the REST API (see :py:class:`prog_client.Session`).
Starting the prog_server
--------------------------
diff --git a/docs/_sources/releases.rst b/docs/_sources/releases.rst
index 173c1d4..84054fa 100644
--- a/docs/_sources/releases.rst
+++ b/docs/_sources/releases.rst
@@ -4,7 +4,35 @@ Release Notes
.. .. contents::
.. :backlinks: top
-Updates in V1.6
+Updates in v1.7
+----------------------
+
+progpy
+**************
+* Started "ProgPy Short Course": A series of Jupyter Notebooks designed to help users get started with ProgPy and understand how to use it for prognostics. See https://github.com/nasa/progpy/tree/master/examples
+* Updates to improve composite model:
+ * Support setting parameters in composed models using [model].[param] format (e.g., composite_model["model1.Param1"] = 12)
+  * Support adding functions to a composite model. Useful for simple translations
+* Prediction and simulation event strategy: for models with multiple events, you can now specify whether prediction or simulation should end when "first" or "any" of the events is met
+* Updates to parameter estimation
+  * Users can now estimate nested parameters (e.g., parameters['x0']['a']) using a tuple. For example, params=(('x0', 'a'), ...)
+  * MSE updated to include a penalty if the model becomes unstable (i.e., returns NaN) before the minimum threshold. This encourages parameter estimation to converge on parameters for which the model is stable
+* Tensorflow is no longer installed by default (this is important for users who are space constrained). If you're using the data-driven features, install ProgPy like so: pip install progpy[datadriven] or pip install -e '.[datadriven]' (if using a local copy)
+* Support for Python 3.12
+* Removed some warnings
+* Various Bugfixes and Performance optimizations
+
+**Notes for upgrading:**
+
+* If you're using the data-driven features, install ProgPy like so: pip install progpy[datadriven] or pip install -e '.[datadriven]' (if using a local copy)
+* Use the "events" keyword instead of "threshold_keys" in simulation, as sketched below
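+
+For example, a minimal sketch of the renamed keyword (assuming a model ``m`` with an ``EOD`` event and a loading function ``future_loading``):
+
+.. code-block:: python
+
+   >>> # v1.6 and earlier: m.simulate_to_threshold(future_loading, threshold_keys=['EOD'])
+   >>> results = m.simulate_to_threshold(future_loading, events=['EOD'])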
+
+prog_server
+**************
+* Add support for custom model, state_estimator, and predictor
+* New output and output prediction endpoints
+* Various bug fixes and optimizations
+
+Updates in v1.6
----------------------
progpy
@@ -27,14 +55,14 @@ Updates in V1.5
prog_models
***************
-* **Direct Models**: Added support for new model type: Direct Models. Direct Models directly map current state and future load to time of event, rather than state-transition models which simulate forward to calculate time of event. They're created by implementing the :py:meth:`prog_models.PrognosticsModel.time_of_event`. See `direct model example `__ for example of use.
-* New model types that combine multiple models.
+* **Direct Models**: Added support for new model type: Direct Models. Direct Models directly map current state and future load to time of event, rather than state-transition models which simulate forward to calculate time of event. They're created by implementing the :py:meth:`prog_models.PrognosticsModel.time_of_event`.
+* New model types that combine multiple models. See `06. Combining Models `__ for example of use.
- * **Ensemble Model**: Combinations of multiple models of the same system where results are aggregated. See `ensemble example `__ for example of use.
- * **Composite Model**: Combinations of models of different systems that are interdependent. See `composite example `__ for example of use.
+ * **Ensemble Model**: Combinations of multiple models of the same system where results are aggregated.
+ * **Composite Model**: Combinations of models of different systems that are interdependent.
* **New Model Type**: Aircraft flight model interface, :py:class:`prog_models.models.aircraft_model.AircraftModel`. Anticipated prognostics applications with the aircraft flight model include estimating and predicting loading of other aircraft systems (e.g., powertrain) and safety metrics.
-* New Model: Small Rotorcraft AircraftModel. See `example `__.
+* New Model: Small Rotorcraft AircraftModel.
* New DataModel: Polynomial Chaos Expansion (PCE) Direct Surrogate Model (:py:class:`prog_models.data_models.PolynomialChaosExpansion`). See `chaos example `__ for example of use.
* Started transition of InputContainers, StateContainers, OutputContainer and SimResult to use Pandas DataFrames. This release will bring the interface more in compliance with DataFrames. v1.6 will fully transition the classes to DataFrames.
* Implemented new metrics that can be used in :py:meth:`prog_models.PrognosticsModel.calc_error`: Root Mean Square Error (RMSE), Maximum Error (MAX_E), Mean Absolute Error (MAE), Mean Absolute Percentage Error (MAPE), and Dynamic Time Warping (DTW)
@@ -65,13 +93,13 @@ prog_models
* **Data-Driven Models**
* Created new :py:class:`prog_models.data_models.DataModel` class as interface/superclass for all data-driven models. Data-driven models are interchangeable in use (e.g., simulation, use with prog_algs) with physics-based models. DataModels can be trained using data (:py:meth:`prog_models.data_models.DataModel.from_data`), or an existing model (:py:meth:`prog_models.data_models.DataModel.from_model`)
- * Introduced new LSTM State Transition DataModel (:py:class:`prog_models.data_models.LSTMStateTransitionModel`). See :download:`examples.lstm_model <../../prog_models/examples/lstm_model.py>`, :download:`examples.full_lstm_model <../../prog_models/examples/full_lstm_model.py>`, and :download:`examples.custom_model <../../prog_models/examples/custom_model.py>` for examples of use
+ * Introduced new LSTM State Transition DataModel (:py:class:`prog_models.data_models.LSTMStateTransitionModel`).
* DMD model (:py:class:`prog_models.data_models.DMDModel`) updated to new data-driven model interface. Can now be created from data as well as an existing model
* Added ability to integrate training noise to data for DMD Model (:py:class:`prog_models.data_models.DMDModel`)
* **New Model**: Single-Phase DC Motor (:py:class:`prog_models.models.DCMotorSP`)
* Added the ability to select integration method when simulating (see ``integration_method`` keywork argument for :py:func:`prog_models.PrognosticsModel.simulate_to_threshold`). Current options are Euler and RK4
-* New feature allowing serialization of model parameters as JSON. See :py:meth:`prog_models.PrognosticsModel.to_json`, :py:meth:`prog_models.PrognosticsModel.from_json`, and serialization example (:download:`examples.serialization <../../prog_models/examples/serialization.py>`)
+* New feature allowing serialization of model parameters as JSON. See :py:meth:`prog_models.PrognosticsModel.to_json`, :py:meth:`prog_models.PrognosticsModel.from_json`, and serialization example
* Added automatic step size feature in simulation. When enabled, step size will adapt to meet the exact save_pts and save_freq. Step size range can also be bounded
* New Example Model: Simple Paris' Law (:py:class:`prog_models.models.ParisLawCrackGrowth`)
* Added ability to set bounds when estimating parameters (See :py:meth:`prog_models.PrognosticsModel.estimate_params`)
@@ -95,10 +123,10 @@ Updates in V1.3
prog_models
**************
-* **Surrogate Models** Added initial draft of new feature to generate surrogate models automatically from :class:`prog_models.PrognosticsModel`. (See :download:`examples.generate_surrogate <../../prog_models/examples/generate_surrogate.py>` example). Initial implementation uses Dynamic Mode Decomposition. Additional Surrogate Model Generation approaches will be explored for future releases. [Developed by NASA's DRF Project]
-* **New Example Models** Added new :class:`prog_models.models.DCMotor`, :class:`prog_models.models.ESC`, and :class:`prog_models.models.Powertrain` models (See :download:`examples.sim_powertrain <../../prog_models/examples/sim_powertrain.py>` example) [Developed by NASA's SWS Project]
-* **Datasets** Added new feature that allows users to access prognostic datasets programmatically (See :download:`examples.dataset <../../prog_models/examples/dataset.py>`)
-* Added new :class:`prog_models.LinearModel` class - Linear Prognostics Models can be represented by a Linear Model. Similar to PrognosticsModels, LinearModels are created by subclassing the LinearModel class. Some algorithms will only work with Linear Models. See :download:`examples.linear_model <../../prog_models/examples/linear_model.py>` example for detail
+* **Surrogate Models** Added initial draft of new feature to generate surrogate models automatically from :class:`prog_models.PrognosticsModel`. Initial implementation uses Dynamic Mode Decomposition. Additional Surrogate Model Generation approaches will be explored for future releases. [Developed by NASA's DRF Project]
+* **New Example Models** Added new :class:`prog_models.models.DCMotor`, :class:`prog_models.models.ESC`, and :class:`prog_models.models.Powertrain` models [Developed by NASA's SWS Project]
+* **Datasets** Added new feature that allows users to access prognostic datasets programmatically
+* Added new :class:`prog_models.LinearModel` class - Linear Prognostics Models can be represented by a Linear Model. Similar to PrognosticsModels, LinearModels are created by subclassing the LinearModel class. Some algorithms will only work with Linear Models.
* Added new StateContainer/InputContainer/OutputContainer objects for classes which allow for data access in matrix form and enforce expected keys.
* Added new metric for SimResult: :py:func:`prog_models.sim_result.SimResult.monotonicity`.
* :py:func:`prog_models.sim_result.SimResult.plot` now automatically shows legends
@@ -118,11 +146,11 @@ prog_models
prog_algs
**********
-* **New State Estimator Added** :class:`prog_algs.state_estimators.KalmanFilter`. Works with models derived from :class:`prog_models.LinearModel`. See :download:`examples.kalman_filter <../../prog_algs/examples/kalman_filter.py>`
+* **New State Estimator Added** :class:`prog_algs.state_estimators.KalmanFilter`. Works with models derived from :class:`prog_models.LinearModel`.
* **New Predictor Added** :class:`prog_algs.predictors.UnscentedTransformPredictor`.
-* Initial state estimate (x0) can now be passed as `UncertainData` to represent initial state uncertainty. See :download:`examples.playback <../../prog_algs/examples/playback.py>`
-* Added new metrics for :class:`prog_algs.predictors.ToEPredictionProfile`: Prognostics horizon, Cumulative Relative Accuracy (CRA). See :download:`examples.playback <../../prog_algs/examples/playback.py>`
-* Added ability to plot :class:`prog_algs.predictors.ToEPredictionProfile`: profile.plot(). See :download:`examples.playback <../../prog_algs/examples/playback.py>`
+* Initial state estimate (x0) can now be passed as `UncertainData` to represent initial state uncertainty.
+* Added new metrics for :class:`prog_algs.predictors.ToEPredictionProfile`: Prognostics horizon, Cumulative Relative Accuracy (CRA).
+* Added ability to plot :class:`prog_algs.predictors.ToEPredictionProfile`: profile.plot().
* Added new metric for :class:`prog_algs.predictors.Prediction`: Monotonicity, Relative Accuracy (RA)
* Added new metric for :class:`prog_algs.uncertain_data.UncertainData` (and subclasses): Root Mean Square Error (RMSE)
* Added new describe method for :class:`prog_algs.uncertain_data.UncertainData` (and subclasses)
diff --git a/docs/_sources/releases.rst.txt b/docs/_sources/releases.rst.txt
index 09bd578..173c1d4 100644
--- a/docs/_sources/releases.rst.txt
+++ b/docs/_sources/releases.rst.txt
@@ -4,6 +4,59 @@ Release Notes
.. .. contents::
.. :backlinks: top
+Updates in V1.6
+----------------------
+
+progpy
+**************
+* Combined previous prog_models and prog_algs packages into a single package, progpy.
+* Added new :py:class:`progpy.MixtureOfExpertsModel`, which combines multiple models of the same system into a single model, where only the best of the constituent models will be used at each timestep.
+* Added ability to set random seed in :py:class:`progpy.loading.GaussianNoiseWrapper`, allowing for repeatable experiments
+* Various bug fixes and performance improvements
+
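+A minimal sketch of the new Mixture of Experts model (the constructor form is assumed from the note above; ThrownObject is used purely as a stand-in system)::
+
+    from progpy import MixtureOfExpertsModel
+    from progpy.models import ThrownObject
+
+    m1 = ThrownObject()                    # nominal parameterization
+    m2 = ThrownObject(g=-9.5)              # alternate parameterization of the same system
+    moe = MixtureOfExpertsModel((m1, m2))  # best-scoring model is used at each timestep
+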
+Upgrading from v1.5
+^^^^^^^^^^^^^^^^^^^^^^
+v1.6 combined prog_models and prog_algs into a single package progpy. To upgrade to 1.6, you will need to download the new progpy package (pip install progpy) and update all imports to use progpy. For example `from prog_models import PrognosticsModel` becomes `from progpy import PrognosticsModel`, and `from prog_algs import predictors` becomes `from progpy import predictors`.
+
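+Concretely, the import updates look like::
+
+    # Before (prog_models / prog_algs, v1.5 and earlier):
+    from prog_models import PrognosticsModel
+    from prog_algs import predictors
+
+    # After (progpy, v1.6+):
+    from progpy import PrognosticsModel
+    from progpy import predictors
+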
+prog_server
+************
+* Updated to work with progpy v1.6
+
+Updates in V1.5
+-----------------------
+
+prog_models
+***************
+* **Direct Models**: Added support for a new model type: Direct Models. Direct Models map current state and future load directly to time of event, unlike state-transition models, which simulate forward to calculate time of event. They're created by implementing the :py:meth:`prog_models.PrognosticsModel.time_of_event` method (a sketch appears after this list). See `direct model example `__ for example of use.
+* New model types that combine multiple models.
+
+ * **Ensemble Model**: Combinations of multiple models of the same system where results are aggregated. See `ensemble example `__ for example of use.
+ * **Composite Model**: Combinations of models of different systems that are interdependent. See `composite example `__ for example of use.
+
+* **New Model Type**: Aircraft flight model interface, :py:class:`prog_models.models.aircraft_model.AircraftModel`. Anticipated prognostics applications with the aircraft flight model include estimating and predicting loading of other aircraft systems (e.g., powertrain) and safety metrics.
+* New Model: Small Rotorcraft AircraftModel. See `example `__.
+* New DataModel: Polynomial Chaos Expansion (PCE) Direct Surrogate Model (:py:class:`prog_models.data_models.PolynomialChaosExpansion`). See `chaos example `__ for example of use.
+* Started transition of InputContainers, StateContainers, OutputContainers, and SimResult to use Pandas DataFrames. This release brings the interface more into compliance with DataFrames. v1.6 will fully transition the classes to DataFrames.
+* Implemented new metrics that can be used in :py:meth:`prog_models.PrognosticsModel.calc_error`: Root Mean Square Error (RMSE), Maximum Error (MAX_E), Mean Absolute Error (MAE), Mean Absolute Percentage Error (MAPE), and Dynamic Time Warping (DTW)
+* Error calculation metric (above) can now be set when calling :py:meth:`prog_models.PrognosticsModel.estimate_params`
+* Reworked integration methods in simulation
+
+ * New integration methods: RK4 and methods from scipy.integrate
+ * Integration can now be set at the model level. For continuous models the specified integration method will apply when calling next_state
+
+* Python3.11 support
+* Various bug fixes and performance improvements
+
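+A hedged sketch of a direct model, implementing time_of_event with an illustrative closed-form solution (the physics and parameter values here are examples, not from the package)::
+
+    from prog_models import PrognosticsModel
+
+    class DirectThrownObject(PrognosticsModel):
+        inputs = []
+        states = ['x', 'v']
+        outputs = ['x']
+        events = ['impact']
+
+        def initialize(self, u=None, z=None):
+            return self.StateContainer({'x': 1.83, 'v': 40})
+
+        def output(self, x):
+            return self.OutputContainer({'x': x['x']})
+
+        def time_of_event(self, x, *args, **kwargs):
+            g = -9.81
+            # closed-form time until x(t) = x0 + v*t + 0.5*g*t^2 reaches 0
+            t_impact = (-x['v'] - (x['v']**2 - 2 * g * x['x'])**0.5) / g
+            return {'impact': t_impact}
+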
+prog_algs
+**********
+* Integration method can now be set for state estimation and prediction by setting model.parameters[‘integration_method’].
+* Minimum time step can now be set in state estimation, using the argument 'dt'. This is useful for models that become unstable with large time steps.
+* Python3.11 support
+
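+For example, assuming a continuous model instance ``m``::
+
+    m.parameters['integration_method'] = 'rk4'  # used in state estimation and prediction
+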
+prog_server
+************
+* Python3.11 support
+
Updates in V1.4
-----------------------
diff --git a/docs/_static/documentation_options.js b/docs/_static/documentation_options.js
index 7944682..11bd8a5 100644
--- a/docs/_static/documentation_options.js
+++ b/docs/_static/documentation_options.js
@@ -1,6 +1,6 @@
var DOCUMENTATION_OPTIONS = {
URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
- VERSION: '1.6',
+ VERSION: '1.7',
LANGUAGE: 'None',
COLLAPSE_INDEX: false,
BUILDER: 'html',
diff --git a/docs/api_ref.html b/docs/api_ref.html
index c21002f..4e60e2e 100644
--- a/docs/api_ref.html
+++ b/docs/api_ref.html
@@ -9,7 +9,7 @@
- API Reference — ProgPy Python Packages 1.6 documentation
+ API Reference — ProgPy Python Packages 1.7 documentation
diff --git a/docs/api_ref/prog_server.html b/docs/api_ref/prog_server.html
index 7a23f9e..a717fe7 100644
--- a/docs/api_ref/prog_server.html
+++ b/docs/api_ref/prog_server.html
@@ -9,7 +9,7 @@
- prog_server API Reference — ProgPy Python Packages 1.6 documentation
+ prog_server API Reference — ProgPy Python Packages 1.7 documentation
diff --git a/docs/api_ref/prog_server/load_ests.html b/docs/api_ref/prog_server/load_ests.html
index 7771f5d..1d18f3c 100644
--- a/docs/api_ref/prog_server/load_ests.html
+++ b/docs/api_ref/prog_server/load_ests.html
@@ -9,7 +9,7 @@
- Load Estimators — ProgPy Python Packages 1.6 documentation
+ Load Estimators — ProgPy Python Packages 1.7 documentation
@@ -359,13 +359,11 @@
Moving average load estimator. Load is estimated as the mean of the last window_size samples. Noise can be added using the following optional configuration parameters:
base_std: standard deviation of noise
std_slope: Increase in std with time (e.g., 0.1 = increase of 0.1 in std per second)
t0: Starting time for calculation of std
std of applied noise is defined as base_std + std_slope * (t - t0). By default, no noise is added
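A minimal sketch of the estimator's behavior as described above (names follow the parameters listed; this is illustrative, not the package implementation):

    import numpy as np

    def moving_average_load(samples, t, window_size=10,
                            base_std=0.0, std_slope=0.0, t0=0.0):
        # load = mean of the last window_size samples
        load = np.mean(samples[-window_size:])
        # optional noise: std grows linearly with time
        std = base_std + std_slope * (t - t0)
        return load + np.random.normal(0.0, std) if std > 0 else load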
A list of PrognosticsModels to be combined into a single model.
Provided in one of two forms:
-A list of PrognosticsModels. The name of each model will be the class name. A number will be added for duplicates
-A list of tuples where the first element is the model name and the second element is the model
+A list of PrognosticsModels or functions. The name of each model will be the class name for models or ‘function’ for functions. A number will be added for duplicates
+A list of tuples where the first element is the model/function name and the second element is the model/function
Note: Order provided will be the order that models are executed
-connections (list[tuple[str, str]], optional) – A list of tuples where the first element is the name of the output, state, or performance metrics of one model and the second element is the name of the input of another model.
+connections (list[tuple[str, str]], optional) – A list of tuples where the first element is the name of the output, state, or performance metrics of one model or function return and the second element is the name of the input of another model or argument of a function.
The first element of the tuple must be of the form “model_name.output_name”, “model_name.state_name”, or “model_name.performance_metric_key”.
The second element of the tuple must be of the form “model_name.input_name”.
For example, if you have two models, “Batt1” and “Batt2”, and you want to connect the output of “Batt1” to the input of “Batt2”, you would use the following connection: (“Batt1.output”, “Batt2.input”)
Keyword Arguments
outputs (list[str]) – Model outputs in format “model_name.output_name”. Must be subset of all outputs from models. If not provided, all outputs will be included.
Model parameters can be set and accessed using the ‘[model].[param]’ format. For example, for composite model m, m[‘foo.bar’] would set the parameter ‘bar’ for the model ‘foo’.
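A hedged sketch of the wiring described above, using two battery models (the voltage-to-current connection is structurally valid but physically arbitrary; it only demonstrates the “model.key” syntax):

    from progpy import CompositeModel
    from progpy.models import BatteryCircuit

    batt1 = BatteryCircuit()
    batt2 = BatteryCircuit()

    composite = CompositeModel(
        [("Batt1", batt1), ("Batt2", batt2)],   # (name, model) tuples, executed in order
        connections=[("Batt1.v", "Batt2.i")],   # output "v" of Batt1 -> input "i" of Batt2
    )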
times (list[list]) – list of time data for use in data. Each element is the times for a single run of size (n_times)
inputs (list[np.array]) – list of input data for use in data. Each element is the inputs for a single run of size (n_times, n_inputs)
outputs (list[np.array]) – list of output data for use in data. Each element is the outputs for a single run of size (n_times, n_outputs)
states (list[np.array], optional) – list of state data for use in data. Each element is the states for a single run of size (n_times, n_states)
event_states (list[np.array], optional) – list of event state data for use in data. Each element is the event states for a single run of size (n_times, n_event_states)
Fraction (0-1) of data resulting from progpy.PrognosticsModel.simulate_to_threshold() used to train DMD surrogate model
e.g. if trim_data_to = 0.7 and the simulated data spans from t=0 to 100, the surrogate model is trained on the data from t=0 to 70
Note: To trim data to a set time, use the ‘horizon’ parameter
stability_tol (float, optional) – Value that determines the tolerance for DMD matrix stability
training_noise (float, optional) – Noise added to the training data sampled from a standard normal distribution with standard deviation of training_noise. Adding noise to the training data results in a slight perturbation that removes any linear dependencies among the data
input_keys (list[str], optional) – List of input keys
state_keys (list[str], optional) – List of state keys
output_keys (list[str], optional) – List of output keys
event_keys (list[str], optional) – List of event keys
load_functions (list[function]) – Each index is a callable loading function of (t, x = None) -> z used to predict future load at a given time (t) and state (x)
Keyword Arguments
add_dt (bool) – If the timestep should be added as an input
Additional configuration parameters from progpy.PrognosticsModel.simulate_to_threshold(). These can be an array (of the same length as load_functions) of configs for each individual sim, or one value to apply to all
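A hedged example of a loading function with the (t, x = None) -> z signature described above (the input key 'i' assumes a battery-style model):

    def future_load(t, x=None):
        return {'i': 2.0}   # constant 2 A draw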
@@ -547,6 +547,17 @@
LSTMStateTransitionModel
A state transition model built using a Keras LSTM model.
State transition models map from the input at time t and output at time t-1 plus historical data from a set window to the output at time t.
Most users will use the LSTMStateTransitionModel.from_data() method to create a model, but the model can be created by passing a model directly into the constructor. The LSTM model in this method maps from [u_t-n+1, z_t-n, …, u_t, z_t-1] to z_t. Past inputs are stored in the model's internal state. Actual calculation of output is performed when LSTMStateTransitionModel.output() is called. When used in simulation, that may not be until the simulation results are accessed.
Note
ProgPy must be installed with the [datadriven] option to use the LSTM model, e.g., pip install 'progpy[datadriven]' or pip install -e '.[datadriven]'
inputs (list[np.array]) – list of input data for use in data. Each element is the inputs for a single run of size (n_times, n_inputs)
outputs (list[np.array]) – list of output data for use in data. Each element is the outputs for a single run of size (n_times, n_outputs)
event_states (list[np.array], optional) – list of event state data for use in data. Each element is the event state for a single run of size (n_times, n_events)
t_met (list[np.array], optional) – list of threshold met data for use in data. Each element is if the threshold has been met for a single run of size (n_times, n_events)
Keyword Arguments
window (int) – Number of historical points used in the model. I.e., if window is 3, the model will map from [t-3, t-2, t-1] to t
input_keys (list[str]) – List of keys to use to identify input. If not supplied u[#] will be used to identify inputs
output_keys (list[str]) – List of keys to use to identify output. If not supplied z[#] will be used to identify outputs
event_keys (list[str]) – List of keys to use to identify events for event state and threshold met. If not supplied event[#] will be used to identify events
validation_percentage (float) – Percentage of data to use for validation, between 0-1
epochs (int) – Number of epochs (i.e., iterations) to train the model. More epochs means better results (to a point), but more time to train. Note: large numbers of epochs may result in overfitting.
layers (int) – Number of LSTM layers to use. More layers can represent more complex systems, but are less efficient. Note: 2 layers is typically enough for most complex systems. Default: 1
units (int or list[int]) – Number of units (i.e., dimensionality of output state) used in each LSTM layer. Using a scalar value will use the same number of units for each layer.
activation (str or list[str]) – Activation function to use for each layer
dropout (float) – Dropout rate to be applied. Dropout helps avoid overfitting
normalize (bool) – If the data should be normalized. This is recommended for most cases.
early_stopping (bool) – If early stopping is desired. Default is True
workers (int) – Number of workers to use when training. One worker indicates no multiprocessing
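A hedged sketch of LSTMStateTransitionModel.from_data() using the keyword arguments above (run_inputs and run_outputs are placeholder lists of numpy arrays you would collect from your system):

    from progpy.data_models import LSTMStateTransitionModel

    m = LSTMStateTransitionModel.from_data(
        inputs=run_inputs,          # list of (n_times, n_inputs) arrays
        outputs=run_outputs,        # list of (n_times, n_outputs) arrays
        window=12,                  # map the last 12 points to the next output
        epochs=30,
        units=64,
        validation_percentage=0.2,
    )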
Create a Data Model from an existing PrognosticsModel (i.e., a surrogate model). Generates data through simulation with supplied load functions. Then calls from_data() to generate the model.
load_functions (list[function]) – Each index is a callable loading function of (t, x = None) -> z used to predict future load at a given time (t) and state (x)
Keyword Arguments
add_dt (bool) – If the timestep should be added as an input
Additional configuration parameters from progpy.PrognosticsModel.simulate_to_threshold(). These can be an array (of the same length as load_functions) of configs for each individual sim, or one value to apply to all
@@ -669,12 +679,12 @@
times (list[float]) – list of time data for use in data. Each element is the time such that inputs[i] is the inputs at time[i]
inputs (np.array) – list of input data for use in data. Each element is the inputs for a single run of size (n_samples, n_inputs*n_times)
time_of_event (np.array) – Array of time of event data for use in data. Each element is the time of event for a single run of size (n_samples, n_events)
input_keys (list[str]) – List of input keys for the inputs
Keyword Arguments
J (chaospy.Distribution, optional) – Joint distribution to sample from. Must include distribution for each timepoint for each input [u0_t0, u0_t1, …, u1_t0, …]. If not included, input_dists must be provided
input_dists (list[chaospy.Distribution], optional) – List of chaospy distributions for each input for each timepoint
order (int, optional) – Order of the polynomial chaos expansion
Error method to use when calculating error. Supported methods include:
MSE (Mean Squared Error) - DEFAULT
RMSE (Root Mean Squared Error)
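A hedged example of selecting one of the error methods above (the keyword name and data variables are assumptions for illustration):

    err = m.calc_error(times, inputs, outputs, method='RMSE')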
@@ -778,8 +788,8 @@
DataModel Interface
x0 (StateContainer, optional) – Initial state
dt (float, optional) – Maximum time step in simulation. Time step used in simulation is lower of dt and time between samples. Defaults to time between samples.
stability_tol (double, optional) –
Configurable cutoff value, between 0 and 1, that determines the fraction of the data points for which the model must be stable.
In some cases, a prognostics model will become unstable under certain conditions, after which point the model can no longer represent behavior.
stability_tol represents the fraction of the provided argument times that are required to be met in simulation,
@@ -788,13 +798,14 @@
keys (list[str] or list[tuple[str]]) – Parameter keys to optimize. Use tuple for nested parameters. For example, key (‘x0’, ‘a’) corresponds to m.parameters[‘x0’][‘a’].
times (list[float]) – Array of times for each sample
inputs (list[InputContainer]) – Array of input containers where input[x] corresponds to time[x]
outputs (list[OutputContainer]) – Array of output containers where output[x] corresponds to time[x]
method (str, optional) – Optimization method; see scipy.optimize.minimize for options
tol (int, optional) – Tolerance for termination. Depending on the provided minimization method, specifying tolerance sets solver-specific options to tol
error_method (str, optional) – Method to use in calculating error. See calc_error for options
bounds (tuple or dict, optional) – Bounds for optimization in format ((lower1, upper1), (lower2, upper2), …) or {key1: (lower1, upper1), key2: (lower2, upper2), …}
options (dict, optional) – Options passed to optimizer. See scipy.optimize.minimize for options
runs (list[tuple], deprecated) – Data from all runs, where runs[0] is the data from run 0. Each run consists of a tuple of arrays of times, input dicts, and output dicts. Use inputs, outputs, states, times, etc. instead
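A hedged sketch of estimate_params using the arguments documented above (the model, data variables, and parameter key are placeholders):

    m.estimate_params(
        times=times, inputs=inputs, outputs=outputs,
        keys=['thrower_height'],                  # parameters to optimize
        bounds={'thrower_height': (1.0, 2.5)},    # dict form of bounds
        error_method='RMSE',
    )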
times (list[list]) – list of time data for use in data. Each element is the times for a single run of size (n_times)
inputs (list[np.array]) – list of input data for use in data. Each element is the inputs for a single run of size (n_times, n_inputs)
states (list[np.array]) – list of state data for use in data. Each element is the states for a single run of size (n_times, n_states)
outputs (list[np.array]) – list of output data for use in data. Each element is the outputs for a single run of size (n_times, n_outputs)
event_states (list[np.array]) – list of event state data for use in data. Each element is the event states for a single run of size (n_times, n_event_states)
time_of_event (np.array) – Array of time of event data for use in data. Each element is the time of event for a single run of size (n_samples, n_events)
Create a Data Model from an existing PrognosticsModel (i.e., a surrogate model). Generates data through simulation with supplied load functions. Then calls from_data() to generate the model.
load_functions (list[function]) – Each index is a callable loading function of (t, x = None) -> z used to predict future load at a given time (t) and state (x)
Keyword Arguments
add_dt (bool) – If the timestep should be added as an input
Additional configuration parameters from progpy.PrognosticsModel.simulate_to_threshold(). These can be an array (of the same length as load_functions) of configs for each individual sim, or one value to apply to all
@@ -926,23 +937,23 @@
Generate a surrogate model to approximate the higher-fidelity model
Parameters
load_functions (List[abc.Callable]) – Each index is a callable loading function of (t, x = None) -> z used to predict future loading (output) at a given time (t) and state (x)
method (str, optional) – String indicating the surrogate modeling method to be used
Keyword Arguments
dt (float or abc.Callable, optional) – Same as in simulate_to_threshold; for DMD, this value is the time step of the training data
save_freq (float, optional) – Same as in simulate_to_threshold; for DMD, this value is the time step with which the surrogate model is generated
state_keys (List[str], optional) – List of state keys to be included in the surrogate model generation. keys must be a subset of those defined in the PrognosticsModel
input_keys (List[str], optional) – List of input keys to be included in the surrogate model generation. keys must be a subset of those defined in the PrognosticsModel
output_keys (List[str], optional) – List of output keys to be included in the surrogate model generation. keys must be a subset of those defined in the PrognosticsModel
event_keys (List[str], optional) – List of event_state keys to be included in the surrogate model generation. keys must be a subset of those defined in the PrognosticsModel
... (optional) – Keyword arguments from simulate_to_threshold (except save_pts)
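A hedged sketch of generating a surrogate from a higher-fidelity model (the model instance, load function, and keyword values are illustrative):

    def future_load(t, x=None):
        return m.InputContainer({'i': 2.0})

    surrogate = m.generate_surrogate([future_load], dt=0.1, save_freq=1.0)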
events (abc.Sequence[str] or str, optional) – Keys for events that will trigger the end of simulation.
If blank, simulation will occur if any event will be met
tuple: (mode, dt), where modes could be constant or auto. If auto, dt is maximum step size
str: mode - ‘auto’ or ‘constant’
integration_method (str, optional) – Integration method, e.g. ‘rk4’ or ‘euler’ (default: ‘euler’)
save_freq (float, optional) – Frequency at which output is saved (s), e.g., save_freq = 10. A save_freq of 0 will save every step.
save_pts (list[float], optional) – Additional ordered list of custom times where output is saved (s), e.g., save_pts = [50, 75]
eval_pts (list[float], optional) – Additional ordered list of custom times where simulation is guaranteed to be evaluated (though results are not saved, as with save_pts) when dt is auto (s), e.g., eval_pts = [50, 75]
horizon (float, optional) – Maximum time that the model will be simulated forward (s), e.g., horizon = 1000
first_output (OutputContainer, optional) – First measured output, needed to initialize state for some classes. Can be omitted for classes that don’t use this
-threshold_keys (abc.Sequence[str] or str, optional) – Keys for events that will trigger the end of simulation.
-If blank, simulation will occur if any event will be met
ConnectionError – Failed to download data. This may be because of issues with your internet connection or the datasets may have moved. Please check your internet connection and make sure you’re using the latest version of progpy.
ValueError – Battery not in dataset (should be RW1-28)
ConnectionError – Failed to download data. This may be because of issues with your internet connection or the datasets may have moved. Please check your internet connection and make sure you’re using the latest version of progpy.
Returns
Data and description as a tuple (description, data), where the data is a list of pandas DataFrames such that data[i] is the data for run i, corresponding with details[i], above. The columns of the dataframe are (‘relativeTime’, ‘current’ (amps), ‘voltage’, ‘temperature’ (°C)) in that order.
ConnectionError – Failed to download data. This may be because of issues with your internet connection or the datasets may have moved. Please check your internet connection and make sure you’re using the latest version of progpy.
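A hedged example of the programmatic dataset access described above (the loader module and battery id follow the RW1-28 convention noted in the ValueError; treat the exact names as assumptions):

    from progpy.datasets import nasa_battery

    description, data = nasa_battery.load_data(1)   # data[i] is a pandas DataFrame for run i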
An Ensemble Model is a collection of models which run together. The results of each model are aggregated using the aggregation_method function. This is generally done to improve the accuracy of prediction when you have multiple models that each represent part of the behavior, or represent a distribution of different behaviors.
Ensemble Models are constructed from a set of other models (e.g., m=EnsembleModel((m1,m2,m3))). The models then operate functionally as one prognostic model.
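For example, a hedged sketch (ThrownObject used as a stand-in for multiple parameterizations of one system):

    from progpy import EnsembleModel
    from progpy.models import ThrownObject

    m1, m2, m3 = ThrownObject(), ThrownObject(g=-9.5), ThrownObject(g=-10.0)
    m = EnsembleModel((m1, m2, m3))   # results of each model are aggregated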
process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state).
Can be number (e.g., .2) applied to every state, a dictionary of values for each
state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}), or a function (x) -> x
process_noise_dist (Optional, str) – distribution for process noise (e.g., normal, uniform, triangular)
measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn).
Can be number (e.g., .2) applied to every output, a dictionary of values for each
output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}), or a function (z) -> z
measurement_noise_dist (Optional, str) – distribution for measurement noise (e.g., normal, uniform, triangular)
process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state).
Can be number (e.g., .2) applied to every state, a dictionary of values for each
state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}), or a function (x) -> x
process_noise_dist (Optional, str) – distribution for process noise (e.g., normal, uniform, triangular)
measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn).
Can be number (e.g., .2) applied to every output, a dictionary of values for each
output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}), or a function (z) -> z
measurement_noise_dist (Optional, str) – distribution for measurement noise (e.g., normal, uniform, triangular)
qMaxThreshold (float) – Threshold for qMax (for threshold_met and event_state), after which the InsufficientCapacity event has occurred. Note: Battery manufacturers specify a threshold of 70-80% of qMax
process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state).
Can be number (e.g., .2) applied to every state, a dictionary of values for each
state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}), or a function (x) -> x
process_noise_dist (Optional, str) – distribution for process noise (e.g., normal, uniform, triangular)
measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn).
Can be number (e.g., .2) applied to every output, a dictionary of values for each
output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}), or a function (z) -> z
measurement_noise_dist (Optional, str) – distribution for measurement noise (e.g., normal, uniform, triangular)
process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state).
Can be number (e.g., .2) applied to every state, a dictionary of
values for each state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}),
or a function (x) -> x
process_noise_dist (Optional, str) – distribution for process noise,
e.g., normal, uniform, triangular
measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn).
Can be number (e.g., .2) applied to every output, a dictionary of
values for each output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}),
or a function (z) -> z
process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state).
Can be number (e.g., .2) applied to every state, a dictionary of values for each
state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}), or a function (x) -> x
process_noise_dist (Optional, str) – distribution for process noise (e.g., normal, uniform, triangular)
measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn).
Can be number (e.g., .2) applied to every output, a dictionary of values for each
output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}), or a function (z) -> z
measurement_noise_dist (Optional, str) – distribution for measurement noise (e.g., normal, uniform, triangular)
Kt (float) – Back EMF constant / torque constant (V/rad/sec)
B (float) – Friction in motor / damping (not a function of thrust) (Nm/(rad/s))
J (float) – Total load moment of inertia (motor shaft + load) (kg*m^2) - alternately, you can set these separately as Js and Jl
Js (float) – Moment of inertia of motor shaft (kg*m^2) - one component of J
Jl (float) – Moment of inertia from load (kg*m^2) - one component of J. Note: load is whatever the motor is attached to (e.g., propeller, valve, axle, etc.)
process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state).
Can be number (e.g., .2) applied to every state, a dictionary of values for each
state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}), or a function (x) -> x
process_noise_dist (Optional, str) – distribution for process noise (e.g., normal, uniform, triangular)
measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn).
Can be number (e.g., .2) applied to every output, a dictionary of values for each
output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}), or a function (z) -> z
measurement_noise_dist (Optional, str) – distribution for measurement noise (e.g., normal, uniform, triangular)
process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state).
Can be number (e.g., .2) applied to every state, a dictionary of values for each
state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}), or a function (x) -> x
process_noise_dist (Optional, str) – distribution for process noise (e.g., normal, uniform, triangular)
measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn).
Can be number (e.g., .2) applied to every output, a dictionary of values for each
output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}), or a function (z) -> z
measurement_noise_dist (Optional, str) – distribution for measurement noise (e.g., normal, uniform, triangular)
sawtooth_freq (float) – Frequency of PWM signal [Hz], default value in default_parameters.
process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state).
Can be number (e.g., .2) applied to every state, a dictionary of values for each
state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}), or a function (x) -> x
process_noise_dist (Optional, str) – distribution for process noise (e.g., normal, uniform, triangular)
measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn).
Can be number (e.g., .2) applied to every output, a dictionary of values for each
output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}), or a function (z) -> z
measurement_noise_dist (Optional, str) – distribution for measurement noise (e.g., normal, uniform, triangular)
c_q (float) – Dimensionless coefficient of torque of the propeller [-], (APC data, derived).
process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state).
Can be number (e.g., .2) applied to every state, a dictionary of values for each
state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}), or a function (x) -> x
process_noise_dist (Optional, str) – distribution for process noise (e.g., normal, uniform, triangular)
measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn).
Can be number (e.g., .2) applied to every output, a dictionary of values for each
output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}), or a function (z) -> z
measurement_noise_dist (Optional, str) – distribution for measurement noise (e.g., normal, uniform, triangular)
dt (Optional, float) – Time step in seconds for trajectory generation
air_density (Optional, float) – kg/m^3, atmospheric density
steadystate_input (Optional, float) – Input vector to maintain the vehicle in a stable position that is used to build the linearized model for the controller.
process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state).
Can be number (e.g., .2) applied to every state, a dictionary of values for each
state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}), or a function (x) -> x
process_noise_dist (Optional, str) – distribution for process noise (e.g., normal, uniform, triangular)
measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn).
Can be number (e.g., .2) applied to every output, a dictionary of values for each
output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}), or a function (z) -> z
measurement_noise_dist (Optional, str) – distribution for measurement noise (e.g., normal, uniform, triangular)
g (Optional, float) – Acceleration due to gravity (m/s^2). Default is 9.81 m/s^2 (standard gravity)
rho (Optional, float) – Air density (kg/m^3). Default is 1.225 (air density at sea level). Used in drag calculation
A (Optional, float) – Cross sectional area of object (m^2)
dt (float, optional) – Maximum time step in simulation. Time step used in simulation is lower of dt and time between samples. Defaults to time between samples.
stability_tol (double, optional) –
Configurable cutoff value, between 0 and 1, that determines the fraction of the data points for which the model must be stable.
In some cases, a prognostics model will become unstable under certain conditions, after which point the model can no longer represent behavior.
stability_tol represents the fraction of the provided argument times that are required to be met in simulation,
@@ -488,13 +488,14 @@
keys (list[str] or list[tuple[str]]) – Parameter keys to optimize. Use tuple for nested parameters. For example, key (‘x0’, ‘a’) corresponds to m.parameters[‘x0’][‘a’].
times (list[float]) – Array of times for each sample
inputs (list[InputContainer]) – Array of input containers where input[x] corresponds to time[x]
outputs (list[OutputContainer]) – Array of output containers where output[x] corresponds to time[x]
method (str, optional) – Optimization method; see scipy.optimize.minimize for options
tol (int, optional) – Tolerance for termination. Depending on the provided minimization method, specifying tolerance sets solver-specific options to tol
error_method (str, optional) – Method to use in calculating error. See calc_error for options
bounds (tuple or dict, optional) – Bounds for optimization in format ((lower1, upper1), (lower2, upper2), …) or {key1: (lower1, upper1), key2: (lower2, upper2), …}
options (dict, optional) – Options passed to optimizer. See scipy.optimize.minimize for options
runs (list[tuple], deprecated) – Data from all runs, where runs[0] is the data from run 0. Each run consists of a tuple of arrays of times, input dicts, and output dicts. Use inputs, outputs, states, times, etc. instead
Generate a surrogate model to approximate the higher-fidelity model
Parameters
load_functions (List[abc.Callable]) – Each index is a callable loading function of (t, x = None) -> z used to predict future loading (output) at a given time (t) and state (x)
method (str, optional) – String indicating the surrogate modeling method to be used
Keyword Arguments
dt (float or abc.Callable, optional) – Same as in simulate_to_threshold; for DMD, this value is the time step of the training data
save_freq (float, optional) – Same as in simulate_to_threshold; for DMD, this value is the time step with which the surrogate model is generated
state_keys (List[str], optional) – List of state keys to be included in the surrogate model generation. keys must be a subset of those defined in the PrognosticsModel
input_keys (List[str], optional) – List of input keys to be included in the surrogate model generation. keys must be a subset of those defined in the PrognosticsModel
output_keys (List[str], optional) – List of output keys to be included in the surrogate model generation. keys must be a subset of those defined in the PrognosticsModel
event_keys (List[str], optional) – List of event_state keys to be included in the surrogate model generation. keys must be a subset of those defined in the PrognosticsModel
... (optional) – Keyword arguments from simulate_to_threshold (except save_pts)
events (abc.Sequence[str] or str, optional) – Keys for events that will trigger the end of simulation.
If blank, simulation will occur if any event will be met
tuple: (mode, dt), where modes could be constant or auto. If auto, dt is maximum step size
str: mode - ‘auto’ or ‘constant’
integration_method (str, optional) – Integration method, e.g. ‘rk4’ or ‘euler’ (default: ‘euler’)
save_freq (float, optional) – Frequency at which output is saved (s), e.g., save_freq = 10. A save_freq of 0 will save every step.
save_pts (list[float], optional) – Additional ordered list of custom times where output is saved (s), e.g., save_pts = [50, 75]
eval_pts (list[float], optional) – Additional ordered list of custom times where simulation is guaranteed to be evaluated (though results are not saved, as with save_pts) when dt is auto (s), e.g., eval_pts = [50, 75]
horizon (float, optional) – Maximum time that the model will be simulated forward (s), e.g., horizon = 1000
first_output (OutputContainer, optional) – First measured output, needed to initialize state for some classes. Can be omitted for classes that don’t use this
-threshold_keys (abc.Sequence[str] or str, optional) – Keys for events that will trigger the end of simulation.
-If blank, simulation will occur if any event will be met
The key aspect of a load estimator is that it needs to be able to be called with either time or time and state. The most common way of accomplishing this is with a function, described in the dropdown below.
values (dict[str, list[float]]) – A dictionary with keys matching model inputs. Dictionary contains list of value for that input at until time in times (i.e., index 0 is the load until time[0], then it’s index 1). Values dictionary should have the same or one more value than times. If values has one more value than times, then the last value is the default and will be applied after the last time has passed
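A hedged sketch of the times/values scheme described above, where the one extra value acts as the default after the last time passes (the model instance, key 'i', and the numbers are illustrative):

    from progpy.loading import Piecewise

    future_load = Piecewise(m.InputContainer, [600, 1200], {'i': [2.0, 1.0, 0.5]})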
x_ref (dict[str, np.ndarray]) – dictionary of reference trajectories for each state variable (x, y, z, phi, theta, psi, …)
vehicle – UAV model object
@@ -500,12 +500,12 @@
Controllers
int_lag (int) – Length of the time window, in number of simulation steps, to integrate the state position error. The time window is defined
as the last int_lag discrete steps up until the current discrete time step. The integral of the state position error adds to
the overall state error to compute the gain matrix, and helps compensate for constant offsets between the reference (desired)
position and the actual position of the vehicle.
scheduled_var (str) – Variable used to create the scheduled controller gains; must correspond to a state key
index_scheduled_var (int) – Index corresponding to the scheduled_var in the state vector
Calculate monotonicity for a single prediction.
Given a single prediction, for each event: go through all predicted states and compare those to the next one.
Calculates monotonicity for each event key using its associated mean value in UncertainData.
Immutable data class for the result of a prediction, where the predictions are stored as UnweightedSamples. Is returned from the predict method of a sample-based prediction class (e.g., MonteCarlo). Objects of this class can be iterated and accessed like a list (e.g., prediction[0]), where prediction[n] represents a profile for sample n.
Parameters
times (list[float]) – Times for each data point where times[n] corresponds to data[:][n]
data (list[SimResult]) – Data points where data[n] is a SimResult for sample n
Calculate monotonicity for a single prediction.
Given a single prediction, for each event: go through all predicted states and compare those to the next one.
Calculates monotonicity for each event key using its associated mean value in UncertainData.
process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state).
Can be number (e.g., .2) applied to every state, a dictionary of values for each
state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}), or a function (x) -> x
process_noise_dist (Optional, str) – distribution for process noise (e.g., normal, uniform, triangular)
measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn).
Can be number (e.g., .2) applied to every output, a dictionary of values for each
output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}), or a function (z) -> z
measurement_noise_dist (Optional, str) – distribution for measurement noise (e.g., normal, uniform, triangular)
integration_method (Optional, str or OdeSolver) – Integration method used by next_state in continuous models, e.g. ‘rk4’ or ‘euler’ (default: ‘euler’). Could also be a SciPy integrator (e.g., scipy.integrate.RK45). If the model is discrete, this parameter will raise an exception.
Error method to use when calculating error. Supported methods include:
MSE (Mean Squared Error) - DEFAULT
RMSE (Root Mean Squared Error)
@@ -633,8 +633,8 @@
PrognosticsModel
x0 (StateContainer, optional) – Initial state
dt (float, optional) – Maximum time step in simulation. Time step used in simulation is lower of dt and time between samples. Defaults to time between samples.
stability_tol (double, optional) –
Configurable cutoff value, between 0 and 1, that determines the fraction of the data points for which the model must be stable.
In some cases, a prognostics model will become unstable under certain conditions, after which point the model can no longer represent behavior.
stability_tol represents the fraction of the provided argument times that are required to be met in simulation,
@@ -643,13 +643,14 @@
keys (list[str] or list[tuple[str]]) – Parameter keys to optimize. Use a tuple for nested parameters. For example, key (‘x0’, ‘a’) corresponds to m.parameters[‘x0’][‘a’].

times (list[float]) – Array of times for each sample

inputs (list[InputContainer]) – Array of input containers where input[x] corresponds to time[x]

outputs (list[OutputContainer]) – Array of output containers where output[x] corresponds to time[x]

method (str, optional) – Optimization method; see scipy.optimize.minimize for options

tol (int, optional) – Tolerance for termination. Depending on the provided minimization method, specifying tolerance sets solver-specific options to tol

error_method (str, optional) – Method to use in calculating error. See calc_error for options

bounds (tuple or dict, optional) – Bounds for optimization in format ((lower1, upper1), (lower2, upper2), …) or {key1: (lower1, upper1), key2: (lower2, upper2), …}

options (dict, optional) – Options passed to the optimizer. See scipy.optimize.minimize for options

runs (list[tuple], deprecated) – Data from all runs, where runs[0] is the data from run 0. Each run consists of a tuple of arrays of times, input dicts, and output dicts. Use inputs, outputs, states, times, etc. instead
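To tie these arguments together, below is a hedged, minimal sketch of parameter estimation with ProgPy's ThrownObject model. The model and its ‘throwing_speed’ key are real ProgPy names; the observed data values and bounds are illustrative, not from a real experiment.

# Minimal parameter-estimation sketch; data values are illustrative
from progpy.models import ThrownObject

m = ThrownObject()

# Observed data collected from the real system (made-up values here)
times = [0, 1, 2, 3]
inputs = [m.InputContainer({})] * 4          # ThrownObject takes no input
outputs = [m.OutputContainer({'x': z}) for z in (1.83, 36.5, 62.5, 77.9)]

# Fit the throwing speed to the data, constraining the search space
m.estimate_params(times=times, inputs=inputs, outputs=outputs,
                  keys=['throwing_speed'],
                  bounds={'throwing_speed': (20, 42)})
print(m.parameters['throwing_speed'])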
Generate a surrogate model to approximate the higher-fidelity model
Parameters
load_functions (List[abc.Callable]) – Each index is a callable loading function of (t, x = None) -> z used to predict future loading (output) at a given time (t) and state (x)
method (str, optional) – String indicating the surrogate modeling method to be used
Keyword Arguments
dt (float or abc.Callable, optional) – Same as in simulate_to_threshold; for DMD, this value is the time step of the training data

save_freq (float, optional) – Same as in simulate_to_threshold; for DMD, this value is the time step with which the surrogate model is generated

state_keys (List[str], optional) – List of state keys to be included in the surrogate model generation. Keys must be a subset of those defined in the PrognosticsModel

input_keys (List[str], optional) – List of input keys to be included in the surrogate model generation. Keys must be a subset of those defined in the PrognosticsModel

output_keys (List[str], optional) – List of output keys to be included in the surrogate model generation. Keys must be a subset of those defined in the PrognosticsModel

event_keys (List[str], optional) – List of event_state keys to be included in the surrogate model generation. Keys must be a subset of those defined in the PrognosticsModel
... (optional) – Keyword arguments from simulate_to_threshold (except save_pts)
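Below is a hedged, minimal sketch of surrogate generation with the DMD method. BatteryElectroChemEOD and its input key ‘i’ are real ProgPy names; the constant-load function and step sizes are illustrative.

# Minimal surrogate-generation sketch (DMD method)
from progpy.models import BatteryElectroChemEOD

m = BatteryElectroChemEOD()

def future_load(t, x=None):
    return m.InputContainer({'i': 2})    # constant 2 A discharge (illustrative)

# Train a surrogate from simulations driven by the loading function(s)
surrogate = m.generate_surrogate([future_load], dt=0.1, save_freq=1)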
events (abc.Sequence[str] or str, optional) – Keys for events that will trigger the end of simulation. If not provided, simulation will end when any event is met.
tuple: (mode, dt), where modes could be constant or auto. If auto, dt is maximum step size
str: mode - ‘auto’ or ‘constant’
integration_method (str, optional) – Integration method, e.g., ‘rk4’ or ‘euler’ (default: ‘euler’)

save_freq (float, optional) – Frequency at which output is saved (s), e.g., save_freq = 10. A save_freq of 0 will save every step.

save_pts (list[float], optional) – Additional ordered list of custom times where output is saved (s), e.g., save_pts = [50, 75]

eval_pts (list[float], optional) – Additional ordered list of custom times where simulation is guaranteed to be evaluated (though results are not saved, as with save_pts) when dt is auto (s), e.g., eval_pts = [50, 75]

horizon (float, optional) – Maximum time that the model will be simulated forward (s), e.g., horizon = 1000
first_output (OutputContainer, optional) – First measured output, needed to initialize state for some classes. Can be omitted for classes that don’t use this
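Below is a hedged, minimal sketch pulling these keyword arguments together, using ProgPy's ThrownObject model and its ‘impact’ event; the specific argument values are illustrative.

# Minimal simulate_to_threshold sketch; values are illustrative
from progpy.models import ThrownObject

m = ThrownObject()

def future_load(t, x=None):
    return m.InputContainer({})          # ThrownObject takes no input

results = m.simulate_to_threshold(
    future_load,
    events='impact',                     # stop when the 'impact' event is met
    dt=('auto', 0.1),                    # auto mode with maximum step size 0.1 s
    save_freq=1,                         # save once per simulated second
    horizon=100)                         # never simulate past t = 100 s
print(results.times[-1])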
SimResult is a data structure for the results of a simulation, with time. It is returned from the simulate_to* methods for inputs, outputs, states, and event_states for the beginning and ending time step of the simulation, plus any save points indicated by the save_pts and save_freq configuration arguments. The class includes methods for analyzing, manipulating, and visualizing the results of the simulation.
Parameters
times (array[float]) – Times for each data point where times[n] corresponds to data[n]

data (array[Dict[str, float]]) – Data points where data[n] corresponds to times[n]
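As a hedged continuation of the simulate_to_threshold sketch above, a SimResult can be indexed and plotted; the index and the assumption that `results` is in scope are illustrative.

# Illustrative access into a SimResult
print(results.outputs.times[0], results.outputs[0])   # first saved time and output
results.outputs.plot()                                 # plot saved outputs over time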
Calculate monotonicity for a single prediction.
Given a single simulation result, for each event: go through all predicted states and compare those to the next one.
Calculates monotonicity for each event key using its associated mean value in UncertainData.
dt (float, optional) – Maximum timestep for prediction in seconds. By default, the timestep dt is the difference between the last and current call of .estimate(). Some models are unstable at larger dt. Setting a smaller dt will force the model to take smaller steps, resulting in multiple prediction steps for each estimate step. Defaults to parameters[‘dt’], e.g., dt = 1e-2

num_particles (int, optional) – Number of particles in the particle filter
dt (float, optional) – Maximum timestep for prediction in seconds. By default, the timestep dt is the difference between the last and current call of .estimate(). Some models are unstable at larger dt. Setting a smaller dt will force the model to take smaller steps, resulting in multiple prediction steps for each estimate step. Defaults to parameters[‘dt’], e.g., dt = 1e-2

Q (list[list[float]], optional) – Process Noise Matrix

R (list[list[float]], optional) – Measurement Noise Matrix
This class defines the logic for performing a Kalman filter with a linear model (i.e., a subclass of progpy.LinearModel). This filter uses measurement data with noise to generate a state estimate and covariance matrix.
The supported configuration parameters (keyword arguments) for UKF construction are described below:
Parameters
model (PrognosticsModel) – A prognostics model to be used in state estimation
See: Prognostics Model Package
alpha (float, optional) – KF scaling parameter. An alpha > 1 turns this into a fading-memory filter.

dt (float, optional) – Maximum timestep for prediction in seconds. By default, the timestep dt is the difference between the last and current call of .estimate(). Some models are unstable at larger dt. Setting a smaller dt will force the model to take smaller steps, resulting in multiple prediction steps for each estimate step. Defaults to parameters[‘dt’], e.g., dt = 1e-2

Q (list[list[float]], optional) – Kalman Process Noise Matrix

R (list[list[float]], optional) – Kalman Measurement Noise Matrix
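Below is a hedged, minimal sketch of constructing and stepping an Unscented Kalman Filter with the parameters above; the measurement value and dt are illustrative.

# Minimal UKF sketch; measurement value is illustrative
from progpy.models import ThrownObject
from progpy.state_estimators import UnscentedKalmanFilter

m = ThrownObject()
x0 = m.initialize()                      # initial state estimate
ukf = UnscentedKalmanFilter(m, x0, dt=0.01)

# Each call to estimate() fuses one noisy measurement
ukf.estimate(0.1, m.InputContainer({}), m.OutputContainer({'x': 1.9}))
print(ukf.x.mean)                        # updated state estimate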
State Estimator Interface
model (PrognosticsModel) – A prognostics model to be used in state estimation
See: Prognostics Model Package
Initial (starting) state, with keys defined by model.states
e.g., x = ScalarData({‘abc’: 332.1, ‘def’: 221.003}) given states = [‘abc’, ‘def’]
Keyword Arguments
t0 (float) – Initial time at which prediction begins, e.g., 0

dt (float) – Maximum timestep for prediction in seconds. By default, the timestep dt is the difference between the last and current call of .estimate(). Some models are unstable at larger dt. Setting a smaller dt will force the model to take smaller steps, resulting in multiple prediction steps for each estimate step. Defaults to parameters[‘dt’], e.g., dt = 1e-2
**kwargs – See state-estimator specific documentation for specific keyword arguments.
dt (float, optional) – Maximum timestep for prediction in seconds. By default, the timestep dt is the difference between the last and current call of .estimate(). Some models are unstable at larger dt. Setting a smaller dt will force the model to take smaller steps, resulting in multiple prediction steps for each estimate step. Defaults to parameters[‘dt’], e.g., dt = 1e-2
**kwargs – See state-estimator specific documentation for specific keyword arguments.
Calculate Alpha lambda metric for the prediction profile
Parameters
ground_truth (dict[str, float]) – Ground truth time of event for each event (e.g., {‘event1’: 748, ‘event2’: 2233, …})

lambda_value (float) – Prediction time at or after which the metric is evaluated. Evaluation occurs at this time (if a prediction exists) or at the next prediction following.

alpha (float) – Percentage bounds around time to event (where 0.2 allows 20% error in TtE)

beta (float) – Portion of the prediction that must be within those bounds
Keyword Arguments
keys (list[str], optional) – List of keys to use. If not provided, all keys are used.

print (bool, optional) – If True, print the results to the screen. Default is False.
Returns
Whether alpha lambda was met for each key (e.g., {‘event1’: True, ‘event2’: False, …})
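Below is a hedged, minimal sketch of this metric; `profile` stands in for a ToEPredictionProfile built from earlier predictions, and the event key and values are illustrative.

# Illustrative alpha_lambda call on a ToEPredictionProfile
met = profile.alpha_lambda(
    ground_truth={'impact': 7.9},   # true time of event
    lambda_value=2.0,               # evaluate at (or after) t = 2
    alpha=0.2,                      # +/- 20% bounds around true time to event
    beta=0.9)                       # 90% of the prediction must fall in bounds
print(met)                          # e.g., {'impact': True}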
Compute cumulative relative accuracy for a given profile, defined as the normalized sum of relative prediction accuracies at specific time instances.
\(CRA = \Sigma \left( \dfrac{RA}{N} \right)\) for each event
Where \(\Sigma\) is the summation of all relative accuracies for a given profile and N is the total count of profiles
Parameters
ground_truth (dict) – Dictionary containing ground truth, specified as key-value pairs for each event and its value, e.g., {‘event1’: 47.3, ‘event2’: 52.1, ‘event3’: 46.1}
Returns
Dictionary containing cumulative relative accuracy (value) for each event (key). e.g., {‘event1’: 12.3, ‘event2’: 15.1}
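As a hedged illustration (reusing the `profile` placeholder from the sketch above; the method name cumulative_relative_accuracy and the ground-truth value are assumptions):

# Illustrative cumulative relative accuracy call
cra = profile.cumulative_relative_accuracy({'impact': 7.9})
print(cra)   # e.g., {'impact': 0.93}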
Calculate monotonicity for a prediction profile.
Given a prediction profile, for each prediction: go through all predicted states and compare those to the next one.
Calculates monotonicity for each prediction key using its associated mean value in progpy.uncertain_data.UncertainData.
Compute the prognostic horizon metric, defined as the difference between a time \(t_i\), when the predictions meet specified performance criteria, and the time corresponding to the true Time of Event (ToE), for each event.
\(PH = ToE - t_i\)
ToEPredictionProfile
ground_truth (dict) – Dictionary containing ground truth, specified as key-value pairs for each event and its value, e.g., {‘event1’: 47.3, ‘event2’: 52.1, ‘event3’: 46.1}
Keyword Arguments
print (bool) – Boolean specifying whether the prognostic horizon metric should be printed.
Returns
Dictionary containing prognostic horizon calculations (value) for each event (key). e.g., {‘event1’: 12.3, ‘event2’: 15.1}
fig (Figure, optional) – Existing figure previously used to plot states. If passed a figure argument additional data will be added to the plot. Defaults to creating new figure
keys (list[str], optional) – Keys to plot. Defaults to all keys.

num_samples (int, optional) – Number of samples to plot. Defaults to 100.
**kwargs (optional) – Additional keyword arguments passed to scatter function.
state (dict or Container) – Single state in the form of a dict or model.*Container (InputContainer, OutputContainer, StateContainer) representing states and respective values.
lat (np.ndarray) – n x 1 array of doubles; latitude coordinates of waypoints (deg)

lon (np.ndarray) – n x 1 array of doubles; longitude coordinates of waypoints (deg)

alt (np.ndarray) – n x 1 array of doubles; altitude coordinates of waypoints (m)

takeoff_time (float, optional) – Takeoff time of the trajectory. Default is None (starting at current time).

etas (list[float], optional) – ETAs of each waypoint. Default is None; in that case, the ETAs are calculated based on the desired speed between waypoints.
The ProgPy framework consists of three key components that combine to create a flexible and extensible prognostics architecture.
The Prognostics Models are the backbone of the ProgPy architecture. Models describe the specific system that prognostics will be applied to and how the system will evolve with time. Everything else within ProgPy (e.g., simulation capabilities and prognostics tools) is built on top of a model.

ProgPy supports models that are physics-based, data-driven, or hybrid. ProgPy includes some built-in models (see examples below) but is also written in an easily adaptable way so users can implement models specific to their use cases.

The Prognostics Engine encapsulates the complex logic of prognostics in a way that is modular and extendable. It includes the necessary tools to perform prognostics on the model, including state estimation, prediction, and uncertainty management. The modularity of the framework allows these capabilities to work with any model (built-in or user-defined), and the extensibility of the architecture allows users to additionally create their own methodologies.

The Prognostics Support Tools are a collection of capabilities to help users build new functionalities or understand prognostics results.

These three key components come together to create the comprehensive framework that is ProgPy. More details will be shared in the coming pages.
This page is a general guide for ProgPy. To access a guide specific to the features you’re using, select it in the menu below.
The latest stable release of ProgPy is hosted on PyPI. For most users, this version will be adequate. To install via the command line, use the following command:

$ pip install progpy

If you will be using the data-driven tools (e.g., the LSTM model), install the datadriven dependencies as well using the following command:

$ pip install progpy[datadriven]

Users who would like to contribute to ProgPy or would like to use pre-release features can do so using the ProgPy GitHub repo. This isn't recommended for most users, as this version may be unstable. To do this, use the following commands:
C. Teubert, K. Jarvis Griffith, M. Corbetta, C. Kulkarni, P. Banerjee, J. Watkins, M. Daigle, ProgPy Python Prognostics Packages, v1.7, May 2024. URL: https://github.com/nasa/progpy.
dt (float, optional) – Maximum timestep for prediction in seconds. By default, the timestep dt is the difference between the last and current call of .estimate(). Some models are unstable at larger dt. Setting a smaller dt will force the model to take smaller steps, resulting in multiple prediction steps for each estimate step. Defaults to parameters[‘dt’], e.g., dt = 1e-2

Q (list[list[float]], optional) – Process Noise Matrix

R (list[list[float]], optional) – Measurement Noise Matrix
State Estimation
model (PrognosticsModel) – A prognostics model to be used in state estimation
See: Prognostics Model Package
dt (float, optional) – Maximum timestep for prediction in seconds. By default, the timestep dt is the difference between the last and current call of .estimate(). Some models are unstable at larger dt. Setting a smaller dt will force the model to take smaller steps, resulting in multiple prediction steps for each estimate step. Defaults to parameters[‘dt’], e.g., dt = 1e-2

num_particles (int, optional) – Number of particles in the particle filter
This class defines the logic for performing a Kalman filter with a linear model (i.e., a subclass of progpy.LinearModel). This filter uses measurement data with noise to generate a state estimate and covariance matrix.
The supported configuration parameters (keyword arguments) for UKF construction are described below:
Parameters
model (PrognosticsModel) – A prognostics model to be used in state estimation
See: Prognostics Model Package
alpha (float, optional) – KF scaling parameter. An alpha > 1 turns this into a fading-memory filter.

dt (float, optional) – Maximum timestep for prediction in seconds. By default, the timestep dt is the difference between the last and current call of .estimate(). Some models are unstable at larger dt. Setting a smaller dt will force the model to take smaller steps, resulting in multiple prediction steps for each estimate step. Defaults to parameters[‘dt’], e.g., dt = 1e-2

Q (list[list[float]], optional) – Kalman Process Noise Matrix

R (list[list[float]], optional) – Kalman Measurement Noise Matrix
The results of the state estimation are stored in an object of type progpy.uncertain_data.UncertainData. This class contains a number of methods for analyzing a state estimate. This includes methods for obtaining statistics about the distribution, including the following:
mean: The mean value of the state estimate distribution.
In addition to these standard UncertainData metrics, Probability of Success (PoS) is an important metric for prognostics. Probability of Success is the probability that an event will not occur before a defined time. For example, in aeronautics, PoS might be the probability that no failure will occur before the end of the mission.
Below is an example calculating probability of success:
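The following is a hedged, minimal sketch of such a calculation; `mc_results` stands in for results from a predictor, and the availability of percentage_in_bounds on the time-of-event UncertainData, along with all values, are assumptions.

# Illustrative Probability of Success calculation
mission_end_time = 750
toe = mc_results.time_of_event            # UncertainData of predicted Time of Event

# PoS: probability the event occurs only after end of mission
pos = toe.percentage_in_bounds((mission_end_time, float('inf')))
print(pos)                                # e.g., {'EOD': 0.83}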
A progpy.predictors.ToEPredictionProfile contains Time of Event (ToE) predictions performed at multiple points. ToEPredictionProfile is frequently used to evaluate the prognostic quality for a given prognostic solution. It contains a number of methods to help with this, including:
alpha_lambda: Whether the prediction falls within specified limits at particular times with respect to a performance measure [1][2]

cumulate_relative_accuracy: The sum of the relative accuracies of each prediction, given a ground truth

monotonicity: The monotonicity of the prediction series [4][3]

prognostic_horizon: The difference between a time \(t_i\), when the predictions meet specified performance criteria, and the time corresponding to the true Time of Event (ToE), for each event [1][2]
A ToEPredictionProfile also contains a plot method (profile.plot(...)) for visualizing the profile over time.
Kai Goebel, Matthew John Daigle, Abhinav Saxena, Indranil Roychoudhury, Shankar Sankararaman, and José R Celaya. Prognostics: The science of making predictions. 2017
Abhinav Saxena, José Celaya, Sankalita Saha, Bhaskar Saha, and Kai Goebel. Metrics for Offline Evaluation of Prognostic Performance. International Journal of Prognostics and Health Management, 1(1), 20. 2010.
Future loading is an essential part of prediction and simulation. In order to simulate forward in time, you must have an estimate of how the system will be used (i.e., loaded) during the window of time that the system is simulated. Future load is essentially the expected inputs at future times.
Future loading is provided by the user, either using the predefined loading classes in progpy.loading or as a function of time and optional state. For example (see the sketch below):
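A hedged, minimal sketch of both approaches follows. BatteryCircuit and its input key ‘i’ are real ProgPy names; the load values and breakpoint times are illustrative.

# Two ways to define future loading; values are illustrative
from progpy.models import BatteryCircuit
from progpy.loading import Piecewise

m = BatteryCircuit()

# Option 1: a plain function of time (and optional state)
def future_loading(t, x=None):
    return m.InputContainer({'i': 8.4})    # constant 8.4 A draw

# Option 2: a predefined loading class
future_loading_piecewise = Piecewise(
    m.InputContainer,
    [600, 900, float('inf')],              # breakpoint times (s)
    {'i': [2, 1, 4]})                      # load applied in each interval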
Users of ProgPy will need a model describing the behavior of the system of interest. Users will likely either use one of the models distributed with ProgPy (see Included Models), configuring it to their own system using parameter estimation (see 02ParameterEstimation), use a data-driven model class to learn system behavior from data, or build their own model (see the Building New Models section, below).
State-transition Models
New physics-based models are constructed by subclassing progpy.PrognosticsModel as illustrated in the first example. To generate a new model, create a new class for your model that inherits from this class. Alternatively, you can copy the template prog_model_template.ProgModelTemplate, replacing the methods with logic defining your specific model. The analysis and simulation tools defined in progpy.PrognosticsModel will then work with your new model.
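For orientation, below is a hedged, minimal sketch of such a subclass. It is not an official ProgPy model: the battery name, parameters, and equations are illustrative, but the class attributes and methods (inputs, states, outputs, events, default_parameters, dx, output, event_state, threshold_met) follow the PrognosticsModel interface.

# Minimal sketch of a new physics-based model (illustrative, not an official model)
from progpy import PrognosticsModel

class SimpleBattery(PrognosticsModel):
    inputs = ['i']          # current drawn (A)
    states = ['soc']        # state of charge (unitless, 0-1)
    outputs = ['v']         # terminal voltage (V)
    events = ['EOD']        # end of discharge

    default_parameters = {
        'capacity': 7500,       # A*s (illustrative)
        'v_full': 4.2,          # voltage at full charge (illustrative)
        'x0': {'soc': 1.0},     # initial state
    }

    def dx(self, x, u):
        # SOC decreases in proportion to the current drawn
        return self.StateContainer({'soc': -u['i'] / self.parameters['capacity']})

    def output(self, x):
        # Crude linear voltage-SOC relationship (illustrative)
        return self.OutputContainer({'v': self.parameters['v_full'] * x['soc']})

    def event_state(self, x):
        # 1 = healthy, 0 = event has occurred
        return {'EOD': x['soc']}

    def threshold_met(self, x):
        return {'EOD': x['soc'] <= 0}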
For simple linear models, users can choose to subclass the simpler progpy.LinearModel class, as illustrated in the second example. Some methods and algorithms only function on linear models.
There are two methods in progpy through which multiple models can be combined and used together: composite models and ensemble models, described below.
Composite models are used to represent the behavior of a system of interconnected systems. Each system is represented by its own model. These models are combined into a single composite model which behaves as a single model. When defining the composite model, the user provides a description of any connections between the state or output of one model and the input of another. For example (see the sketch below):
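Below is a hedged, minimal sketch of a composite model; `m_pump` and `m_valve` stand in for user-configured models, and the model names and connection keys (‘Pump.flow’, ‘Valve.u’) are hypothetical, not from a real ProgPy model.

# Illustrative CompositeModel; names and keys are hypothetical
from progpy import CompositeModel

m_composite = CompositeModel(
    [('Pump', m_pump), ('Valve', m_valve)],
    connections=[
        ('Pump.flow', 'Valve.u'),   # pump output drives the valve input
    ])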
Unlike composite models, which model a system of systems, ensemble models are used to combine the logic of multiple models which describe the same system. This is used when there are multiple models representing different system behaviors or conditions. The results of each model are aggregated in a way that can be defined by the user. For example (see the sketch below):
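A hedged, minimal sketch follows; `m1` and `m2` stand in for two models of the same system, and the choice of aggregation method is illustrative.

# Illustrative EnsembleModel combining two models of the same system
import numpy as np
from progpy import EnsembleModel

m_ensemble = EnsembleModel([m1, m2], aggregation_method=np.median)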
Mixture of Experts (MoE) models combine multiple models of the same system, similar to ensemble models. Unlike ensemble models, the aggregation is done by selecting the “best” model, that is, the model that has performed best in the past. Each model has a ‘score’ that is tracked in the state, and this score determines which model is best. For example (see the sketch below):
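A hedged, minimal sketch follows; `m1` and `m2` again stand in for models of the same system, and the class name MixtureOfExpertsModel is assumed from recent ProgPy releases.

# Illustrative Mixture of Experts model
from progpy import MixtureOfExpertsModel

m_moe = MixtureOfExpertsModel([m1, m2])
# The model that has scored best so far (tracked in the state) drives predictions.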