5 changes: 2 additions & 3 deletions docs/source/code.rst
@@ -5,10 +5,9 @@ API documentation
:maxdepth: 1

finite_difference
kalman_smooth
linear_model
smooth_finite_difference
total_variation_regularization
kalman_smooth
linear_model
optimize
utils

7 changes: 6 additions & 1 deletion docs/source/finite_difference.rst
@@ -2,4 +2,9 @@ finite_difference
=================

.. automodule:: pynumdiff.finite_difference
:members:
:no-members:

.. autofunction:: pynumdiff.finite_difference.finite_difference
.. autofunction:: pynumdiff.finite_difference.first_order
.. autofunction:: pynumdiff.finite_difference.second_order
.. autofunction:: pynumdiff.finite_difference.fourth_order
8 changes: 7 additions & 1 deletion docs/source/kalman_smooth.rst
@@ -2,4 +2,10 @@ kalman_smooth
=============

.. automodule:: pynumdiff.kalman_smooth
:members:
:no-members:

.. autofunction:: pynumdiff.kalman_smooth.rts_const_deriv
.. autofunction:: pynumdiff.kalman_smooth.constant_velocity
.. autofunction:: pynumdiff.kalman_smooth.constant_acceleration
.. autofunction:: pynumdiff.kalman_smooth.constant_jerk
.. autofunction:: pynumdiff.kalman_smooth.known_dynamics
10 changes: 9 additions & 1 deletion docs/source/total_variation_regularization.rst
@@ -2,4 +2,12 @@ total_variation_regularization
==============================

.. automodule:: pynumdiff.total_variation_regularization
:members:
:no-members:

.. autofunction:: pynumdiff.total_variation_regularization.tvrdiff
.. autofunction:: pynumdiff.total_variation_regularization.velocity
.. autofunction:: pynumdiff.total_variation_regularization.acceleration
.. autofunction:: pynumdiff.total_variation_regularization.jerk
.. autofunction:: pynumdiff.total_variation_regularization.iterative_velocity
.. autofunction:: pynumdiff.total_variation_regularization.smooth_acceleration
.. autofunction:: pynumdiff.total_variation_regularization.jerk_sliding
36 changes: 18 additions & 18 deletions examples/1_basic_tutorial.ipynb

Large diffs are not rendered by default.

514 changes: 105 additions & 409 deletions examples/2a_optimizing_parameters_with_dxdt_known.ipynb

Large diffs are not rendered by default.

494 changes: 92 additions & 402 deletions examples/2b_optimizing_parameters_with_dxdt_unknown.ipynb

Large diffs are not rendered by default.

18 changes: 10 additions & 8 deletions examples/3_automatic_method_suggestion.ipynb

Large diffs are not rendered by default.

4 changes: 1 addition & 3 deletions pynumdiff/finite_difference/__init__.py
@@ -1,5 +1,3 @@
"""This module implements some common finite difference schemes
"""
from ._finite_difference import first_order, second_order, fourth_order

__all__ = ['first_order', 'second_order', 'fourth_order'] # So these get treated as direct members of the module by sphinx
from ._finite_difference import finite_difference, first_order, second_order, fourth_order
Collaborator Author:
There are simply more public methods now. The docstring notes that the other methods in this module just call this backing function.
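For illustration, a minimal usage sketch of the new public API (synthetic data; names and signatures as they appear in this diff):

import numpy as np
from pynumdiff.finite_difference import finite_difference, first_order

t = np.linspace(0, 4*np.pi, 100)
x = np.sin(t) + 0.01*np.random.randn(100)  # noisy signal to differentiate
dt = t[1] - t[0]

# the public wrapper fixes the differencing order...
x_hat, dxdt_hat = first_order(x, dt, num_iterations=2)

# ...so this call to the now-public backing function is equivalent
x_hat2, dxdt_hat2 = finite_difference(x, dt, 2, 1)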

18 changes: 11 additions & 7 deletions pynumdiff/finite_difference/_finite_difference.py
@@ -4,11 +4,15 @@
from warnings import warn


def _finite_difference(x, dt, num_iterations, order):
"""Helper for all finite difference methods, since their iteration structure is all the same.
def finite_difference(x, dt, num_iterations, order):
"""Perform iterated finite difference of a given order. This serves as the common backing function for
Collaborator Author:
Had to add proper docstrings.

all other methods in this module.

:param np.array[float] x: data to differentiate
:param float dt: step size
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
:param int order: 1, 2, or 4, controls which finite differencing scheme to employ
For other parameters and return values, see public function docstrings
"""
if num_iterations < 1: raise ValueError("num_iterations must be >0")
if order not in [1, 2, 4]: raise ValueError("order must be 1, 2, or 4")
@@ -61,7 +65,7 @@ def _finite_difference(x, dt, num_iterations, order):


def first_order(x, dt, params=None, options={}, num_iterations=1):
"""First-order centered difference method
"""First-order difference method

:param np.array[float] x: data to differentiate
:param float dt: step size
@@ -81,7 +85,7 @@ def first_order(x, dt, params=None, options={}, num_iterations=1):
warn("`params` and `options` parameters will be removed in a future version. Use `num_iterations` instead.", DeprecationWarning)
num_iterations = params[0] if isinstance(params, list) else params

return _finite_difference(x, dt, num_iterations, 1)
return finite_difference(x, dt, num_iterations, 1)


def second_order(x, dt, num_iterations=1):
@@ -96,7 +100,7 @@ def second_order(x, dt, num_iterations=1):
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** -- estimated derivative of x
"""
return _finite_difference(x, dt, num_iterations, 2)
return finite_difference(x, dt, num_iterations, 2)


def fourth_order(x, dt, num_iterations=1):
@@ -111,4 +115,4 @@ def fourth_order(x, dt, num_iterations=1):
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** -- estimated derivative of x
"""
return _finite_difference(x, dt, num_iterations, 4)
return finite_difference(x, dt, num_iterations, 4)
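As a reader's note, the iteration scheme described in the new docstring amounts to roughly the following standalone sketch (a simplification, not the library's implementation; one_pass is a hypothetical stand-in for whichever stencil the order selects):

import numpy as np

def iterated_fd_sketch(x, dt, num_iterations, one_pass):
    """one_pass: callable performing a single finite-difference pass."""
    dxdt = one_pass(x, dt)
    for _ in range(num_iterations - 1):
        # integrate the current estimate with the trapezoidal rule to get a smoothed x...
        x = x[0] + np.concatenate(([0.0], np.cumsum((dxdt[:-1] + dxdt[1:])/2*dt)))
        # ...then finite-difference the smoothed signal again
        dxdt = one_pass(x, dt)
    return x, dxdt  # mirrors (x_hat, dxdt_hat)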
4 changes: 1 addition & 3 deletions pynumdiff/kalman_smooth/__init__.py
@@ -1,5 +1,3 @@
"""This module implements Kalman filters
"""
from ._kalman_smooth import constant_velocity, constant_acceleration, constant_jerk, known_dynamics

__all__ = ['constant_velocity', 'constant_acceleration', 'constant_jerk', 'known_dynamics'] # So these get treated as direct members of the module by sphinx
Collaborator Author:
For modules where I manually reference methods in the .rst, there is no need to list out __all__ anymore.

from ._kalman_smooth import rts_const_deriv, constant_velocity, constant_acceleration, constant_jerk, known_dynamics
87 changes: 52 additions & 35 deletions pynumdiff/kalman_smooth/_kalman_smooth.py
@@ -80,11 +80,56 @@ def _RTS_smooth(xhat0, P0, y, A, C, Q, R, u=None, B=None):
#########################################
# Constant 1st, 2nd, and 3rd derivative #
#########################################
def _constant_derivative(x, P0, A, C, R, Q, forwardbackward):
"""Helper for `constant_{velocity,acceleration,jerk}` functions, because there was a lot of
repeated code.
def rts_const_deriv(x, dt, order, qr_ratio, forwardbackward):
"""Perform Rauch-Tung-Striebel smoothing with a constant derivative model. Other constant derivative
methods in this module call this function.

:param np.array[float] x: data series to differentiate
:param float dt: step size
:param int order: which derivative the model holds constant: 1 (velocity), 2 (acceleration), or 3 (jerk)
:param float qr_ratio: the process noise level divided by the measurement noise level, because, per
`our analysis <https://github.com/florisvb/PyNumDiff/issues/139>`_, the mean result depends on
the relative rather than the absolute size of :math:`q` and :math:`r`.
:param bool forwardbackward: indicates whether to run smoother forwards and backwards
(usually achieves better estimate at end points)

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
- **dxdt_hat** -- estimated derivative of x
"""
xhat0 = np.zeros(A.shape[0]); xhat0[0] = x[0] # See #110 for why this choice of xhat0
q = 10**int(np.log10(qr_ratio)/2) # even-ish split of the powers across 0
r = q/qr_ratio
Collaborator Author:
Addressing #139.
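The split those two lines perform can be sanity-checked in isolation (a standalone sketch with illustrative numbers):

import numpy as np

qr_ratio = 1e6                     # process noise a million times the measurement noise
q = 10**int(np.log10(qr_ratio)/2)  # int(6.0/2) = 3, so q = 1e3
r = q/qr_ratio                     # 1e3/1e6 = 1e-3
assert np.isclose(q/r, qr_ratio)   # only the ratio is preserved, by design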


if order == 1:
A = np.array([[1, dt], [0, 1]]) # states are x, x'. This basically does an Euler update.
C = np.array([[1, 0]]) # we measure only y = noisy x
R = np.array([[r]])
Q = np.array([[1e-16, 0], [0, q]]) # uncertainty is around the velocity
P0 = np.array(100*np.eye(2)) # See #110 for why this choice of P0
elif order == 2:
A = np.array([[1, dt, (dt**2)/2], # states are x, x', x"
[0, 1, dt],
[0, 0, 1]])
C = np.array([[1, 0, 0]]) # we measure only y = noisy x
R = np.array([[r]])
Q = np.array([[1e-16, 0, 0],
[0, 1e-16, 0],
[0, 0, q]]) # uncertainty is around the acceleration
P0 = np.array(100*np.eye(3)) # See #110 for why this choice of P0
elif order == 3:
A = np.array([[1, dt, (dt**2)/2, (dt**3)/6], # states are x, x', x", x'"
[0, 1, dt, (dt**2)/2],
[0, 0, 1, dt],
[0, 0, 0, 1]])
C = np.array([[1, 0, 0, 0]]) # we measure only y = noisy x
R = np.array([[r]])
Q = np.array([[1e-16, 0, 0, 0],
[0, 1e-16, 0, 0],
[0, 0, 1e-16, 0],
[0, 0, 0, q]]) # uncertainty is around the jerk
P0 = np.array(100*np.eye(4)) # See #110 for why this choice of P0

xhat0 = np.zeros(A.shape[0]); xhat0[0] = x[0] # The first estimate is the first seen state. See #110
xhat_smooth = _RTS_smooth(xhat0, P0, x, A, C, Q, R) # noisy x are the "y" in Kalman-land
x_hat_forward = xhat_smooth[:, 0] # first dimension is time, so slice first element at all times
dxdt_hat_forward = xhat_smooth[:, 1]
@@ -131,13 +176,7 @@ def constant_velocity(x, dt, params=None, options=None, r=None, q=None, forwardb
elif r == None or q == None:
raise ValueError("`q` and `r` must be given.")

A = np.array([[1, dt], [0, 1]]) # states are x, x'. This basically does an Euler update.
C = np.array([[1, 0]]) # we measure only y = noisy x
R = np.array([[r]])
Q = np.array([[1e-16, 0], [0, q]]) # uncertainty is around the velocity
P0 = np.array(100*np.eye(2)) # See #110 for why this choice of P0

return _constant_derivative(x, P0, A, C, R, Q, forwardbackward)
return rts_const_deriv(x, dt, 1, q/r, forwardbackward)


def constant_acceleration(x, dt, params=None, options=None, r=None, q=None, forwardbackward=True):
@@ -166,17 +205,7 @@ def constant_acceleration(x, dt, params=None, options=None, r=None, q=None, forw
elif r == None or q == None:
raise ValueError("`q` and `r` must be given.")

A = np.array([[1, dt, (dt**2)/2], # states are x, x', x"
[0, 1, dt],
[0, 0, 1]])
C = np.array([[1, 0, 0]]) # we measure only y = noisy x
R = np.array([[r]])
Q = np.array([[1e-16, 0, 0],
[0, 1e-16, 0],
[0, 0, q]]) # uncertainty is around the acceleration
P0 = np.array(100*np.eye(3)) # See #110 for why this choice of P0

return _constant_derivative(x, P0, A, C, R, Q, forwardbackward)
return rts_const_deriv(x, dt, 2, q/r, forwardbackward)


def constant_jerk(x, dt, params=None, options=None, r=None, q=None, forwardbackward=True):
@@ -205,19 +234,7 @@ def constant_jerk(x, dt, params=None, options=None, r=None, q=None, forwardbackw
elif r == None or q == None:
raise ValueError("`q` and `r` must be given.")

A = np.array([[1, dt, (dt**2)/2, (dt**3)/6], # states are x, x', x", x'"
[0, 1, dt, (dt**2)/2],
[0, 0, 1, dt],
[0, 0, 0, 1]])
C = np.array([[1, 0, 0, 0]]) # we measure only y = noisy x
R = np.array([[r]])
Q = np.array([[1e-16, 0, 0, 0],
[0, 1e-16, 0, 0],
[0, 0, 1e-16, 0],
[0, 0, 0, q]]) # uncertainty is around the jerk
P0 = np.array(100*np.eye(4)) # See #110 for why this choice of P0

return _constant_derivative(x, P0, A, C, R, Q, forwardbackward)
return rts_const_deriv(x, dt, 3, q/r, forwardbackward)


def known_dynamics(x, params, u=None, options=None, xhat0=None, P0=None, A=None,
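For illustration, a short usage sketch of the refactored call path (synthetic data; names and signatures as they appear in this diff):

import numpy as np
from pynumdiff.kalman_smooth import constant_velocity, rts_const_deriv

t = np.linspace(0, 4*np.pi, 200)
x = np.sin(t) + 0.05*np.random.randn(200)  # noisy signal to differentiate
dt = t[1] - t[0]

# the public wrapper now forwards only the ratio q/r...
x_hat, dxdt_hat = constant_velocity(x, dt, r=1e-3, q=1e3)

# ...so calling the backing smoother directly with the same ratio is equivalent
x_hat2, dxdt_hat2 = rts_const_deriv(x, dt, 1, 1e3/1e-3, True)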