-
Notifications
You must be signed in to change notification settings - Fork 24
Make backers public #143
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Make backers public #143
Changes from all commits
9f3cfe8
913bcc6
fa9d46c
70fe0f9
5f27575
462f665
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,5 +1,3 @@ | ||
| """This module implements some common finite difference schemes | ||
| """ | ||
| from ._finite_difference import first_order, second_order, fourth_order | ||
|
|
||
| __all__ = ['first_order', 'second_order', 'fourth_order'] # So these get treated as direct members of the module by sphinx | ||
| from ._finite_difference import finite_difference, first_order, second_order, fourth_order | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -4,11 +4,15 @@ | |
| from warnings import warn | ||
|
|
||
|
|
||
| def _finite_difference(x, dt, num_iterations, order): | ||
| """Helper for all finite difference methods, since their iteration structure is all the same. | ||
| def finite_difference(x, dt, num_iterations, order): | ||
| """Perform iterated finite difference of a given order. This serves as the common backing function for | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Had to add more proper docstrings. |
||
| all other methods in this module. | ||
|
|
||
| :param np.array[float] x: data to differentiate | ||
| :param float dt: step size | ||
| :param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal | ||
| rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times | ||
| :param int order: 1, 2, or 4, controls which finite differencing scheme to employ | ||
| For other parameters and return values, see public function docstrings | ||
| """ | ||
| if num_iterations < 1: raise ValueError("num_iterations must be >0") | ||
| if order not in [1, 2, 4]: raise ValueError("order must be 1, 2, or 4") | ||
|
|
@@ -61,7 +65,7 @@ def _finite_difference(x, dt, num_iterations, order): | |
|
|
||
|
|
||
| def first_order(x, dt, params=None, options={}, num_iterations=1): | ||
| """First-order centered difference method | ||
| """First-order difference method | ||
|
|
||
| :param np.array[float] x: data to differentiate | ||
| :param float dt: step size | ||
|
|
@@ -81,7 +85,7 @@ def first_order(x, dt, params=None, options={}, num_iterations=1): | |
| warn("`params` and `options` parameters will be removed in a future version. Use `num_iterations` instead.", DeprecationWarning) | ||
| num_iterations = params[0] if isinstance(params, list) else params | ||
|
|
||
| return _finite_difference(x, dt, num_iterations, 1) | ||
| return finite_difference(x, dt, num_iterations, 1) | ||
|
|
||
|
|
||
| def second_order(x, dt, num_iterations=1): | ||
|
|
@@ -96,7 +100,7 @@ def second_order(x, dt, num_iterations=1): | |
| - **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat | ||
| - **dxdt_hat** -- estimated derivative of x | ||
| """ | ||
| return _finite_difference(x, dt, num_iterations, 2) | ||
| return finite_difference(x, dt, num_iterations, 2) | ||
|
|
||
|
|
||
| def fourth_order(x, dt, num_iterations=1): | ||
|
|
@@ -111,4 +115,4 @@ def fourth_order(x, dt, num_iterations=1): | |
| - **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat | ||
| - **dxdt_hat** -- estimated derivative of x | ||
| """ | ||
| return _finite_difference(x, dt, num_iterations, 4) | ||
| return finite_difference(x, dt, num_iterations, 4) | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,5 +1,3 @@ | ||
| """This module implements Kalman filters | ||
| """ | ||
| from ._kalman_smooth import constant_velocity, constant_acceleration, constant_jerk, known_dynamics | ||
|
|
||
| __all__ = ['constant_velocity', 'constant_acceleration', 'constant_jerk', 'known_dynamics'] # So these get treated as direct members of the module by sphinx | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. For modules where I manually reference methods in the |
||
| from ._kalman_smooth import rts_const_deriv, constant_velocity, constant_acceleration, constant_jerk, known_dynamics | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -80,11 +80,56 @@ def _RTS_smooth(xhat0, P0, y, A, C, Q, R, u=None, B=None): | |
| ######################################### | ||
| # Constant 1st, 2nd, and 3rd derivative # | ||
| ######################################### | ||
| def _constant_derivative(x, P0, A, C, R, Q, forwardbackward): | ||
| """Helper for `constant_{velocity,acceleration,jerk}` functions, because there was a lot of | ||
| repeated code. | ||
| def rts_const_deriv(x, dt, order, qr_ratio, forwardbackward): | ||
| """Perform Rauch-Tung-Striebel smoothing with a constant derivative model. Other constant derivative | ||
| methods in this module call this function. | ||
|
|
||
| :param np.array[float] x: data series to differentiate | ||
| :param float dt: step size | ||
| :param int order: which derivative to stabilize in the constant-derivative model | ||
| :param qr_ratio: the process noise level divided by the measurement noise level, because, | ||
| per `our analysis <https://github.com/florisvb/PyNumDiff/issues/139>`_, the mean result is | ||
| dependent on the relative rather than absolute size of :math:`q` and :math:`r`. | ||
| :param bool forwardbackward: indicates whether to run smoother forwards and backwards | ||
| (usually achieves better estimate at end points) | ||
|
|
||
| :return: tuple[np.array, np.array] of\n | ||
| - **x_hat** -- estimated (smoothed) x | ||
| - **dxdt_hat** -- estimated derivative of x | ||
| """ | ||
| xhat0 = np.zeros(A.shape[0]); xhat0[0] = x[0] # See #110 for why this choice of xhat0 | ||
| q = 10**int(np.log10(qr_ratio)/2) # even-ish split of the powers across 0 | ||
| r = q/qr_ratio | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Addressing #139. |
||
|
|
||
| if order == 1: | ||
| A = np.array([[1, dt], [0, 1]]) # states are x, x'. This basically does an Euler update. | ||
| C = np.array([[1, 0]]) # we measure only y = noisy x | ||
| R = np.array([[r]]) | ||
| Q = np.array([[1e-16, 0], [0, q]]) # uncertainty is around the velocity | ||
| P0 = np.array(100*np.eye(2)) # See #110 for why this choice of P0 | ||
| elif order == 2: | ||
| A = np.array([[1, dt, (dt**2)/2], # states are x, x', x" | ||
| [0, 1, dt], | ||
| [0, 0, 1]]) | ||
| C = np.array([[1, 0, 0]]) # we measure only y = noisy x | ||
| R = np.array([[r]]) | ||
| Q = np.array([[1e-16, 0, 0], | ||
| [0, 1e-16, 0], | ||
| [0, 0, q]]) # uncertainty is around the acceleration | ||
| P0 = np.array(100*np.eye(3)) # See #110 for why this choice of P0 | ||
| elif order == 3: | ||
| A = np.array([[1, dt, (dt**2)/2, (dt**3)/6], # states are x, x', x", x'" | ||
| [0, 1, dt, (dt**2)/2], | ||
| [0, 0, 1, dt], | ||
| [0, 0, 0, 1]]) | ||
| C = np.array([[1, 0, 0, 0]]) # we measure only y = noisy x | ||
| R = np.array([[r]]) | ||
| Q = np.array([[1e-16, 0, 0, 0], | ||
| [0, 1e-16, 0, 0], | ||
| [0, 0, 1e-16, 0], | ||
| [0, 0, 0, q]]) # uncertainty is around the jerk | ||
| P0 = np.array(100*np.eye(4)) # See #110 for why this choice of P0 | ||
|
|
||
| xhat0 = np.zeros(A.shape[0]); xhat0[0] = x[0] # The first estimate is the first seen state. See #110 | ||
| xhat_smooth = _RTS_smooth(xhat0, P0, x, A, C, Q, R) # noisy x are the "y" in Kalman-land | ||
| x_hat_forward = xhat_smooth[:, 0] # first dimension is time, so slice first element at all times | ||
| dxdt_hat_forward = xhat_smooth[:, 1] | ||
|
|
@@ -131,13 +176,7 @@ def constant_velocity(x, dt, params=None, options=None, r=None, q=None, forwardb | |
| elif r == None or q == None: | ||
| raise ValueError("`q` and `r` must be given.") | ||
|
|
||
| A = np.array([[1, dt], [0, 1]]) # states are x, x'. This basically does an Euler update. | ||
| C = np.array([[1, 0]]) # we measure only y = noisy x | ||
| R = np.array([[r]]) | ||
| Q = np.array([[1e-16, 0], [0, q]]) # uncertainty is around the velocity | ||
| P0 = np.array(100*np.eye(2)) # See #110 for why this choice of P0 | ||
|
|
||
| return _constant_derivative(x, P0, A, C, R, Q, forwardbackward) | ||
| return rts_const_deriv(x, dt, 1, q/r, forwardbackward) | ||
|
|
||
|
|
||
| def constant_acceleration(x, dt, params=None, options=None, r=None, q=None, forwardbackward=True): | ||
|
|
@@ -166,17 +205,7 @@ def constant_acceleration(x, dt, params=None, options=None, r=None, q=None, forw | |
| elif r == None or q == None: | ||
| raise ValueError("`q` and `r` must be given.") | ||
|
|
||
| A = np.array([[1, dt, (dt**2)/2], # states are x, x', x" | ||
| [0, 1, dt], | ||
| [0, 0, 1]]) | ||
| C = np.array([[1, 0, 0]]) # we measure only y = noisy x | ||
| R = np.array([[r]]) | ||
| Q = np.array([[1e-16, 0, 0], | ||
| [0, 1e-16, 0], | ||
| [0, 0, q]]) # uncertainty is around the acceleration | ||
| P0 = np.array(100*np.eye(3)) # See #110 for why this choice of P0 | ||
|
|
||
| return _constant_derivative(x, P0, A, C, R, Q, forwardbackward) | ||
| return rts_const_deriv(x, dt, 2, q/r, forwardbackward) | ||
|
|
||
|
|
||
| def constant_jerk(x, dt, params=None, options=None, r=None, q=None, forwardbackward=True): | ||
|
|
@@ -205,19 +234,7 @@ def constant_jerk(x, dt, params=None, options=None, r=None, q=None, forwardbackw | |
| elif r == None or q == None: | ||
| raise ValueError("`q` and `r` must be given.") | ||
|
|
||
| A = np.array([[1, dt, (dt**2)/2, (dt**3)/6], # states are x, x', x", x'" | ||
| [0, 1, dt, (dt**2)/2], | ||
| [0, 0, 1, dt], | ||
| [0, 0, 0, 1]]) | ||
| C = np.array([[1, 0, 0, 0]]) # we measure only y = noisy x | ||
| R = np.array([[r]]) | ||
| Q = np.array([[1e-16, 0, 0, 0], | ||
| [0, 1e-16, 0, 0], | ||
| [0, 0, 1e-16, 0], | ||
| [0, 0, 0, q]]) # uncertainty is around the jerk | ||
| P0 = np.array(100*np.eye(4)) # See #110 for why this choice of P0 | ||
|
|
||
| return _constant_derivative(x, P0, A, C, R, Q, forwardbackward) | ||
| return rts_const_deriv(x, dt, 3, q/r, forwardbackward) | ||
|
|
||
|
|
||
| def known_dynamics(x, params, u=None, options=None, xhat0=None, P0=None, A=None, | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There are simply more public methods now. The docstring makes reference to the fact other methods in the module really just call the backing one.