Merged
Changes from all commits
Commits
19 commits
20932bc
made first order actually first order and added fourth order
pavelkomarov Jul 1, 2025
f66fcb2
updated to get tests to pass
pavelkomarov Jul 1, 2025
45b5a8d
Merge branch 'master' of github.com:florisvb/PyNumDiff into improve-fd
pavelkomarov Jul 1, 2025
84fe010
Merge branch 'master' of github.com:florisvb/PyNumDiff into improve-fd
pavelkomarov Jul 2, 2025
5fdb450
Merge branch 'master' of github.com:florisvb/PyNumDiff into improve-fd
pavelkomarov Jul 2, 2025
2e68536
added fourth_order to the package-level __init__.py
pavelkomarov Jul 2, 2025
61a9e65
Merge branch 'master' of github.com:florisvb/PyNumDiff into improve-fd
pavelkomarov Jul 4, 2025
61a1be5
Merge branch 'master' of github.com:florisvb/PyNumDiff into improve-fd
pavelkomarov Jul 8, 2025
2bb4b43
Merge branch 'master' of github.com:florisvb/PyNumDiff into improve-fd
pavelkomarov Jul 14, 2025
2764aa1
Merge branch 'master' of github.com:florisvb/PyNumDiff into improve-fd
pavelkomarov Jul 15, 2025
4c4683a
Merge branch 'master' of github.com:florisvb/PyNumDiff into improve-fd
pavelkomarov Jul 15, 2025
abaacca
made first order actually first order and added fourth order
pavelkomarov Jul 1, 2025
0285574
updated to get tests to pass
pavelkomarov Jul 1, 2025
62975a7
added fourth_order to the package-level __init__.py
pavelkomarov Jul 2, 2025
e96dbf1
fixing merge conflicts
pavelkomarov Jul 15, 2025
61534c2
Merge branch 'master' of github.com:florisvb/PyNumDiff into improve-fd
pavelkomarov Jul 15, 2025
e9c1ef4
reworked fd code to account for edge blowup, reran notebooks
pavelkomarov Jul 15, 2025
21c3d38
updated notebooks
pavelkomarov Jul 15, 2025
df996dc
fixing last couple things
pavelkomarov Jul 16, 2025
190 changes: 142 additions & 48 deletions examples/1_basic_tutorial.ipynb

Large diffs are not rendered by default.

162 changes: 118 additions & 44 deletions examples/2a_optimizing_parameters_with_dxdt_known.ipynb

Large diffs are not rendered by default.

160 changes: 117 additions & 43 deletions examples/2b_optimizing_parameters_with_dxdt_unknown.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion pynumdiff/__init__.py
@@ -1,7 +1,7 @@
"""Import useful functions from all modules
"""
from pynumdiff._version import __version__
from pynumdiff.finite_difference import first_order, second_order
from pynumdiff.finite_difference import first_order, second_order, fourth_order
from pynumdiff.smooth_finite_difference import mediandiff, meandiff, gaussiandiff,\
friedrichsdiff, butterdiff, splinediff
from pynumdiff.total_variation_regularization import iterative_velocity, velocity,\
4 changes: 2 additions & 2 deletions pynumdiff/finite_difference/__init__.py
@@ -1,5 +1,5 @@
"""This module implements some common finite difference schemes
"""
from ._finite_difference import first_order, second_order
from ._finite_difference import first_order, second_order, fourth_order

__all__ = ['first_order', 'second_order'] # So these get treated as direct members of the module by sphinx
__all__ = ['first_order', 'second_order', 'fourth_order'] # So these get treated as direct members of the module by sphinx
120 changes: 75 additions & 45 deletions pynumdiff/finite_difference/_finite_difference.py
@@ -1,81 +1,111 @@
"""This is handy for this module https://web.media.mit.edu/~crtaylor/calculator.html"""
import numpy as np
from pynumdiff.utils import utility
from warnings import warn


def first_order(x, dt, params=None, options={}, num_iterations=None):
def _finite_difference(x, dt, num_iterations, order):
"""Helper for all finite difference methods, since their iteration structure is all the same.

:param int order: 1, 2, or 4, controls which finite differencing scheme to employ
For other parameters and return values, see public function docstrings
"""
if num_iterations < 1: raise ValueError("num_iterations must be >0")
if order not in [1, 2, 4]: raise ValueError("order must be 1, 2, or 4")

x_hat = x # preserve a reference to x, because if iterating we need it to find the final constant of integration
dxdt_hat = np.zeros(x.shape) # preallocate reusable memory

# For all but the last iteration, do the differentiate->integrate smoothing loop, being careful with endpoints
for i in range(num_iterations-1):
if order == 1:
dxdt_hat[:-1] = np.diff(x_hat)
dxdt_hat[-1] = dxdt_hat[-2] # using stencil -1,0 vs stencil 0,1 you get an expression for the same value
elif order == 2:
dxdt_hat[1:-1] = (x_hat[2:] - x_hat[:-2])/2 # second-order center-difference formula
dxdt_hat[0] = x_hat[1] - x_hat[0]
dxdt_hat[-1] = x_hat[-1] - x_hat[-2] # use first-order endpoint formulas so as not to amplify noise. See #104
elif order == 4:
dxdt_hat[2:-2] = (8*(x_hat[3:-1] - x_hat[1:-3]) - x_hat[4:] + x_hat[:-4])/12 # fourth-order center-difference
dxdt_hat[:2] = np.diff(x_hat[:3])
dxdt_hat[-2:] = np.diff(x_hat[-3:])

x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt=1) # estimate new x_hat by integrating derivative
# We can skip dividing by dt here and pass dt=1, because the integration multiplies dt back in.
# No need to find integration constant until the very end, because we just differentiate again.

if order == 1:
dxdt_hat[:-1] = np.diff(x_hat)
dxdt_hat[-1] = dxdt_hat[-2] # using stencil -1,0 vs stencil 0,1 you get an expression for the same value
elif order == 2:
dxdt_hat[1:-1] = x_hat[2:] - x_hat[:-2] # second-order center-difference formula
dxdt_hat[0] = -3 * x_hat[0] + 4 * x_hat[1] - x_hat[2] # second-order endpoint formulas
dxdt_hat[-1] = 3 * x_hat[-1] - 4 * x_hat[-2] + x_hat[-3]
dxdt_hat /= 2
elif order == 4:
dxdt_hat[2:-2] = 8*(x_hat[3:-1] - x_hat[1:-3]) - x_hat[4:] + x_hat[:-4] # fourth-order center-difference
dxdt_hat[0] = -25*x_hat[0] + 48*x_hat[1] - 36*x_hat[2] + 16*x_hat[3] - 3*x_hat[4]
dxdt_hat[1] = -3*x_hat[0] - 10*x_hat[1] + 18*x_hat[2] - 6*x_hat[3] + x_hat[4]
dxdt_hat[-2] = 3*x_hat[-1] + 10*x_hat[-2] - 18*x_hat[-3] + 6*x_hat[-4] - x_hat[-5]
dxdt_hat[-1] = 25*x_hat[-1] - 48*x_hat[-2] + 36*x_hat[-3] - 16*x_hat[-4] + 3*x_hat[-5]
dxdt_hat /= 12
dxdt_hat /= dt # don't forget to scale by dt, can't skip it this time

if num_iterations > 1: # We've lost a constant of integration in the above
x_hat += utility.estimate_integration_constant(x, x_hat) # uses least squares

return x_hat, dxdt_hat


def first_order(x, dt, params=None, options={}, num_iterations=1):
"""First-order centered difference method

:param np.array[float] x: data to differentiate
:param float dt: step size
:param list[float] or float params: (**deprecated**, prefer :code:`num_iterations`)
:param dict options: (**deprecated**, prefer :code:`num_iterations`) a dictionary consisting of {'iterate': (bool)}
:param int num_iterations: If performing iterated FD to smooth the estimates, give the number of iterations.
If ungiven, FD will not be iterated.
:param int num_iterations: number of iterations. If >1, the derivative is integrated with the trapezoidal
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** -- estimated derivative of x
"""
warn("`first_order` in past releases was actually calculating a second-order FD. Use `second_order` to achieve " +
"approximately the same behavior. Note that odd-order methods have asymmetrical stencils, which causes " +
"horizontal drift in the answer, especially when iterating.", DeprecationWarning)
if params != None and 'iterate' in options:
warn("`params` and `options` parameters will be removed in a future version. Use `num_iterations` instead.", DeprecationWarning)
if isinstance(params, list): params = params[0]
return _iterate_first_order(x, dt, params)
elif num_iterations:
return _iterate_first_order(x, dt, num_iterations)

dxdt_hat = np.diff(x) / dt # Calculate the finite difference
dxdt_hat = np.hstack((dxdt_hat[0], dxdt_hat, dxdt_hat[-1])) # Pad the data
dxdt_hat = np.mean((dxdt_hat[0:-1], dxdt_hat[1:]), axis=0) # Re-finite dxdt_hat using linear interpolation
num_iterations = params[0] if isinstance(params, list) else params

return x, dxdt_hat
return _finite_difference(x, dt, num_iterations, 1)


def second_order(x, dt):
"""Second-order centered difference method
def second_order(x, dt, num_iterations=1):
"""Second-order centered difference method, with special endpoint formulas.

:param np.array[float] x: data to differentiate
:param float dt: step size
:param int num_iterations: number of iterations. If >1, the derivative is integrated with the trapezoidal
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** -- estimated derivative of x
"""
dxdt_hat = (x[2:] - x[0:-2]) / (2 * dt)
first_dxdt_hat = (-3 * x[0] + 4 * x[1] - x[2]) / (2 * dt)
last_dxdt_hat = (3 * x[-1] - 4 * x[-2] + x[-3]) / (2 * dt)
dxdt_hat = np.hstack((first_dxdt_hat, dxdt_hat, last_dxdt_hat))
return x, dxdt_hat
return _finite_difference(x, dt, num_iterations, 2)


def _x_hat_using_finite_difference(x, dt):
"""Find a smoothed estimate of the true function by taking FD and then integrating with trapezoids
"""
x_hat, dxdt_hat = first_order(x, dt)
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
x0 = utility.estimate_initial_condition(x, x_hat)
return x_hat + x0


def _iterate_first_order(x, dt, num_iterations):
"""Iterative first order centered finite difference.
def fourth_order(x, dt, num_iterations=1):
"""Fourth-order centered difference method, with special endpoint formulas.

:param np.array[float] x: data to differentiate
:param float dt: step size
:param int num_iterations: number of iterations
:param int num_iterations: number of iterations. If >1, the derivative is integrated with the trapezoidal
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** -- estimated derivative of x
"""
w = np.linspace(0, 1, len(x)) # set up weights, [0., ... 1.0]

# forward backward passes
for _ in range(num_iterations):
xf = _x_hat_using_finite_difference(x, dt)
xb = _x_hat_using_finite_difference(x[::-1], dt)
x = xf * w + xb[::-1] * (1 - w)

x_hat, dxdt_hat = first_order(x, dt)

return x_hat, dxdt_hat
return _finite_difference(x, dt, num_iterations, 4)
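
A minimal sketch of the reworked API in use, assuming this branch is installed as `pynumdiff`; the test signal and noise level here are made up for illustration:

```python
import numpy as np
# the package-level __init__.py now re-exports fourth_order alongside the others
from pynumdiff import first_order, second_order, fourth_order

dt = 0.01
t = np.arange(0, 2 * np.pi, dt)
x = np.sin(t) + 0.01 * np.random.randn(len(t))  # noisy samples of sin(t)

# With num_iterations=1 (the default), the returned x_hat is just the original x.
# Note first_order now emits a DeprecationWarning explaining the behavior change.
_, dxdt_1 = first_order(x, dt)
_, dxdt_2 = second_order(x, dt)
_, dxdt_4 = fourth_order(x, dt)

# Iterating runs the differentiate -> integrate smoothing loop num_iterations-1 extra times
x_hat, dxdt_iter = fourth_order(x, dt, num_iterations=10)

for name, est in [('1st', dxdt_1), ('2nd', dxdt_2),
                  ('4th', dxdt_4), ('4th, iterated', dxdt_iter)]:
    print(name, 'RMSE:', np.sqrt(np.mean((est - np.cos(t))**2)))
```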
20 changes: 8 additions & 12 deletions pynumdiff/linear_model/_linear_model.py
@@ -14,9 +14,9 @@
# Savitzky-Golay filter #
#########################
def savgoldiff(x, dt, params=None, options=None, poly_order=None, window_size=None, smoothing_win=None):
"""Use the Savitzky-Golay to smooth the data and calculate the first derivative. It wses
"""Use the Savitzky-Golay to smooth the data and calculate the first derivative. It uses
scipy.signal.savgol_filter. The Savitzky-Golay is very similar to the sliding polynomial fit,
but slightly noisier, and much faster
but slightly noisier, and much faster.

:param np.array[float] x: data to differentiate
:param float dt: step size
@@ -48,7 +48,7 @@ def savgoldiff(x, dt, params=None, options=None, poly_order=None, window_size=No
dxdt_hat = utility.convolutional_smoother(dxdt_hat, kernel)

x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
x0 = utility.estimate_initial_condition(x, x_hat)
x0 = utility.estimate_integration_constant(x, x_hat)
x_hat = x_hat + x0

return x_hat, dxdt_hat
@@ -199,11 +199,8 @@ def _polydiff(x, dt, poly_order, weights=None):
# Linear diff #
###############
def _solve_for_A_and_C_given_X_and_Xdot(X, Xdot, num_integrations, dt, gammaC=1e-1, gammaA=1e-6,
solver=None, A_known=None, epsilon=1e-6, rows_of_interest='all'):
solver=None, A_known=None, epsilon=1e-6):
"""Given state and the derivative, find the system evolution and measurement matrices."""
if rows_of_interest == 'all':
rows_of_interest = np.arange(0, X.shape[0])

# Set up the variables
A = cvxpy.Variable((X.shape[0], X.shape[0]))
C = cvxpy.Variable((X.shape[0], num_integrations))
@@ -219,7 +216,7 @@ def _solve_for_A_and_C_given_X_and_Xdot(X, Xdot, num_integrations, dt, gammaC=1e
Csum = Csum + Cn

# Define the objective function
error = cvxpy.sum_squares(Xdot[rows_of_interest, :] - ( cvxpy.matmul(A, X) + Csum)[rows_of_interest, :])
error = cvxpy.sum_squares(Xdot - ( cvxpy.matmul(A, X) + Csum))
C_regularization = gammaC*cvxpy.sum(cvxpy.abs(C))
A_regularization = gammaA*cvxpy.sum(cvxpy.abs(A))
obj = cvxpy.Minimize(error + C_regularization + A_regularization)
Expand All @@ -238,8 +235,7 @@ def _solve_for_A_and_C_given_X_and_Xdot(X, Xdot, num_integrations, dt, gammaC=1e
prob = cvxpy.Problem(obj, constraints)
prob.solve(solver=solver)

A = np.array(A.value)
return A, np.array(C.value)
return np.array(A.value), np.array(C.value)

def lineardiff(x, dt, params=None, options=None, order=None, gamma=None, window_size=None,
step_size=10, kernel='friedrichs', solver=None):
@@ -306,7 +302,7 @@ def _lineardiff(x, dt, order, gamma, solver=None):
dxdt_hat = np.ravel(Xdot_reconstructed[-1, :])

x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
x_hat = x_hat + utility.estimate_initial_condition(x+mean, x_hat)
x_hat = x_hat + utility.estimate_integration_constant(x+mean, x_hat)

return x_hat, dxdt_hat

@@ -408,7 +404,7 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e

# Integrate to get x_hat
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
x0 = utility.estimate_initial_condition(x[padding:original_L+padding], x_hat)
x0 = utility.estimate_integration_constant(x[padding:original_L+padding], x_hat)
x_hat = x_hat + x0

return x_hat, dxdt_hat
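
The renames above, `estimate_initial_condition` -> `estimate_integration_constant`, match what the helper actually does: recover the scalar offset lost when `dxdt_hat` is integrated. As a sketch of the least-squares idea (not necessarily the library's exact implementation), fitting a single constant c to minimize ||x - (x_hat + c)||^2 reduces to the mean residual:

```python
import numpy as np

def estimate_integration_constant_sketch(x, x_hat):
    # minimizing sum((x - (x_hat + c))**2) over a scalar c gives c = mean(x - x_hat)
    return np.mean(x - x_hat)

x = np.array([1.0, 2.0, 3.0, 4.0])      # measured data
x_hat = np.array([0.0, 1.0, 2.0, 3.0])  # integral of dxdt_hat, offset unknown
print(estimate_integration_constant_sketch(x, x_hat))  # -> 1.0
```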
8 changes: 5 additions & 3 deletions pynumdiff/optimize/_optimize.py
@@ -7,7 +7,7 @@
from multiprocessing import Pool

from ..utils import evaluate
from ..finite_difference import first_order
from ..finite_difference import first_order, second_order, fourth_order
from ..smooth_finite_difference import mediandiff, meandiff, gaussiandiff, friedrichsdiff, butterdiff, splinediff
from ..linear_model import spectraldiff, polydiff, savgoldiff, lineardiff
from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity, smooth_acceleration, jerk_sliding
@@ -80,6 +80,8 @@
{'q': (1e-10, 1e10),
'r': (1e-10, 1e10)})
}
for method in [second_order, fourth_order]:
method_params_and_bounds[method] = method_params_and_bounds[first_order]
for method in [meandiff, gaussiandiff, friedrichsdiff]:
method_params_and_bounds[method] = method_params_and_bounds[mediandiff]
for method in [acceleration, jerk]:
@@ -158,15 +160,15 @@ def optimize(func, x, dt, search_space={}, dxdt_truth=None, tvgamma=1e-2, paddin
singleton_params = {k:v for k,v in params.items() if not isinstance(v, list)}

# The search space is the Cartesian product of all dimensions where multiple options are given
search_space_types = {k:type(v[0]) for k,v in params.items() if isinstance(v, list)} # for converting back and forth from point
search_space_types = {k:type(v[0]) for k,v in params.items() if isinstance(v, list)} # map param name -> type for converting to and from point
if any(v not in [float, int, bool] for v in search_space_types.values()):
raise ValueError("Optimization over categorical strings currently not supported")
# If excluding string type, I can just cast ints and bools to floats, and we're good to go
cartesian_product = product(*[np.array(params[k]).astype(float) for k in search_space_types])

bounds = [bounds[k] if k in bounds else # pass these to minimize(). It should respect them.
(0, 1) if v == bool else
None for k,v in search_space_types.items()]
None for k,v in search_space_types.items()] # None means no bound on a dimension

# wrap the objective and scipy.optimize.minimize because the objective and options are always the same
_obj_fun = partial(_objective_function, func=func, x=x, dt=dt, singleton_params=singleton_params,
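
To illustrate the commented lines above, here is roughly how `optimize` splits a search space into fixed singletons and a Cartesian grid of starting points; the parameter names below are hypothetical:

```python
import numpy as np
from itertools import product

# list values are searched over, scalar values stay fixed
params = {'num_iterations': [1, 5, 10], 'smooth': [True, False], 'cutoff': 0.1}
singleton_params = {k: v for k, v in params.items() if not isinstance(v, list)}
search_space_types = {k: type(v[0]) for k, v in params.items() if isinstance(v, list)}

# ints and bools are cast to float so every search dimension is numeric
cartesian_product = product(*[np.array(params[k]).astype(float) for k in search_space_types])
for point in cartesian_product:
    print(dict(zip(search_space_types, point)), '| fixed:', singleton_params)
```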
Original file line number Diff line number Diff line change
@@ -3,7 +3,7 @@
from warnings import warn

# included code
from pynumdiff.finite_difference import first_order as finite_difference
from pynumdiff.finite_difference import second_order as finite_difference
from pynumdiff.utils import utility


2 changes: 1 addition & 1 deletion pynumdiff/tests/conftest.py
@@ -14,7 +14,7 @@ def store_plots(request):
def pytest_sessionfinish(session, exitstatus):
if not hasattr(session.config, 'plots'): return
for method,(fig,axes) in session.config.plots.items():
axes[-1,-1].legend()
fig.legend(*axes[-1, -1].get_legend_handles_labels(), loc='lower left', ncol=2)
fig.suptitle(method.__name__)
fig.tight_layout()
pyplot.show()
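
The conftest change moves the legend from the last subplot onto the figure, collecting handles and labels from that axis. The same pattern in isolation, with stand-in data:

```python
import matplotlib.pyplot as pyplot

fig, axes = pyplot.subplots(2, 2, squeeze=False)
axes[-1, -1].plot([0, 1], [0, 1], label='truth')
axes[-1, -1].plot([0, 1], [0.1, 0.9], label='estimate')

# harvest handles/labels from one axis, then draw a single shared legend on the figure
fig.legend(*axes[-1, -1].get_legend_handles_labels(), loc='lower left', ncol=2)
fig.tight_layout()
pyplot.show()
```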