diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py
index 2261f73..7309160 100644
--- a/pynumdiff/tests/test_diff_methods.py
+++ b/pynumdiff/tests/test_diff_methods.py
@@ -4,8 +4,8 @@
 from warnings import warn
 
 from ..linear_model import lineardiff, polydiff, savgoldiff, spectraldiff
-from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity
-from ..kalman_smooth import constant_velocity, constant_acceleration, constant_jerk, known_dynamics
+from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity, smooth_acceleration
+from ..kalman_smooth import constant_velocity, constant_acceleration, constant_jerk
 from ..smooth_finite_difference import mediandiff, meandiff, gaussiandiff, friedrichsdiff, butterdiff, splinediff
 from ..finite_difference import first_order, second_order
 # Function aliases for testing cases where parameters change the behavior in a big way
@@ -47,6 +47,12 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs)
     (constant_acceleration, {'r':1e-4, 'q':1e-1}), (constant_acceleration, [1e-4, 1e-1]),
     (constant_jerk, {'r':1e-4, 'q':10}), (constant_jerk, [1e-4, 10]),
     # TODO (known_dynamics), but presently it doesn't calculate a derivative
+    (velocity, {'gamma':0.5}), (velocity, [0.5]),
+    (acceleration, {'gamma':1}), (acceleration, [1]),
+    (jerk, {'gamma':10}), (jerk, [10]),
+    (iterative_velocity, {'num_iterations':5, 'gamma':0.05}), (iterative_velocity, [5, 0.05]),
+    (smooth_acceleration, {'gamma':2, 'window_size':5}), (smooth_acceleration, [2, 5])
+    # TODO (jerk_sliding), because with the test cases here (len < 1000) it would just be a duplicate of jerk
 ]
 
 # All the testing methodology follows the exact same pattern; the only thing that changes is the
@@ -148,7 +154,37 @@
                   [(-4, -4), (-2, -2), (0, 0), (1, 0)],
                   [(-1, -2), (1, 0), (0, 0), (1, 0)],
                   [(1, 0), (2, 2), (1, 0), (2, 2)],
-                  [(1, 1), (3, 3), (1, 1), (3, 3)]]
+                  [(1, 1), (3, 3), (1, 1), (3, 3)]],
+    velocity: [[(-25, -25), (-18, -19), (0, -1), (1, 0)],
+               [(-12, -12), (-11, -12), (-1, -1), (-1, -2)],
+               [(0, 0), (1, 0), (0, 0), (1, 0)],
+               [(0, -1), (1, 1), (0, 0), (1, 0)],
+               [(1, 1), (2, 2), (1, 1), (2, 2)],
+               [(1, 0), (3, 3), (1, 0), (3, 3)]],
+    acceleration: [[(-25, -25), (-18, -18), (0, -1), (0, 0)],
+                   [(-10, -10), (-9, -9), (-1, -1), (0, -1)],
+                   [(-10, -10), (-9, -10), (-1, -1), (0, -1)],
+                   [(0, -1), (1, 0), (0, -1), (1, 0)],
+                   [(1, 1), (2, 2), (1, 1), (2, 2)],
+                   [(1, 1), (3, 3), (1, 1), (3, 3)]],
+    jerk: [[(-25, -25), (-18, -18), (-1, -1), (0, 0)],
+           [(-9, -10), (-9, -9), (-1, -1), (0, 0)],
+           [(-10, -10), (-9, -10), (-1, -1), (0, 0)],
+           [(0, 0), (1, 1), (0, 0), (1, 1)],
+           [(1, 1), (2, 2), (1, 1), (2, 2)],
+           [(1, 1), (3, 3), (1, 1), (3, 3)]],
+    iterative_velocity: [[(-9, -10), (-25, -25), (0, -1), (0, 0)],
+                         [(0, 0), (0, 0), (0, 0), (1, 0)],
+                         [(0, 0), (1, 0), (1, 0), (1, 0)],
+                         [(1, 0), (1, 1), (1, 0), (1, 1)],
+                         [(2, 1), (2, 2), (2, 1), (2, 2)],
+                         [(1, 1), (3, 3), (1, 1), (3, 3)]],
+    smooth_acceleration: [[(-9, -10), (-18, -18), (0, -1), (0, 0)],
+                          [(-9, -9), (-10, -10), (-1, -1), (-1, -1)],
+                          [(-2, -2), (-1, -1), (-1, -1), (0, -1)],
+                          [(0, 0), (1, 0), (0, -1), (1, 0)],
+                          [(1, 1), (2, 2), (1, 1), (2, 2)],
+                          [(1, 1), (3, 3), (1, 1), (3, 3)]]
 }
 
 # Essentially run the cartesian product of [diff methods] x [test functions] through this one test
@@ -162,13 +198,14 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re
     i, latex_name, f, df = test_func_and_deriv
 
     # some methods rely on cvxpy, and we'd like to allow use of pynumdiff without convex optimization
-    if diff_method in [lineardiff, velocity]:
+    if diff_method in [lineardiff, velocity, acceleration, jerk, smooth_acceleration]:
         try: import cvxpy
         except: warn(f"Cannot import cvxpy, skipping {diff_method} test."); return
 
-    x = f(t) # sample the function
-    x_noisy = x + noise # add a little noise
-    dxdt = df(t) # true values of the derivative at samples
+    # sample the true function and make noisy samples, and sample true derivative
+    x = f(t)
+    x_noisy = x + noise
+    dxdt = df(t)
 
     # differentiate without and with noise, accounting for new and old styles of calling functions
     x_hat, dxdt_hat = diff_method(x, dt, **params) if isinstance(params, dict) \
@@ -178,6 +215,7 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re
         else diff_method(x_noisy, dt, params) if (isinstance(params, list) and len(diff_method_and_params) < 3) \
         else diff_method(x_noisy, dt, params, options)
 
+    # plotting code
    if request.config.getoption("--plot") and not isinstance(params, list): # Get the plot flag from pytest configuration
         fig, axes = request.config.plots[diff_method] # get the appropriate plot, set up by the store_plots fixture in conftest.py
         axes[i, 0].plot(t, f(t), label=r"$x(t)$")
@@ -198,16 +236,20 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re
         if i == 0: axes[i, 1].set_title('with noise')
 
     # check x_hat and x_hat_noisy are close to x and that dxdt_hat and dxdt_hat_noisy are close to dxdt
-    if request.config.getoption("--bounds"): print("]\n[", end="")
+    if request.config.getoption("--bounds"): print("\n[", end="")
     for j,(a,b) in enumerate([(x,x_hat), (dxdt,dxdt_hat), (x,x_hat_noisy), (dxdt,dxdt_hat_noisy)]):
+        l2_error = np.linalg.norm(a - b)
+        linf_error = np.max(np.abs(a - b))
+
+        # bounds-printing for establishing bounds
         if request.config.getoption("--bounds"):
-            l2_error = np.linalg.norm(a - b)
-            linf_error = np.max(np.abs(a - b))
             #print(f"({l2_error},{linf_error})", end=", ")
             print(f"({int(np.ceil(np.log10(l2_error))) if l2_error > 0 else -25}, {int(np.ceil(np.log10(linf_error))) if linf_error > 0 else -25})", end=", ")
+        # bounds checking
         else:
             log_l2_bound, log_linf_bound = error_bounds[diff_method][i][j]
-            assert np.linalg.norm(a - b) < 10**log_l2_bound
-            assert np.max(np.abs(a - b)) < 10**log_linf_bound
-            if 0 < np.linalg.norm(a - b) < 10**(log_l2_bound - 1) or 0 < np.max(np.abs(a - b)) < 10**(log_linf_bound - 1):
+            assert l2_error < 10**log_l2_bound
+            assert linf_error < 10**log_linf_bound
+            # methods that get super duper close can converge to different very small limits on different runs
+            if 1e-18 < l2_error < 10**(log_l2_bound - 1) or 1e-18 < linf_error < 10**(log_linf_bound - 1):
                 print(f"Improvement detected for method {diff_method.__name__}")
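Note on the consolidated parameter list above: each total variation method is registered twice, once with a kwargs dict (the new keyword-argument style) and once with a plain params list (the older positional style), and `test_diff_method` dispatches on the type of `params`. A minimal sketch of the two equivalent call styles, using `velocity` with the `gamma` value from the test table; the signal and sampling below are illustrative, and cvxpy must be installed:

```python
import numpy as np
from pynumdiff.total_variation_regularization import velocity

t = np.linspace(0, 1, 100)                                   # illustrative sample times
dt = t[1] - t[0]
x_noisy = np.sin(2*np.pi*t) + 0.01*np.random.randn(len(t))   # illustrative noisy signal

# New style: hyperparameters passed as keyword arguments (params may now be omitted)
x_hat, dxdt_hat = velocity(x_noisy, dt, gamma=0.5)

# Old style: hyperparameters packed into a positional params list
x_hat_old, dxdt_hat_old = velocity(x_noisy, dt, [0.5])
```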
diff --git a/pynumdiff/tests/test_total_variation_regularization.py b/pynumdiff/tests/test_total_variation_regularization.py
deleted file mode 100644
index 3a94f0c..0000000
--- a/pynumdiff/tests/test_total_variation_regularization.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""
-Unit tests for total variation regularization
-"""
-# pylint: skip-file
-
-import numpy as np
-import pytest
-from pynumdiff.total_variation_regularization import *
-
-x = np.array([1., 4., 9., 3., 20.,
-              8., 16., 2., 15., 10.,
-              15., 3., 5., 7., 4.])
-dt = 0.01
-
-
-def test_velocity():
-    try:
-        import cvxpy
-    except:
-        pytest.skip("could not import cvxpy, skipping test_velocity", allow_module_level=True)
-
-    params = [0.5]
-    x_hat, dxdt_hat = velocity(x, dt, params, options={'solver': 'CVXOPT'})
-    x_smooth = np.array([1.60206974, 3.84254116, 6.08301239, 8.32348272, 14.76608638,
-                         12.76589239, 10.76569864, 7.70248886, 11.43239643, 11.4325017,
-                         11.43260691, 6.42354819, 5.78305309, 5.14255819, 4.50206322])
-    dxdt = np.array([2.24047187e+02, 2.24047133e+02, 2.24047078e+02, 4.34153700e+02,
-                     2.22120483e+02, -2.00019387e+02, -2.53170177e+02, 3.33348898e+01,
-                     1.86500642e+02, 1.05238579e-02, -2.50447675e+02, -2.82477691e+02,
-                     -6.40494998e+01, -6.40494935e+01, -6.40494871e+01])
-    np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
-    np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
-
-def test_iterative_velocity():
-    params = [1, 0.05]
-    x_hat, dxdt_hat = iterative_velocity(x, dt, params)
-    x_smooth = np.array([1.256, 3.254, 5.249, 7.197, 8.96, 10.287, 11.08, 11.407,
-                         11.305, 10.875, 10.235, 9.371, 8.305, 7.174, 6.042])
-    dxdt = np.array([199.802, 199.742, 199.222, 190.43, 162.105, 103.282,
-                     55.311, 10.12, -30.571, -55.409, -72.603, -100.119,
-                     -113.097, -113.097, -113.464])
-    np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
-    np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
-
-def test_acceleration():
-    try:
-        import cvxpy
-    except:
-        pytest.skip("could not import cvxpy, skipping test_acceleration", allow_module_level=True)
-
-    params = [1]
-    x_hat, dxdt_hat = acceleration(x, dt, params, options={'solver': 'CVXOPT'})
-    x_smooth = np.array([0.87728375, 4.44441238, 7.31141687, 9.47829719, 10.94505335,
-                         11.7116852, 11.78131319, 11.5560333, 11.03584752, 10.2207553,
-                         9.11075633, 7.7058506, 6.41426253, 5.23599238, 4.17104012])
-    dxdt = np.array([391.71907211, 321.70665613, 251.69424015, 181.6818242,
-                     111.66940057, 41.81299196, -7.78259499, -37.27328368,
-                     -66.76389967, -96.25455924, -125.74523529, -134.82469003,
-                     -123.49291116, -112.16112081, -100.82933046])
-    np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
-    np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
-
-def test_smooth_acceleration():
-    try:
-        import cvxpy
-    except:
-        pytest.skip("could not import cvxpy, skipping test_smooth_acceleration", allow_module_level=True)
-
-    params = [5, 30]
-    x_hat, dxdt_hat = smooth_acceleration(x, dt, params, options={'solver': 'CVXOPT'})
-    x_smooth = np.array([4.2, 5.52913444, 6.77037146, 7.87267273, 8.79483088,
-                         9.5044844, 9.97926076, 10.20730827, 10.18728338, 9.92792114,
-                         9.44728533, 8.77174094, 7.93472066, 6.97538656, 5.93725369])
-    dxdt = np.array([136.43269721, 129.9388182, 118.30858578, 102.15166804,
-                     82.27996127, 59.65074227, 35.30453082, 10.30497111,
-                     -14.30994982, -37.56249817, -58.56466324, -76.54421499,
-                     -90.85984169, -101.00697716, -106.61959829])
-    np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
-    np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
-
-def test_jerk():
-    try:
-        import cvxpy
-    except:
-        pytest.skip("could not import cvxpy, skipping test_jerk", allow_module_level=True)
-
-    params = [10]
-    x_hat, dxdt_hat = jerk(x, dt, params, options={'solver': 'CVXOPT'})
-    x_smooth = np.array([0.71013072, 4.51405229, 7.42619407, 9.53278029, 10.92003519,
-                         11.674183, 11.88144796, 11.6280543, 11.00022625, 10.08418804,
-                         8.9661639, 7.73237808, 6.4690548, 5.2624183, 4.19869281])
-    dxdt = np.array([420.66993476, 335.80316742, 250.93640008, 174.69205619,
-                     107.07013572, 48.07063861, -2.30643522, -44.06108581,
-                     -77.19331317, -101.70311726, -117.59049798, -124.85545525,
-                     -123.49798898, -113.51809914, -103.5382093])
-    np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
-    np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
-
-def test_jerk_sliding():
-    try:
-        import cvxpy
-    except:
-        pytest.skip("could not import cvxpy, skipping test_jerk_sliding", allow_module_level=True)
-
-    params = [10]
-    x_hat, dxdt_hat = jerk_sliding(x, dt, params, options={'solver': 'CVXOPT'})
-    x_smooth = np.array([0.71013072, 4.51405229, 7.42619407, 9.53278029, 10.92003519,
-                         11.674183, 11.88144796, 11.6280543, 11.00022625, 10.08418804,
-                         8.9661639, 7.73237808, 6.4690548, 5.2624183, 4.19869281])
-    dxdt = np.array([420.66993476, 335.80316742, 250.93640008, 174.69205619,
-                     107.07013572, 48.07063861, -2.30643522, -44.06108581,
-                     -77.19331317, -101.70311726, -117.59049798, -124.85545525,
-                     -123.49798898, -113.51809914, -103.5382093])
-    np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
-    np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
diff --git a/pynumdiff/total_variation_regularization/_total_variation_regularization.py b/pynumdiff/total_variation_regularization/_total_variation_regularization.py
index bc380c0..ea5bed6 100644
--- a/pynumdiff/total_variation_regularization/_total_variation_regularization.py
+++ b/pynumdiff/total_variation_regularization/_total_variation_regularization.py
@@ -8,7 +8,7 @@
 except ImportError:
     pass
 
-def iterative_velocity(x, dt, params, options=None, num_iterations=None, gamma=None, cg_maxiter=1000, scale='small'):
+def iterative_velocity(x, dt, params=None, options=None, num_iterations=None, gamma=None, cg_maxiter=1000, scale='small'):
     """Use an iterative solver to find the total variation regularized 1st derivative. See
     _chartrand_tvregdiff.py for details, author info, and license. Methods described in:
     Rick Chartrand, "Numerical differentiation of noisy, nonsmooth data," ISRN Applied Mathematics,
@@ -72,6 +72,7 @@ def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None):
     # Normalize
     mean = np.mean(x)
     std = np.std(x)
+    if std == 0: std = 1 # safety guard
     x = (x-mean)/std
 
     # Define the variables for the highest order derivative and the integration constants
@@ -108,7 +109,7 @@ def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None):
     return x_hat*std+mean, dxdt_hat*std
 
 
-def velocity(x, dt, params, options=None, gamma=None, solver=None):
+def velocity(x, dt, params=None, options=None, gamma=None, solver=None):
     """Use convex optimization (cvxpy) to solve for the velocity total variation regularized derivative.
 
     :param np.array[float] x: data to differentiate
@@ -135,7 +136,7 @@ def velocity(x, dt, params=None, options=None, gamma=None, solver=None):
     return _total_variation_regularized_derivative(x, dt, 1, gamma, solver=solver)
 
 
-def acceleration(x, dt, params, options=None, gamma=None, solver=None):
+def acceleration(x, dt, params=None, options=None, gamma=None, solver=None):
     """Use convex optimization (cvxpy) to solve for the acceleration total variation regularized derivative.
 
     :param np.array[float] x: data to differentiate
@@ -162,7 +163,7 @@ def acceleration(x, dt, params=None, options=None, gamma=None, solver=None):
     return _total_variation_regularized_derivative(x, dt, 2, gamma, solver=solver)
 
 
-def jerk(x, dt, params, options=None, gamma=None, solver=None):
+def jerk(x, dt, params=None, options=None, gamma=None, solver=None):
     """Use convex optimization (cvxpy) to solve for the jerk total variation regularized derivative.
 
     :param np.array[float] x: data to differentiate
@@ -189,7 +190,7 @@ def jerk(x, dt, params=None, options=None, gamma=None, solver=None):
     return _total_variation_regularized_derivative(x, dt, 3, gamma, solver=solver)
 
 
-def smooth_acceleration(x, dt, params, options=None, gamma=None, window_size=None, solver=None):
+def smooth_acceleration(x, dt, params=None, options=None, gamma=None, window_size=None, solver=None):
     """Use convex optimization (cvxpy) to solve for the acceleration total variation regularized
     derivative, and then apply a convolutional gaussian smoother to the resulting derivative to
     smooth out the peaks. The end result is similar to the jerk method, but can be more time-efficient.
@@ -228,7 +229,7 @@ def smooth_acceleration(x, dt, params=None, options=None, gamma=None, window_size=Non
     return x_hat, dxdt_hat
 
 
-def jerk_sliding(x, dt, params, options=None, gamma=None, solver=None):
+def jerk_sliding(x, dt, params=None, options=None, gamma=None, solver=None):
     """Use convex optimization (cvxpy) to solve for the jerk total variation regularized derivative.
 
     :param np.array[float] x: data to differentiate
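For context on the `if std == 0: std = 1` guard added above: `_total_variation_regularized_derivative` normalizes its input as `x = (x - mean)/std`, so a constant signal has `std == 0` and the normalization previously produced NaNs (numpy 0/0) before the solver was even called. A minimal sketch of the case the guard covers, assuming cvxpy is installed; the constant signal here is illustrative:

```python
import numpy as np
from pynumdiff.total_variation_regularization import velocity

# np.std(x_const) == 0 here; the guard substitutes std = 1, so the normalization
# (x - mean)/std yields an all-zero signal instead of NaNs from 0/0.
x_const = np.full(20, 3.0)
x_hat, dxdt_hat = velocity(x_const, 0.01, gamma=0.5)
```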