From 217867e21b43808515b2608f8a6d559c9f10e959 Mon Sep 17 00:00:00 2001 From: pavelkomarov Date: Mon, 30 Jun 2025 18:33:23 -0700 Subject: [PATCH 1/9] jerk_sliding now uses the slide_function utility --- pynumdiff/linear_model/_linear_model.py | 6 +- pynumdiff/tests/test_diff_methods.py | 12 +++- .../_total_variation_regularization.py | 63 +------------------ pynumdiff/utils/utility.py | 8 +-- 4 files changed, 19 insertions(+), 70 deletions(-) diff --git a/pynumdiff/linear_model/_linear_model.py b/pynumdiff/linear_model/_linear_model.py index eda431f..a657655 100644 --- a/pynumdiff/linear_model/_linear_model.py +++ b/pynumdiff/linear_model/_linear_model.py @@ -105,7 +105,7 @@ def _polydiff(x, dt, polynomial_order, weights=None): return _polydiff(x, dt, polynomial_order) kernel = {'gaussian':utility.gaussian_kernel, 'friedrichs':utility.friedrichs_kernel}[kernel](window_size) - return utility.slide_function(_polydiff, x, dt, kernel, polynomial_order, step_size=step_size, pass_weights=True) + return utility.slide_function(_polydiff, x, dt, kernel, polynomial_order, stride=step_size, pass_weights=True) ############# @@ -312,8 +312,8 @@ def _lineardiff(x, dt, order, gamma, solver=None): kernel = {'gaussian':utility.gaussian_kernel, 'friedrichs':utility.friedrichs_kernel}[kernel](window_size) - x_hat_forward, _ = utility.slide_function(_lineardiff, x, dt, kernel, order, gamma, step_size=step_size, solver=solver) - x_hat_backward, _ = utility.slide_function(_lineardiff, x[::-1], dt, kernel, order, gamma, step_size=step_size, solver=solver) + x_hat_forward, _ = utility.slide_function(_lineardiff, x, dt, kernel, order, gamma, stride=step_size, solver=solver) + x_hat_backward, _ = utility.slide_function(_lineardiff, x[::-1], dt, kernel, order, gamma, stride=step_size, solver=solver) # weights w = np.arange(1, len(x_hat_forward)+1,1)[::-1] diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py index c725602..9e27880 100644 --- a/pynumdiff/tests/test_diff_methods.py +++ b/pynumdiff/tests/test_diff_methods.py @@ -4,7 +4,7 @@ from warnings import warn from ..linear_model import lineardiff, polydiff, savgoldiff, spectraldiff -from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity, smooth_acceleration +from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity, smooth_acceleration, jerk_sliding from ..kalman_smooth import constant_velocity, constant_acceleration, constant_jerk from ..smooth_finite_difference import mediandiff, meandiff, gaussiandiff, friedrichsdiff, butterdiff, splinediff from ..finite_difference import first_order, second_order @@ -14,9 +14,11 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) dt = 0.1 t = np.arange(0, 3+dt, dt) # sample locations, including the endpoint tt = np.linspace(0, 3) # full domain, for visualizing denser plots +ttt = np.linspace(0, 3, 1001) # for testing jerk_sliding, which requires > 1000 points np.random.seed(7) # for repeatability of the test, so we don't get random failures noise = 0.05*np.random.randn(*t.shape) + # Analytic (function, derivative) pairs on which to test differentiation methods. 
test_funcs_and_derivs = [ (0, r"$x(t)=1$", lambda t: np.ones(t.shape), lambda t: np.zeros(t.shape)), # constant @@ -54,6 +56,7 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) (smooth_acceleration, {'gamma':2, 'window_size':5}), (smooth_acceleration, [2, 5]) # TODO (jerk_sliding), because with the test cases here (len < 1000) it would just be a duplicate of jerk ] +diff_methods_and_params = [(jerk_sliding, {'gamma':5})] # All the testing methodology follows the exact same pattern; the only thing that changes is the # closeness to the right answer various methods achieve with the given parameterizations. So index a @@ -201,6 +204,9 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re if diff_method in [lineardiff, velocity, acceleration, jerk, smooth_acceleration]: try: import cvxpy except: warn(f"Cannot import cvxpy, skipping {diff_method} test."); return + if diff_method == jerk_sliding: + t = ttt + noise = 0.05*np.random.randn(*t.shape) # sample the true function and make noisy samples, and sample true derivative x = f(t) @@ -243,8 +249,8 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re # bounds-printing for establishing bounds if request.config.getoption("--bounds"): - #print(f"({l2_error},{linf_error})", end=", ") - print(f"({int(np.ceil(np.log10(l2_error))) if l2_error > 0 else -25}, {int(np.ceil(np.log10(linf_error))) if linf_error > 0 else -25})", end=", ") + print(f"({l2_error},{linf_error})", end=", ") + #print(f"({int(np.ceil(np.log10(l2_error))) if l2_error > 0 else -25}, {int(np.ceil(np.log10(linf_error))) if linf_error > 0 else -25})", end=", ") # bounds checking else: log_l2_bound, log_linf_bound = error_bounds[diff_method][i][j] diff --git a/pynumdiff/total_variation_regularization/_total_variation_regularization.py b/pynumdiff/total_variation_regularization/_total_variation_regularization.py index 06717a9..3a2099e 100644 --- a/pynumdiff/total_variation_regularization/_total_variation_regularization.py +++ b/pynumdiff/total_variation_regularization/_total_variation_regularization.py @@ -253,65 +253,8 @@ def jerk_sliding(x, dt, params=None, options=None, gamma=None, solver=None): elif gamma == None: raise ValueError("`gamma` must be given.") - window_size = 1000 - stride = 200 - - if len(x) < window_size: + if len(x) <= 1000: return _total_variation_regularized_derivative(x, dt, 3, gamma, solver=solver) - # slide the jerk - final_xsmooth = [] - final_xdot_hat = [] - first_idx = 0 - final_idx = first_idx + window_size - last_loop = False - - final_weighting = [] - - try: - while not last_loop: - xsmooth, xdot_hat = _total_variation_regularized_derivative(x[first_idx:final_idx], dt, 3, - gamma, solver=solver) - - xsmooth = np.hstack(([0]*first_idx, xsmooth, [0]*(len(x)-final_idx))) - final_xsmooth.append(xsmooth) - - xdot_hat = np.hstack(([0]*first_idx, xdot_hat, [0]*(len(x)-final_idx))) - final_xdot_hat.append(xdot_hat) - - # blending - w = np.hstack(([0]*first_idx, - np.arange(1, 201)/200, - [1]*(final_idx-first_idx-400), - (np.arange(1, 201)/200)[::-1], - [0]*(len(x)-final_idx))) - final_weighting.append(w) - - if final_idx >= len(x): - last_loop = True - else: - first_idx += stride - final_idx += stride - if final_idx > len(x): - final_idx = len(x) - if final_idx - first_idx < 200: - first_idx -= (200 - (final_idx - first_idx)) - - # normalize columns - weights = np.vstack(final_weighting) - for c in range(weights.shape[1]): - weights[:, c] /= np.sum(weights[:, c]) - - # 
weighted sums - xsmooth = np.vstack(final_xsmooth) - xsmooth = np.sum(xsmooth*weights, axis=0) - - xdot_hat = np.vstack(final_xdot_hat) - xdot_hat = np.sum(xdot_hat*weights, axis=0) - - return xsmooth, xdot_hat - - except ValueError: - warn('Solver failed, returning finite difference instead') - from pynumdiff.finite_difference import second_order - return second_order(x, dt) + kernel = np.hstack((np.arange(1, 201)/200, np.ones(600), (np.arange(0, 201)/200)[::-1])) + return utility.slide_function(jerk_sliding, x, dt, kernel, gamma, stride=200) diff --git a/pynumdiff/utils/utility.py b/pynumdiff/utils/utility.py index b3908eb..96e3ffa 100644 --- a/pynumdiff/utils/utility.py +++ b/pynumdiff/utils/utility.py @@ -176,7 +176,7 @@ def convolutional_smoother(x, kernel, iterations=1): return x_hat[len(x):len(x)*2] -def slide_function(func, x, dt, kernel, *args, step_size=1, pass_weights=False, **kwargs): +def slide_function(func, x, dt, kernel, *args, stride=1, pass_weights=False, **kwargs): """Slide a smoothing derivative function across a timeseries with specified window size. :param callable func: name of the function to slide @@ -184,7 +184,7 @@ def slide_function(func, x, dt, kernel, *args, step_size=1, pass_weights=False, :param float dt: step size :param np.array[float] kernel: values to weight the sliding window :param list args: passed to func - :param int step_size: step size for slide (e.g. 1 means slide by 1 step) + :param int stride: step size for slide (e.g. 1 means slide by 1 step) :param bool pass_weights: whether weights should be passed to func via update to kwargs :param dict kwargs: passed to func @@ -195,11 +195,11 @@ def slide_function(func, x, dt, kernel, *args, step_size=1, pass_weights=False, if len(kernel) % 2 == 0: raise ValueError("Kernel window size should be odd.") half_window_size = (len(kernel) - 1)//2 # int because len(kernel) is always odd - weights = np.zeros((int(np.ceil(len(x)/step_size)), len(x))) + weights = np.zeros((int(np.ceil(len(x)/stride)), len(x))) x_hats = np.zeros(weights.shape) dxdt_hats = np.zeros(weights.shape) - for i,midpoint in enumerate(range(0, len(x), step_size)): # iterate window midpoints + for i,midpoint in enumerate(range(0, len(x), stride)): # iterate window midpoints # find where to index data and kernel, taking care at edges window = slice(max(0, midpoint - half_window_size), min(len(x), midpoint + half_window_size + 1)) # +1 because slicing works [,) From 94727bcb8d4b2093cede7932c1f75ea52e6163fb Mon Sep 17 00:00:00 2001 From: pavelkomarov Date: Mon, 30 Jun 2025 19:03:46 -0700 Subject: [PATCH 2/9] ripped out diff_methods_and_params = [(jerk_sliding, {'gamma':5})] line --- pynumdiff/tests/test_diff_methods.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py index 9e27880..98d228d 100644 --- a/pynumdiff/tests/test_diff_methods.py +++ b/pynumdiff/tests/test_diff_methods.py @@ -56,7 +56,6 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) (smooth_acceleration, {'gamma':2, 'window_size':5}), (smooth_acceleration, [2, 5]) # TODO (jerk_sliding), because with the test cases here (len < 1000) it would just be a duplicate of jerk ] -diff_methods_and_params = [(jerk_sliding, {'gamma':5})] # All the testing methodology follows the exact same pattern; the only thing that changes is the # closeness to the right answer various methods achieve with the given parameterizations. 
So index a From 58f66779420077a27f40dba34a20dfde210c6822 Mon Sep 17 00:00:00 2001 From: pavelkomarov Date: Mon, 30 Jun 2025 19:10:41 -0700 Subject: [PATCH 3/9] forgot to update a keyword arg --- pynumdiff/tests/test_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pynumdiff/tests/test_utils.py b/pynumdiff/tests/test_utils.py index d0f2884..ecee153 100644 --- a/pynumdiff/tests/test_utils.py +++ b/pynumdiff/tests/test_utils.py @@ -73,7 +73,7 @@ def identity(x, dt): return x, 0 # should come back the same x = np.arange(100) kernel = utility.gaussian_kernel(9) - x_hat, dxdt_hat = utility.slide_function(identity, x, 0.1, kernel, step_size=2) + x_hat, dxdt_hat = utility.slide_function(identity, x, 0.1, kernel, stride=2) assert np.allclose(x, x_hat) From 7ec39c5170b18529d65175bc0bddebf60364fab2 Mon Sep 17 00:00:00 2001 From: pavelkomarov Date: Mon, 30 Jun 2025 20:02:22 -0700 Subject: [PATCH 4/9] fixing sliding jerk and test case --- pynumdiff/tests/test_diff_methods.py | 3 ++- .../_total_variation_regularization.py | 11 ++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py index 98d228d..a3bfd0b 100644 --- a/pynumdiff/tests/test_diff_methods.py +++ b/pynumdiff/tests/test_diff_methods.py @@ -14,7 +14,7 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) dt = 0.1 t = np.arange(0, 3+dt, dt) # sample locations, including the endpoint tt = np.linspace(0, 3) # full domain, for visualizing denser plots -ttt = np.linspace(0, 3, 1001) # for testing jerk_sliding, which requires > 1000 points +ttt = np.linspace(0, 3, 101) # for testing jerk_sliding, which requires > 1001 points np.random.seed(7) # for repeatability of the test, so we don't get random failures noise = 0.05*np.random.randn(*t.shape) @@ -205,6 +205,7 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re except: warn(f"Cannot import cvxpy, skipping {diff_method} test."); return if diff_method == jerk_sliding: t = ttt + dt = 0.03 noise = 0.05*np.random.randn(*t.shape) # sample the true function and make noisy samples, and sample true derivative diff --git a/pynumdiff/total_variation_regularization/_total_variation_regularization.py b/pynumdiff/total_variation_regularization/_total_variation_regularization.py index 3a2099e..798e7b5 100644 --- a/pynumdiff/total_variation_regularization/_total_variation_regularization.py +++ b/pynumdiff/total_variation_regularization/_total_variation_regularization.py @@ -97,7 +97,7 @@ def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None): dxdt_hat = y/dt # y only holds the dx values; to get deriv scale by dt x_hat = np.cumsum(y) + integration_constants.value[order-1] # smoothed data - dxdt_hat = (dxdt_hat[0:-1] + dxdt_hat[1:])/2 # take first order FD to smooth a touch + dxdt_hat = (dxdt_hat[:-1] + dxdt_hat[1:])/2 # take first order FD to smooth a touch ddxdt_hat_f = dxdt_hat[-1] - dxdt_hat[-2] dxdt_hat_f = dxdt_hat[-1] + ddxdt_hat_f # What is this doing? Could we use a 2nd order FD above natively? 
dxdt_hat = np.hstack((dxdt_hat, dxdt_hat_f)) @@ -106,7 +106,7 @@ def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None): d = dxdt_hat[2] - dxdt_hat[1] dxdt_hat[0] = dxdt_hat[1] - d - return x_hat*std+mean, dxdt_hat*std + return x_hat*std+mean, dxdt_hat*std # derivative is linear, so scale derivative by std def velocity(x, dt, params=None, options=None, gamma=None, solver=None): @@ -253,8 +253,9 @@ def jerk_sliding(x, dt, params=None, options=None, gamma=None, solver=None): elif gamma == None: raise ValueError("`gamma` must be given.") - if len(x) <= 1000: + if len(x) <= 100: + warn("len(x) <= 1000, calling standard jerk() without sliding") return _total_variation_regularized_derivative(x, dt, 3, gamma, solver=solver) - kernel = np.hstack((np.arange(1, 201)/200, np.ones(600), (np.arange(0, 201)/200)[::-1])) - return utility.slide_function(jerk_sliding, x, dt, kernel, gamma, stride=200) + kernel = np.hstack((np.arange(1, 21)/20, np.ones(60), (np.arange(0, 21)/20)[::-1])) + return utility.slide_function(_total_variation_regularized_derivative, x, dt, kernel, 3, gamma, stride=20) From a176c0746757157f9e096c5aca3642fd6e333f66 Mon Sep 17 00:00:00 2001 From: pavelkomarov Date: Mon, 30 Jun 2025 20:40:03 -0700 Subject: [PATCH 5/9] fixed jerk sliding tests --- pynumdiff/tests/test_diff_methods.py | 40 +++++++++++++++++----------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py index a3bfd0b..f4d7ef9 100644 --- a/pynumdiff/tests/test_diff_methods.py +++ b/pynumdiff/tests/test_diff_methods.py @@ -11,13 +11,12 @@ # Function aliases for testing cases where parameters change the behavior in a big way def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) -dt = 0.1 -t = np.arange(0, 3+dt, dt) # sample locations, including the endpoint +t = np.linspace(0, 3, 31) # sample locations, including the endpoint tt = np.linspace(0, 3) # full domain, for visualizing denser plots -ttt = np.linspace(0, 3, 101) # for testing jerk_sliding, which requires > 1001 points +ttt = np.linspace(0, 3, 201) # for testing jerk_sliding, which requires > 1001 points np.random.seed(7) # for repeatability of the test, so we don't get random failures noise = 0.05*np.random.randn(*t.shape) - +long_noise = 0.05*np.random.randn(*ttt.shape) # Analytic (function, derivative) pairs on which to test differentiation methods. 
test_funcs_and_derivs = [ @@ -53,8 +52,8 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) (acceleration, {'gamma':1}), (acceleration, [1]), (jerk, {'gamma':10}), (jerk, [10]), (iterative_velocity, {'num_iterations':5, 'gamma':0.05}), (iterative_velocity, [5, 0.05]), - (smooth_acceleration, {'gamma':2, 'window_size':5}), (smooth_acceleration, [2, 5]) - # TODO (jerk_sliding), because with the test cases here (len < 1000) it would just be a duplicate of jerk + (smooth_acceleration, {'gamma':2, 'window_size':5}), (smooth_acceleration, [2, 5]), + (jerk_sliding, {'gamma':1e2}), (jerk_sliding, [1e2]) ] # All the testing methodology follows the exact same pattern; the only thing that changes is the @@ -186,7 +185,13 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) [(-2, -2), (-1, -1), (-1, -1), (0, -1)], [(0, 0), (1, 0), (0, -1), (1, 0)], [(1, 1), (2, 2), (1, 1), (2, 2)], - [(1, 1), (3, 3), (1, 1), (3, 3)]] + [(1, 1), (3, 3), (1, 1), (3, 3)]], + jerk_sliding: [[(-14, -15), (-14, -14), (0, -1), (1, 0)], + [(-4, -4), (-3, -3), (0, -1), (1, 0)], + [(-4, -4), (-3, -3), (0, -1), (1, 0)], + [(-3, -4), (-1, -2), (0, -1), (1, 0)], + [(0, 0), (2, 1), (0, 0), (2, 1)], + [(1, 1), (3, 3), (1, 1), (3, 3)]] } # Essentially run the cartesian product of [diff methods] x [test functions] through this one test @@ -203,15 +208,18 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re if diff_method in [lineardiff, velocity, acceleration, jerk, smooth_acceleration]: try: import cvxpy except: warn(f"Cannot import cvxpy, skipping {diff_method} test."); return - if diff_method == jerk_sliding: - t = ttt - dt = 0.03 - noise = 0.05*np.random.randn(*t.shape) # sample the true function and make noisy samples, and sample true derivative - x = f(t) - x_noisy = x + noise - dxdt = df(t) + if diff_method != jerk_sliding: + x = f(t) + x_noisy = x + noise + dxdt = df(t) + dt = t[1] - t[0] + else: # different density for jerk_sliding + x = f(ttt) + x_noisy = x + long_noise + dxdt = df(ttt) + dt = ttt[1] - ttt[0] # differentiate without and with noise, accounting for new and old styles of calling functions x_hat, dxdt_hat = diff_method(x, dt, **params) if isinstance(params, dict) \ @@ -249,8 +257,8 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re # bounds-printing for establishing bounds if request.config.getoption("--bounds"): - print(f"({l2_error},{linf_error})", end=", ") - #print(f"({int(np.ceil(np.log10(l2_error))) if l2_error > 0 else -25}, {int(np.ceil(np.log10(linf_error))) if linf_error > 0 else -25})", end=", ") + #print(f"({l2_error},{linf_error})", end=", ") + print(f"({int(np.ceil(np.log10(l2_error))) if l2_error > 0 else -25}, {int(np.ceil(np.log10(linf_error))) if linf_error > 0 else -25})", end=", ") # bounds checking else: log_l2_bound, log_linf_bound = error_bounds[diff_method][i][j] From fca5afea61ba78412cf740317e7f1d1aa564ffa0 Mon Sep 17 00:00:00 2001 From: pavelkomarov Date: Mon, 30 Jun 2025 20:42:14 -0700 Subject: [PATCH 6/9] corrected comment --- pynumdiff/tests/test_diff_methods.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py index f4d7ef9..7f47259 100644 --- a/pynumdiff/tests/test_diff_methods.py +++ b/pynumdiff/tests/test_diff_methods.py @@ -13,7 +13,7 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) t = np.linspace(0, 3, 31) # sample locations, 
including the endpoint tt = np.linspace(0, 3) # full domain, for visualizing denser plots -ttt = np.linspace(0, 3, 201) # for testing jerk_sliding, which requires > 1001 points +ttt = np.linspace(0, 3, 201) # for testing jerk_sliding, which requires more points np.random.seed(7) # for repeatability of the test, so we don't get random failures noise = 0.05*np.random.randn(*t.shape) long_noise = 0.05*np.random.randn(*ttt.shape) From 6d39f81d34cdbafeaf29b1cf7744f714808e0c98 Mon Sep 17 00:00:00 2001 From: pavelkomarov Date: Mon, 30 Jun 2025 20:44:25 -0700 Subject: [PATCH 7/9] convex solver failing in CI. Trying CLARABEL --- pynumdiff/tests/test_diff_methods.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py index 7f47259..a505fd0 100644 --- a/pynumdiff/tests/test_diff_methods.py +++ b/pynumdiff/tests/test_diff_methods.py @@ -53,7 +53,7 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) (jerk, {'gamma':10}), (jerk, [10]), (iterative_velocity, {'num_iterations':5, 'gamma':0.05}), (iterative_velocity, [5, 0.05]), (smooth_acceleration, {'gamma':2, 'window_size':5}), (smooth_acceleration, [2, 5]), - (jerk_sliding, {'gamma':1e2}), (jerk_sliding, [1e2]) + (jerk_sliding, {'gamma':1e2, 'solver':'CLARABEL'}), (jerk_sliding, [1e2], {'solver':'CLARABEL'}) ] # All the testing methodology follows the exact same pattern; the only thing that changes is the From 9bb86a657caf34251f7b236658d087f9550a689f Mon Sep 17 00:00:00 2001 From: pavelkomarov Date: Mon, 30 Jun 2025 20:50:58 -0700 Subject: [PATCH 8/9] wasn't passing solver through to slide_function --- pynumdiff/tests/test_diff_methods.py | 8 ++++---- .../_total_variation_regularization.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py index a505fd0..f9876d4 100644 --- a/pynumdiff/tests/test_diff_methods.py +++ b/pynumdiff/tests/test_diff_methods.py @@ -186,10 +186,10 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) [(0, 0), (1, 0), (0, -1), (1, 0)], [(1, 1), (2, 2), (1, 1), (2, 2)], [(1, 1), (3, 3), (1, 1), (3, 3)]], - jerk_sliding: [[(-14, -15), (-14, -14), (0, -1), (1, 0)], - [(-4, -4), (-3, -3), (0, -1), (1, 0)], - [(-4, -4), (-3, -3), (0, -1), (1, 0)], - [(-3, -4), (-1, -2), (0, -1), (1, 0)], + jerk_sliding: [[(-13, -14), (-12, -13), (0, -1), (1, 0)], + [(-12, -13), (-12, -12), (0, -1), (0, 0)], + [(-13, -14), (-12, -13), (0, -1), (0, 0)], + [(-1, -2), (0, 0), (0, -1), (1, 0)], [(0, 0), (2, 1), (0, 0), (2, 1)], [(1, 1), (3, 3), (1, 1), (3, 3)]] } diff --git a/pynumdiff/total_variation_regularization/_total_variation_regularization.py b/pynumdiff/total_variation_regularization/_total_variation_regularization.py index 798e7b5..f15778d 100644 --- a/pynumdiff/total_variation_regularization/_total_variation_regularization.py +++ b/pynumdiff/total_variation_regularization/_total_variation_regularization.py @@ -258,4 +258,4 @@ def jerk_sliding(x, dt, params=None, options=None, gamma=None, solver=None): return _total_variation_regularized_derivative(x, dt, 3, gamma, solver=solver) kernel = np.hstack((np.arange(1, 21)/20, np.ones(60), (np.arange(0, 21)/20)[::-1])) - return utility.slide_function(_total_variation_regularized_derivative, x, dt, kernel, 3, gamma, stride=20) + return utility.slide_function(_total_variation_regularized_derivative, x, dt, kernel, 3, gamma, stride=20, solver=solver) From 
62d680b6fa14251fc206ec45101b4b61cc6c74ea Mon Sep 17 00:00:00 2001 From: pavelkomarov Date: Mon, 30 Jun 2025 21:19:02 -0700 Subject: [PATCH 9/9] added window_size as a parameter to jerk_sliding, because that makes more sense --- pynumdiff/tests/conftest.py | 1 + pynumdiff/tests/test_diff_methods.py | 28 +++++++------------ .../_total_variation_regularization.py | 15 ++++++---- 3 files changed, 21 insertions(+), 23 deletions(-) diff --git a/pynumdiff/tests/conftest.py b/pynumdiff/tests/conftest.py index e36dbca..bfe923f 100644 --- a/pynumdiff/tests/conftest.py +++ b/pynumdiff/tests/conftest.py @@ -12,6 +12,7 @@ def store_plots(request): request.config.plots = defaultdict(lambda: pyplot.subplots(6, 2, figsize=(12,7))) # 6 is len(test_funcs_and_derivs) def pytest_sessionfinish(session, exitstatus): + if not hasattr(session.config, 'plots'): return for method,(fig,axes) in session.config.plots.items(): axes[-1,-1].legend() fig.suptitle(method.__name__) diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py index f9876d4..0f8337d 100644 --- a/pynumdiff/tests/test_diff_methods.py +++ b/pynumdiff/tests/test_diff_methods.py @@ -11,12 +11,11 @@ # Function aliases for testing cases where parameters change the behavior in a big way def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) +dt = 0.1 t = np.linspace(0, 3, 31) # sample locations, including the endpoint tt = np.linspace(0, 3) # full domain, for visualizing denser plots -ttt = np.linspace(0, 3, 201) # for testing jerk_sliding, which requires more points np.random.seed(7) # for repeatability of the test, so we don't get random failures noise = 0.05*np.random.randn(*t.shape) -long_noise = 0.05*np.random.randn(*ttt.shape) # Analytic (function, derivative) pairs on which to test differentiation methods. 
test_funcs_and_derivs = [ @@ -53,7 +52,7 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) (jerk, {'gamma':10}), (jerk, [10]), (iterative_velocity, {'num_iterations':5, 'gamma':0.05}), (iterative_velocity, [5, 0.05]), (smooth_acceleration, {'gamma':2, 'window_size':5}), (smooth_acceleration, [2, 5]), - (jerk_sliding, {'gamma':1e2, 'solver':'CLARABEL'}), (jerk_sliding, [1e2], {'solver':'CLARABEL'}) + (jerk_sliding, {'gamma':1, 'window_size':15}), (jerk_sliding, [1], {'window_size':15}) ] # All the testing methodology follows the exact same pattern; the only thing that changes is the @@ -186,11 +185,11 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) [(0, 0), (1, 0), (0, -1), (1, 0)], [(1, 1), (2, 2), (1, 1), (2, 2)], [(1, 1), (3, 3), (1, 1), (3, 3)]], - jerk_sliding: [[(-13, -14), (-12, -13), (0, -1), (1, 0)], - [(-12, -13), (-12, -12), (0, -1), (0, 0)], - [(-13, -14), (-12, -13), (0, -1), (0, 0)], - [(-1, -2), (0, 0), (0, -1), (1, 0)], - [(0, 0), (2, 1), (0, 0), (2, 1)], + jerk_sliding: [[(-15, -15), (-16, -17), (0, -1), (1, 0)], + [(-14, -14), (-14, -14), (0, -1), (0, 0)], + [(-14, -14), (-14, -14), (0, -1), (0, 0)], + [(-1, -1), (0, 0), (0, -1), (0, 0)], + [(0, 0), (2, 2), (0, 0), (2, 2)], [(1, 1), (3, 3), (1, 1), (3, 3)]] } @@ -210,16 +209,9 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re except: warn(f"Cannot import cvxpy, skipping {diff_method} test."); return # sample the true function and make noisy samples, and sample true derivative - if diff_method != jerk_sliding: - x = f(t) - x_noisy = x + noise - dxdt = df(t) - dt = t[1] - t[0] - else: # different density for jerk_sliding - x = f(ttt) - x_noisy = x + long_noise - dxdt = df(ttt) - dt = ttt[1] - ttt[0] + x = f(t) + x_noisy = x + noise + dxdt = df(t) # differentiate without and with noise, accounting for new and old styles of calling functions x_hat, dxdt_hat = diff_method(x, dt, **params) if isinstance(params, dict) \ diff --git a/pynumdiff/total_variation_regularization/_total_variation_regularization.py b/pynumdiff/total_variation_regularization/_total_variation_regularization.py index f15778d..56ba776 100644 --- a/pynumdiff/total_variation_regularization/_total_variation_regularization.py +++ b/pynumdiff/total_variation_regularization/_total_variation_regularization.py @@ -229,7 +229,7 @@ def smooth_acceleration(x, dt, params=None, options=None, gamma=None, window_siz return x_hat, dxdt_hat -def jerk_sliding(x, dt, params=None, options=None, gamma=None, solver=None): +def jerk_sliding(x, dt, params=None, options=None, gamma=None, solver=None, window_size=101): """Use convex optimization (cvxpy) to solve for the jerk total variation regularized derivative. :param np.array[float] x: data to differentiate @@ -239,6 +239,7 @@ def jerk_sliding(x, dt, params=None, options=None, gamma=None, solver=None): :param float gamma: the regularization parameter :param str solver: the solver CVXPY should use, 'MOSEK', 'CVXOPT', 'CLARABEL', 'ECOS', etc. In testing, 'MOSEK' was the most robust. If not given, fall back to CVXPY's default. 
+ :param int window_size: how wide to make the kernel :return: tuple[np.array, np.array] of\n - **x_hat** -- estimated (smoothed) x @@ -250,12 +251,16 @@ def jerk_sliding(x, dt, params=None, options=None, gamma=None, solver=None): gamma = params[0] if isinstance(params, list) else params if options != None: if 'solver' in options: solver = options['solver'] + if 'window_size' in options: window_size = options['window_size'] elif gamma == None: raise ValueError("`gamma` must be given.") - if len(x) <= 100: - warn("len(x) <= 1000, calling standard jerk() without sliding") + if len(x) < window_size: + warn("len(x) <= window_size, calling standard jerk() without sliding") return _total_variation_regularized_derivative(x, dt, 3, gamma, solver=solver) - kernel = np.hstack((np.arange(1, 21)/20, np.ones(60), (np.arange(0, 21)/20)[::-1])) - return utility.slide_function(_total_variation_regularized_derivative, x, dt, kernel, 3, gamma, stride=20, solver=solver) + ramp = window_size//5 + kernel = np.hstack((np.arange(1, ramp+1)/ramp, np.ones(window_size - 2*ramp), (np.arange(1, ramp+1)/ramp)[::-1])) + return utility.slide_function(_total_variation_regularized_derivative, x, dt, kernel, 3, gamma, stride=ramp, solver=solver) + +
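
Note on the shared mechanism: the common thread in this series is that jerk_sliding's hand-rolled window blending is replaced by utility.slide_function — solve on overlapping windows, taper each window's contribution with a ramped kernel, and take a per-sample weighted average. Below is a minimal standalone sketch of that blending arithmetic, assuming a trivial identity estimator in place of the cvxpy solve. blend_windows is a hypothetical stand-in, not pynumdiff API (the real slide_function also carries dxdt_hat alongside x_hat); only the kernel/stride construction is taken from the diffs above.

    import numpy as np

    def blend_windows(estimator, x, dt, kernel, stride):
        n, w = len(x), len(kernel)
        half = (w - 1) // 2                           # kernel length assumed odd
        weights, x_hats = [], []
        for mid in range(0, n, stride):               # strided window midpoints
            lo, hi = max(0, mid - half), min(n, mid + half + 1)
            k_lo = lo - (mid - half)                  # clip kernel at the edges
            k_hi = w - ((mid + half + 1) - hi)
            row_w, row_x = np.zeros(n), np.zeros(n)
            row_w[lo:hi] = kernel[k_lo:k_hi]          # taper this window's influence
            row_x[lo:hi] = estimator(x[lo:hi], dt)
            weights.append(row_w)
            x_hats.append(row_x)
        W = np.vstack(weights)
        W /= W.sum(axis=0, keepdims=True)             # normalize weights per sample
        return (np.vstack(x_hats) * W).sum(axis=0)    # blended estimate

    x = np.sin(np.linspace(0, 3, 301))
    window_size = 101
    ramp = window_size // 5
    kernel = np.hstack((np.arange(1, ramp + 1)/ramp,
                        np.ones(window_size - 2*ramp),
                        (np.arange(1, ramp + 1)/ramp)[::-1]))
    x_hat = blend_windows(lambda seg, dt: seg, x, 0.01, kernel, stride=ramp)
    assert np.allclose(x_hat, x)                      # identity estimator passes samples through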
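For reference, after PATCH 9/9 the sliding variant is driven by the gamma and window_size keywords, as in the updated test parameterization. A hedged usage sketch follows: cvxpy must be installed, the parameter values are illustrative rather than tuned, and inputs shorter than window_size fall back to the plain jerk behavior.

    import numpy as np
    from pynumdiff.total_variation_regularization import jerk_sliding

    dt = 0.01
    t = np.arange(0, 3, dt)                           # 300 samples
    x_noisy = np.sin(t) + 0.05*np.random.randn(t.size)

    # gamma and window_size are illustrative choices, not tuned recommendations
    x_hat, dxdt_hat = jerk_sliding(x_noisy, dt, gamma=1, window_size=101)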