diff --git a/pynumdiff/linear_model/_linear_model.py b/pynumdiff/linear_model/_linear_model.py
index 1fd73a9..8262615 100644
--- a/pynumdiff/linear_model/_linear_model.py
+++ b/pynumdiff/linear_model/_linear_model.py
@@ -6,10 +6,8 @@
 from pynumdiff.finite_difference import first_order as finite_difference
 from pynumdiff.utils import utility
 
-try:
-    import cvxpy
-except ImportError:
-    pass
+try: import cvxpy
+except ImportError: pass
 
 KERNELS = {'friedrichs': utility._friedrichs_kernel,
            'gaussian': utility._gaussian_kernel}
@@ -299,7 +297,7 @@ def polydiff(x, dt, params=None, options=None, polynomial_order=None, window_siz
 
 
 def __solve_for_A_and_C_given_X_and_Xdot__(X, Xdot, num_integrations, dt, gammaC=1e-1, gammaA=1e-6,
-        solver='MOSEK', A_known=None, epsilon=1e-6, rows_of_interest='all'):
+        solver=None, A_known=None, epsilon=1e-6, rows_of_interest='all'):
     """Given state and the derivative, find the system evolution and measurement matrices.
     """
@@ -338,7 +336,7 @@
     # Solve the problem
     prob = cvxpy.Problem(obj, constraints)
-    prob.solve(solver=solver)  # MOSEK does not take max_iters
+    prob.solve(solver=solver)
 
     A = np.array(A.value)
     return A, np.array(C.value)
@@ -355,7 +353,7 @@
     return x
 
 
-def _lineardiff(x, dt, N, gamma, solver='MOSEK', weights=None):
+def _lineardiff(x, dt, N, gamma, solver=None, weights=None):
     """Estimate the parameters for a system xdot = Ax, and use that to calculate the derivative
 
     :param np.array[float] x: time series to differentiate
@@ -405,7 +403,7 @@
 def lineardiff(x, dt, params=None, options=None, order=None, gamma=None, window_size=None,
-               sliding=True, step_size=10, kernel='friedrichs', solver='MOSEK'):
+               sliding=True, step_size=10, kernel='friedrichs', solver=None):
     """Slide a smoothing derivative function across a time series with specified window size.
 
     :param np.array[float] x: array of time series to differentiate
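Note for reviewers on the `solver=None` default introduced above: `cvxpy`'s `Problem.solve()` accepts `solver=None` and then chooses among whatever solvers are installed, so dropping the hard-coded `'MOSEK'` makes that dependency optional without touching the call sites. A minimal sketch of the behavior being relied on (the toy problem is illustrative, not taken from the library):

```python
import cvxpy
import numpy as np

# Stand-in for the regularized least-squares objectives built in _linear_model.py
b = np.arange(4.0)
z = cvxpy.Variable(4)
prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.sum_squares(z - b)))

prob.solve(solver=None)        # cvxpy selects an installed solver on its own
prob.solve(solver='CLARABEL')  # or pin one explicitly, as the tests below now do
print(prob.value, z.value)     # either way, z should recover b
```

The second call raises if CLARABEL is not installed; the first never needs MOSEK.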
diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py
index 2f142d9..2261f73 100644
--- a/pynumdiff/tests/test_diff_methods.py
+++ b/pynumdiff/tests/test_diff_methods.py
@@ -33,7 +33,7 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs)
     (first_order, {}),  # empty dictionary for the case of no parameters. no params -> no diff in new vs old
     (iterated_first_order, {'num_iterations':5}), (iterated_first_order, [5], {'iterate':True}),
     (second_order, {}),
-    #(lineardiff, {'order':3, 'gamma':5, 'window_size':10, 'solver':'CVXOPT'}),
+    (lineardiff, {'order':3, 'gamma':5, 'window_size':10, 'solver':'CLARABEL'}), (lineardiff, [3, 5, 10], {'solver':'CLARABEL'}),
     (polydiff, {'polynomial_order':2, 'window_size':3}), (polydiff, [2, 3]),
     (savgoldiff, {'polynomial_order':2, 'window_size':4, 'smoothing_win':4}), (savgoldiff, [2, 4, 4]),
     (spectraldiff, {'high_freq_cutoff':0.1}), (spectraldiff, [0.1]),
@@ -48,7 +48,6 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs)
     (constant_jerk, {'r':1e-4, 'q':10}), (constant_jerk, [1e-4, 10]),
     # TODO (known_dynamics), but presently it doesn't calculate a derivative
 ]
-diff_methods_and_params = [(constant_jerk, {'r':1e-4, 'q':10})]
 
 # All the testing methodology follows the exact same pattern; the only thing that changes is the
 # closeness to the right answer various methods achieve with the given parameterizations. So index a
@@ -72,7 +71,12 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs)
                   [(-25, -25), (0, -1), (0, 0), (1, 1)],
                   [(-25, -25), (1, 1), (0, 0), (1, 1)],
                   [(-25, -25), (3, 3), (0, 0), (3, 3)]],
-    #lineardiff: [TBD when #91 is solved],
+    lineardiff: [[(-6, -6), (-5, -6), (0, -1), (0, 0)],
+                 [(0, 0), (1, 1), (0, 0), (1, 1)],
+                 [(1, 0), (2, 1), (1, 0), (2, 1)],
+                 [(1, 0), (1, 1), (1, 0), (1, 1)],
+                 [(1, 1), (2, 2), (1, 1), (2, 2)],
+                 [(1, 1), (3, 3), (1, 1), (3, 3)]],
     polydiff: [[(-14, -15), (-14, -14), (0, -1), (1, 1)],
               [(-14, -14), (-13, -13), (0, -1), (1, 1)],
               [(-14, -14), (-13, -13), (0, -1), (1, 1)],
@@ -173,21 +177,6 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re
     x_hat_noisy, dxdt_hat_noisy = diff_method(x_noisy, dt, **params) if isinstance(params, dict) \
         else diff_method(x_noisy, dt, params) if (isinstance(params, list) and len(diff_method_and_params) < 3) \
         else diff_method(x_noisy, dt, params, options)
-
-    # check x_hat and x_hat_noisy are close to x and that dxdt_hat and dxdt_hat_noisy are close to dxdt
-    if request.config.getoption("--bounds"): print("]\n[", end="")
-    for j,(a,b) in enumerate([(x,x_hat), (dxdt,dxdt_hat), (x,x_hat_noisy), (dxdt,dxdt_hat_noisy)]):
-        if request.config.getoption("--bounds"):
-            l2_error = np.linalg.norm(a - b)
-            linf_error = np.max(np.abs(a - b))
-            #print(f"({l2_error},{linf_error})", end=", ")
-            print(f"({int(np.ceil(np.log10(l2_error))) if l2_error > 0 else -25}, {int(np.ceil(np.log10(linf_error))) if linf_error > 0 else -25})", end=", ")
-        else:
-            log_l2_bound, log_linf_bound = error_bounds[diff_method][i][j]
-            assert np.linalg.norm(a - b) < 10**log_l2_bound
-            assert np.max(np.abs(a - b)) < 10**log_linf_bound
-            if 0 < np.linalg.norm(a - b) < 10**(log_l2_bound - 1) or 0 < np.max(np.abs(a - b)) < 10**(log_linf_bound - 1):
-                print(f"Improvement detected for method {diff_method.__name__}")
 
     if request.config.getoption("--plot") and not isinstance(params, list):  # Get the plot flag from pytest configuration
         fig, axes = request.config.plots[diff_method]  # get the appropriate plot, set up by the store_plots fixture in conftest.py
@@ -207,3 +196,18 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re
         else: axes[i, 1].set_xlabel('t')
         axes[i, 1].set_yticklabels([])
         if i == 0: axes[i, 1].set_title('with noise')
+
+    # check x_hat and x_hat_noisy are close to x and that dxdt_hat and dxdt_hat_noisy are close to dxdt
+    if request.config.getoption("--bounds"): print("]\n[", end="")
+    for j,(a,b) in enumerate([(x,x_hat), (dxdt,dxdt_hat), (x,x_hat_noisy), (dxdt,dxdt_hat_noisy)]):
+        if request.config.getoption("--bounds"):
+            l2_error = np.linalg.norm(a - b)
+            linf_error = np.max(np.abs(a - b))
+            #print(f"({l2_error},{linf_error})", end=", ")
+            print(f"({int(np.ceil(np.log10(l2_error))) if l2_error > 0 else -25}, {int(np.ceil(np.log10(linf_error))) if linf_error > 0 else -25})", end=", ")
+        else:
+            log_l2_bound, log_linf_bound = error_bounds[diff_method][i][j]
+            assert np.linalg.norm(a - b) < 10**log_l2_bound
+            assert np.max(np.abs(a - b)) < 10**log_linf_bound
+            if 0 < np.linalg.norm(a - b) < 10**(log_l2_bound - 1) or 0 < np.max(np.abs(a - b)) < 10**(log_linf_bound - 1):
+                print(f"Improvement detected for method {diff_method.__name__}")
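For readers of the new `lineardiff` rows in the bounds table: each pair encodes `(ceil(log10(L2 error)), ceil(log10(Linf error)))`, with `-25` as the sentinel for an exactly-zero error, matching what the relocated `--bounds` printout computes. A small helper illustrating the encoding (the helper name is mine, not part of the test suite):

```python
import numpy as np

def log10_error_pair(a, b):
    """Encode errors the way the --bounds printout does: ceil of log10,
    with -25 standing in for an error of exactly zero."""
    l2, linf = np.linalg.norm(a - b), np.max(np.abs(a - b))
    encode = lambda e: int(np.ceil(np.log10(e))) if e > 0 else -25
    return encode(l2), encode(linf)

# A table entry of (1, 1) therefore asserts both errors stay below 10**1.
print(log10_error_pair(np.zeros(3), np.full(3, 0.004)))  # -> (-2, -2)
```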
diff --git a/pynumdiff/total_variation_regularization/_total_variation_regularization.py b/pynumdiff/total_variation_regularization/_total_variation_regularization.py
index 1f22a9a..bc380c0 100644
--- a/pynumdiff/total_variation_regularization/_total_variation_regularization.py
+++ b/pynumdiff/total_variation_regularization/_total_variation_regularization.py
@@ -7,13 +7,6 @@
 try: import cvxpy
 except ImportError: pass
 
-try:
-    import mosek
-    solver = 'MOSEK'  # https://www.mosek.com/
-except ImportError:
-    warn("MOSEK not installed, falling back to CVXPY's defaults")
-    solver = None  # passing this to solve() allows CVXPY to use whatever it deems best
-
 
 def iterative_velocity(x, dt, params, options=None, num_iterations=None, gamma=None, cg_maxiter=1000, scale='small'):
     """Use an iterative solver to find the total variation regularized 1st derivative. See
@@ -61,17 +54,16 @@
     return x_hat, dxdt_hat
 
 
-def _total_variation_regularized_derivative(x, dt, order, gamma, solver=solver):
+def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None):
     """Generalized total variation regularized derivatives. Use convex optimization (cvxpy) to solve for a
-    total variation regularized derivative. Default solver is MOSEK: , which is not
-    always available. Fall back to CVXPY's default.
+    total variation regularized derivative.
 
     :param np.array[float] x: data to differentiate
     :param float dt: time step
     :param int order: 1, 2, or 3, the derivative to regularize
     :param float gamma: regularization parameter
     :param str solver: Solver to use. Solver options include: 'MOSEK', 'CVXOPT', 'CLARABEL', 'ECOS'.
-        In testing, 'MOSEK' was the most robust.
+        In testing, 'MOSEK' was the most robust. If not given, fall back to CVXPY's default.
 
     :return: tuple[np.array, np.array] of\n
         - **x_hat** -- estimated (smoothed) x
@@ -116,7 +108,7 @@
     return x_hat*std+mean, dxdt_hat*std
 
 
-def velocity(x, dt, params, options=None, gamma=None, solver=solver):
+def velocity(x, dt, params, options=None, gamma=None, solver=None):
     """Use convex optimization (cvxpy) to solve for the velocity total variation regularized derivative.
 
     :param np.array[float] x: data to differentiate
@@ -125,7 +117,7 @@ def velocity(x, dt, params, options=None, gamma=None, solver=solver):
     :param dict options: (**deprecated**, prefer :code:`solver`) a dictionary consisting of {'solver': (str)}
     :param float gamma: the regularization parameter
     :param str solver: the solver CVXPY should use, 'MOSEK', 'CVXOPT', 'CLARABEL', 'ECOS', etc.
-        In testing, 'MOSEK' was the most robust. Default is to use CVXPY's default.
+        In testing, 'MOSEK' was the most robust. If not given, fall back to CVXPY's default.
 
     :return: tuple[np.array, np.array] of\n
         - **x_hat** -- estimated (smoothed) x
@@ -143,7 +135,7 @@ def velocity(x, dt, params, options=None, gamma=None, solver=solver):
     return _total_variation_regularized_derivative(x, dt, 1, gamma, solver=solver)
 
 
-def acceleration(x, dt, params, options=None, gamma=None, solver=solver):
+def acceleration(x, dt, params, options=None, gamma=None, solver=None):
     """Use convex optimization (cvxpy) to solve for the acceleration total variation regularized derivative.
 
     :param np.array[float] x: data to differentiate
@@ -152,7 +144,7 @@ def acceleration(x, dt, params, options=None, gamma=None, solver=solver):
     :param dict options: (**deprecated**, prefer :code:`solver`) a dictionary consisting of {'solver': (str)}
     :param float gamma: the regularization parameter
     :param str solver: the solver CVXPY should use, 'MOSEK', 'CVXOPT', 'CLARABEL', 'ECOS', etc.
-        In testing, 'MOSEK' was the most robust. Default is to use CVXPY's default.
+        In testing, 'MOSEK' was the most robust. If not given, fall back to CVXPY's default.
 
     :return: tuple[np.array, np.array] of\n
         - **x_hat** -- estimated (smoothed) x
@@ -170,7 +162,7 @@ def acceleration(x, dt, params, options=None, gamma=None, solver=solver):
     return _total_variation_regularized_derivative(x, dt, 2, gamma, solver=solver)
 
 
-def jerk(x, dt, params, options=None, gamma=None, solver=solver):
+def jerk(x, dt, params, options=None, gamma=None, solver=None):
     """Use convex optimization (cvxpy) to solve for the jerk total variation regularized derivative.
 
     :param np.array[float] x: data to differentiate
@@ -179,7 +171,7 @@ def jerk(x, dt, params, options=None, gamma=None, solver=solver):
     :param dict options: (**deprecated**, prefer :code:`solver`) a dictionary consisting of {'solver': (str)}
     :param float gamma: the regularization parameter
     :param str solver: the solver CVXPY should use, 'MOSEK', 'CVXOPT', 'CLARABEL', 'ECOS', etc.
-        In testing, 'MOSEK' was the most robust. Default is to use CVXPY's default.
+        In testing, 'MOSEK' was the most robust. If not given, fall back to CVXPY's default.
 
     :return: tuple[np.array, np.array] of\n
         - **x_hat** -- estimated (smoothed) x
@@ -197,7 +189,7 @@ def jerk(x, dt, params, options=None, gamma=None, solver=solver):
     return _total_variation_regularized_derivative(x, dt, 3, gamma, solver=solver)
 
 
-def smooth_acceleration(x, dt, params, options=None, gamma=None, window_size=None, solver=solver):
+def smooth_acceleration(x, dt, params, options=None, gamma=None, window_size=None, solver=None):
     """Use convex optimization (cvxpy) to solve for the acceleration total variation regularized derivative,
     and then apply a convolutional gaussian smoother to the resulting derivative to smooth out the peaks.
     The end result is similar to the jerk method, but can be more time-efficient.
@@ -209,7 +201,7 @@ def smooth_acceleration(x, dt, params, options=None, gamma=None, window_size=Non
     :param float gamma: the regularization parameter
     :param int window_size: window size for gaussian kernel
     :param str solver: the solver CVXPY should use, 'MOSEK', 'CVXOPT', 'CLARABEL', 'ECOS', etc.
-        In testing, 'MOSEK' was the most robust. Default is to use CVXPY's default.
+        In testing, 'MOSEK' was the most robust. If not given, fall back to CVXPY's default.
 
     :return: tuple[np.array, np.array] of\n
         - **x_hat** -- estimated (smoothed) x
@@ -236,7 +228,7 @@ def smooth_acceleration(x, dt, params, options=None, gamma=None, window_size=Non
     return x_hat, dxdt_hat
 
 
-def jerk_sliding(x, dt, params, options=None, gamma=None, solver=solver):
+def jerk_sliding(x, dt, params, options=None, gamma=None, solver=None):
     """Use convex optimization (cvxpy) to solve for the jerk total variation regularized derivative.
 
     :param np.array[float] x: data to differentiate
@@ -245,7 +237,7 @@ def jerk_sliding(x, dt, params, options=None, gamma=None, solver=solver):
     :param dict options: (**deprecated**, prefer :code:`solver`) a dictionary consisting of {'solver': (str)}
     :param float gamma: the regularization parameter
     :param str solver: the solver CVXPY should use, 'MOSEK', 'CVXOPT', 'CLARABEL', 'ECOS', etc.
-        In testing, 'MOSEK' was the most robust. Default is to use CVXPY's default.
+        In testing, 'MOSEK' was the most robust. If not given, fall back to CVXPY's default.
 
     :return: tuple[np.array, np.array] of\n
         - **x_hat** -- estimated (smoothed) x
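Net effect of this file's changes, sketched as a hedged usage example: callers that never chose a solver no longer need MOSEK, and pinning a solver still works. This assumes the new-style call with `params=None` plus the `gamma` keyword, which the signatures and docstrings above suggest is accepted:

```python
import numpy as np
from pynumdiff.total_variation_regularization import velocity

dt = 0.01
t = np.arange(0, 2, dt)
x = np.sin(2 * np.pi * t) + 0.05 * np.random.randn(t.size)

# No solver argument: cvxpy now picks any installed solver instead of requiring MOSEK
x_hat, dxdt_hat = velocity(x, dt, None, gamma=0.01)

# Pinning a solver behaves exactly as before
x_hat, dxdt_hat = velocity(x, dt, None, gamma=0.01, solver='CLARABEL')
```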