Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 14 additions & 21 deletions examples/1_basic_tutorial.ipynb

Large diffs are not rendered by default.

31 changes: 11 additions & 20 deletions examples/2a_optimizing_parameters_with_dxdt_known.ipynb

Large diffs are not rendered by default.

35 changes: 14 additions & 21 deletions examples/2b_optimizing_parameters_with_dxdt_unknown.ipynb

Large diffs are not rendered by default.

123 changes: 18 additions & 105 deletions pynumdiff/linear_model/_linear_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
import numpy as np
from warnings import warn

from pynumdiff import smooth_finite_difference
from pynumdiff.finite_difference import first_order as finite_difference
from pynumdiff.utils import utility

Expand Down Expand Up @@ -111,90 +110,6 @@ def _polydiff(x, dt, poly_order, weights=None):
return utility.slide_function(_polydiff, x, dt, kernel, poly_order, stride=step_size, pass_weights=True)


#############
# Chebyshev #
# Removed - Not Useful and requires old package
#############
# def __chebydiff__(x, dt, params, options=None):
# """
# Fit the timeseries with chebyshev polynomials, and differentiate this model.

# :param x: (np.array of floats, 1xN) data to differentiate
# :param dt: (float) time step
# :param params: (list) [N] : (int) order of the polynomial
# :param options:
# :return: x_hat : estimated (smoothed) x
# dxdt_hat : estimated derivative of x
# """

# if isinstance(params, list):
# n = params[0]
# else:
# n = params

# mean = np.mean(x)
# x = x - mean

# def f(y):
# t = np.linspace(-1, 1, len(x))
# return np.interp(y, t, x)

# # Chebychev polynomial
# poly = pychebfun.chebfun(f, N=n, domain=[-1, 1])
# ts = np.linspace(poly.domain()[0], poly.domain()[-1], len(x))

# x_hat = poly(ts) + mean
# dxdt_hat = poly.differentiate()(ts)*(2/len(x))/dt

# return x_hat, dxdt_hat

# def chebydiff(x, dt, params, options=None):
# """
# Slide a smoothing derivative function across a time series with specified window size.

# :param x: data to differentiate
# :type x: np.array (float)

# :param dt: step size
# :type dt: float

# :param params: a list of 2 elements:

# - N: order of the polynomial
# - window_size: size of the sliding window (ignored if not sliding)

# :type params: list (int)

# :param options: a dictionary consisting of 3 key value pairs:

# - 'sliding': whether to use sliding approach
# - 'step_size': step size for sliding
# - 'kernel_name': kernel to use for weighting and smoothing windows ('gaussian' or 'friedrichs')

# :type options: dict {'sliding': (bool), 'step_size': (int), 'kernel_name': (string)}, optional

# :return: a tuple consisting of:

# - x_hat: estimated (smoothed) x
# - dxdt_hat: estimated derivative of x

# :rtype: tuple -> (np.array, np.array)
# """

# if options is None:
# options = {'sliding': True, 'step_size': 1, 'kernel_name': 'friedrichs'}

# if 'sliding' in options.keys() and options['sliding']:
# window_size = copy.copy(params[-1])
# if window_size < params[0]*2:
# window_size = params[0]*2+1
# params[1] = window_size
# return __slide_function__(__chebydiff__, x, dt, params, window_size,
# options['step_size'], options['kernel_name'])

# return __chebydiff__(x, dt, params)


###############
# Linear diff #
###############
Expand Down Expand Up @@ -345,10 +260,11 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e
:param list[float] or float params: (**deprecated**, prefer :code:`high_freq_cutoff`)
:param dict options: (**deprecated**, prefer :code:`even_extension`
and :code:`pad_to_zero_dxdt`) a dictionary consisting of {'even_extension': (bool), 'pad_to_zero_dxdt': (bool)}
:param float high_freq_cutoff: The high frequency cutoff. Frequencies below this threshold will be kept, and above will be zeroed.
:param float high_freq_cutoff: The high frequency cutoff as a multiple of the Nyquist frequency: Should be between 0
and 0.5. Frequencies below this threshold will be kept, and above will be zeroed.
:param bool even_extension: if True, extend the data with an even extension so signal starts and ends at the same value.
:param bool pad_to_zero_dxdt: if True, extend the data with extensions that smoothly force the derivative to zero. This
allows the spectral derivative to fit data which does not start and end with derivatives equal to zero.
:param bool pad_to_zero_dxdt: if True, extend the data with extra regions that smoothly force the derivative to
zero before taking FFT.

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
Expand All @@ -364,16 +280,17 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e
elif high_freq_cutoff == None:
raise ValueError("`high_freq_cutoff` must be given.")

original_L = len(x)
L = len(x)

# make derivative go to zero at ends (optional)
if pad_to_zero_dxdt:
padding = 100
pre = x[0]*np.ones(padding)
pre = x[0]*np.ones(padding) # extend the edges
post = x[-1]*np.ones(padding)
x = np.hstack((pre, x, post))
x_hat, _ = smooth_finite_difference.meandiff(x, dt, window_size=int(padding/2))
x_hat[padding:-padding] = x[padding:-padding]
kernel = utility.mean_kernel(padding//2)
x_hat = utility.convolutional_smoother(x, kernel) # smooth the edges in
x_hat[padding:-padding] = x[padding:-padding] # replace middle with original signal
x = x_hat
else:
padding = 0
Expand All @@ -383,28 +300,24 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e
x = np.hstack((x, x[::-1]))

# If odd, make N even, and pad x
L = len(x)
if L % 2 != 0:
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This adding an entry stuff isn't necessary, because you can just zero the Nyquist term in your k array if N is even, and you'll have no Nyquist term if N is odd. I form k as I do in spectral-derivatives now.

N = L + 1
x = np.hstack((x, x[-1] + dt*(x[-1]-x[-1])))
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Also, this x[-1] + dt*(x[-1]-x[-1]) = x[-1]. It was probably meant to be x[-1] + dt*(x[-1]-x[-2]).

else:
N = L
N = len(x)

# Define the frequency range.
k = np.asarray(list(range(0, int(N/2))) + [0] + list(range(int(-N/2) + 1,0)))
k = k*2*np.pi/(dt*N)
k = np.concatenate((np.arange(N//2 + 1), np.arange(-N//2 + 1, 0)))
if N % 2 == 0: k[N//2] = 0 # odd derivatives get the Nyquist element zeroed out
omega = k*2*np.pi/(dt*N) # turn wavenumbers into frequencies in radians/s

# Frequency based smoothing: remove signals with a frequency higher than high_freq_cutoff
discrete_high_freq_cutoff = int(high_freq_cutoff*N)
k[discrete_high_freq_cutoff:N-discrete_high_freq_cutoff] = 0
discrete_cutoff = int(high_freq_cutoff*N)
omega[discrete_cutoff:N-discrete_cutoff] = 0

# Derivative = 90 deg phase shift
dxdt_hat = np.real(np.fft.ifft(1.0j * k * np.fft.fft(x)))
dxdt_hat = dxdt_hat[padding:original_L+padding]
dxdt_hat = np.real(np.fft.ifft(1.0j * omega * np.fft.fft(x)))
dxdt_hat = dxdt_hat[padding:L+padding]

# Integrate to get x_hat
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
x0 = utility.estimate_integration_constant(x[padding:original_L+padding], x_hat)
x0 = utility.estimate_integration_constant(x[padding:L+padding], x_hat)
x_hat = x_hat + x0

return x_hat, dxdt_hat
9 changes: 2 additions & 7 deletions pynumdiff/optimize/_optimize.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@

# Map from method -> (search_space, bounds_low_hi)
method_params_and_bounds = {
spectraldiff: ({'even_extension': True,
'pad_to_zero_dxdt': True,
spectraldiff: ({'even_extension': [True, False],
'pad_to_zero_dxdt': [True, False],
'high_freq_cutoff': [1e-3, 5e-2, 1e-2, 5e-2, 1e-1]},
{'high_freq_cutoff': (1e-5, 1-1e-5)}),
polydiff: ({'step_size': [1, 2, 5],
Expand All @@ -33,11 +33,6 @@
{'poly_order': (1, 12),
'window_size': (3, 1000),
'smoothing_win': (3, 1000)}),
#chebydiff ({order: [3, 5, 7, 9],
# window_size: [10, 30, 50, 90, 130],
# kernel: 'friedrichs'},
# {order: (1, 10),
# window_size: (10, 1000)})
lineardiff: ({'kernel': 'gaussian',
'order': 3,
'gamma': [1e-1, 1, 10, 100],
Expand Down
10 changes: 2 additions & 8 deletions pynumdiff/tests/test_optimize.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,17 +101,11 @@ def test_savgoldiff():
def test_spectraldiff():
params1, val1 = optimize(spectraldiff, x, dt, dxdt_truth=dxdt_truth, padding='auto')
params2, val2 = optimize(spectraldiff, x, dt, tvgamma=tvgamma, padding='auto')
np.testing.assert_almost_equal(params1['high_freq_cutoff'], 0.0913, decimal=3)
np.testing.assert_almost_equal(params2['high_freq_cutoff'], 0.21, decimal=2)
np.testing.assert_almost_equal(params1['high_freq_cutoff'], 0.105, decimal=3)
np.testing.assert_almost_equal(params2['high_freq_cutoff'], 0.10, decimal=2)

def test_polydiff():
params1, val1 = optimize(polydiff, x, dt, search_space={'step_size':1}, dxdt_truth=dxdt_truth, padding='auto')
params2, val2 = optimize(polydiff, x, dt, search_space={'step_size':1}, tvgamma=tvgamma, dxdt_truth=None, padding='auto')
assert (params1['poly_order'], params1['window_size']) == (6, 50)
assert (params2['poly_order'], params2['window_size']) == (4, 10)

# def test_chebydiff(self):
# params1, val1 = optimize(chebydiff, x, dt, dxdt_truth=dxdt_truth)
# params2, val2 = optimize(chebydiff, x, dt, tvgamma=tvgamma, dxdt_truth=None)
# assert params1 == [9, 108]
# assert params2 == [9, 94]