Commit 454e9d70 authored by Michael Rudolf's avatar Michael Rudolf
Browse files

Fixed fitting problem and minor format changes.

 - Addressed Issue #1 by adding a new fitting procedure
 - Adjusted format to remove most pylint/pycodestyle warnings
parent 37697236
......@@ -42,7 +42,7 @@ def convert(path, file_in, var):
'.dat': _readdat,
'.tdms': _readtdms}
(root, ext) = os.path.splitext(path+file_in)
(_, ext) = os.path.splitext(path+file_in)
data = file_convert[ext](path, file_in) # data contains basic content
nsamples = int(var['nsamples'])
......@@ -99,8 +99,7 @@ def eval_shearstress(cur_dat, var, review=None):
"""Helper function to automatically pick the points"""
# smoothing function: 1st#: window size, 2nd#: polynomial order
shear_smth = medfilt(cur_dat['shearstress'], int(var['smoothwindow']))
# Processing
velnoise, stressnoise = var['velnoise'], var['stressnoise']
# Searching for velocity changes to define data range
vel_above = np.nonzero(cur_dat['velocity'] > var['velnoise'])
switch = np.nonzero(np.diff(vel_above) > 10)
......@@ -166,7 +165,7 @@ def eval_shearstress(cur_dat, var, review=None):
(sel_dat, sel) = _select_data(cur_dat)
pdata = pick_func[pick](cur_dat['shearstress'][sel_dat])
goodPick = True
except ValueError as e:
except ValueError as _:
pass
ax.plot(cur_dat['displacement'][sel_dat],
......@@ -205,7 +204,7 @@ def eval_shearstress(cur_dat, var, review=None):
try: # Sometimes autodetect finds two peaks at the same spot
i_d2 = np.argmax(
cur_dat['liddispl'][picks['peak'][1]:picks['dynamic'][1]])
except ValueError as e: # Fallback = peak pick
except ValueError as _: # Fallback = peak pick
print('Detected two peaks at same position!')
print('Please do a manual revision!!!')
i_d2 = picks['peak'][1]
......@@ -259,9 +258,9 @@ def eval_shearstress(cur_dat, var, review=None):
nonlocal ALL_OK
ALL_OK = not ALL_OK
base_col_list = ['r', 'y', 'b']
col_list = [base_col_list[i] for i in range(len(picks))]
(peak, dyn, stat, deltad, p_ind) = _evaluate_picks(cur_dat, picks)
# base_col_list = ['r', 'y', 'b']
# col_list = [base_col_list[i] for i in range(len(picks))]
(peak, dyn, stat, _, _) = _evaluate_picks(cur_dat, picks)
# fig.show()
pnt_peak = ax.plot(peak[0], peak[1], 's',
color='r',
......@@ -304,7 +303,7 @@ def eval_shearstress(cur_dat, var, review=None):
def _cb_peak(event):
nonlocal pnt_peak
btn['peak']()
(peak, dyn, stat, deltad, p_ind) = _evaluate_picks(cur_dat, picks)
(peak, _, _, _, _) = _evaluate_picks(cur_dat, picks)
for p in pnt_peak:
p.set_xdata(peak[0])
p.set_ydata(peak[1])
......@@ -312,7 +311,7 @@ def eval_shearstress(cur_dat, var, review=None):
def _cb_dyn(event):
nonlocal pnt_dyn
btn['dyn']()
(peak, dyn, stat, deltad, p_ind) = _evaluate_picks(cur_dat, picks)
(_, dyn, _, _, _) = _evaluate_picks(cur_dat, picks)
for p in pnt_dyn:
p.set_xdata(dyn[0])
p.set_ydata(dyn[1])
......@@ -320,7 +319,7 @@ def eval_shearstress(cur_dat, var, review=None):
def _cb_stat(event):
nonlocal pnt_stat
btn['stat']()
(peak, dyn, stat, deltad, p_ind) = _evaluate_picks(cur_dat, picks)
(_, _, stat, _, _) = _evaluate_picks(cur_dat, picks)
for p in pnt_stat:
p.set_xdata(stat[0])
p.set_ydata(stat[1])
......@@ -377,7 +376,7 @@ def eval_shearstress(cur_dat, var, review=None):
# Further calculations for output
normal = int(np.mean(cur_dat['normalstress'])/var['prec'])*var['prec']
if var['options']['pred_norm']:
normal = checkNormStress(normal, var['pred_norms'])
normal = checkNormStress(normal, var['pred_norms'])
# Calculate relative weakening (after Ritter et al., 2016)
weak = 1-(dyn[1]/peak[1])
weak_p = (peak[1]/dyn[1])-1
......@@ -392,7 +391,7 @@ def rst_analmut(x, y):
calculation of friction coefficients (slope, M) and y-axis intercept
(cohesion, C) by mutual two point linear regressions:
"""
n = len(x)
# n = len(x)
ys = [y[k] for k in range(len(x)) for j in range(len(x)-k)]
xs = [x[k] for k in range(len(x)) for j in range(len(x)-k)]
M = np.array([(y[k+j]-y[k])/(x[k+j]-x[k])
......@@ -411,7 +410,7 @@ def rst_analmut(x, y):
# %%=======================STANDARD LINEAR REGRESSION==========================
def rst_analstd(x, y):
def rst_analstd_old(x, y):
""" Correlation of normal and shear stress: y = slope * x+intercept """
x = np.array(x)
y = np.array(y)
......@@ -429,9 +428,23 @@ def rst_analstd(x, y):
out_var = (slope, std_slope, intercept, std_intercept, y_fit)
return out_var
def rst_analstd(x, y):
    """
    Linear fit of shear stress vs. normal stress via bootstrap.

    Parameter errors are estimated from 100 randomized data sets
    (see fit_bootstrap).

    Returns
    -------
    (slope, std_slope, intercept, std_intercept, y_fit)
    """
    xarr = np.asarray(x)
    yarr = np.asarray(y)
    params, errors = fit_bootstrap(xarr, yarr, fitfric)
    fitted = fitfric(xarr, *params)
    return (params[0], errors[0], params[1], errors[1], fitted)
def fitfric(x, a, b):
    """Linear friction model: stress = a * x + b (slope a, intercept b)."""
    return b + x*a
# %%=====================VELOCITY STEPPING ANALYSIS============================
def vst_analysis(data):
mu_app = data['shearstress']/data['normalstress'] # Apparent friction
......@@ -462,59 +475,44 @@ def vst_analysis(data):
# %%======================BOOTSTRAP LINEAR REGRESSION==========================
def fit_bootstrap(datax, datay, function=None,
                  yerr_systematic=0.0, nsigma=2, nsets=100):
    """
    Does a bootstrap fit of datax and datay by fitting a number of nsets.

    The model is fitted once to the data; `nsets` synthetic data sets are
    then built by adding normally distributed noise whose standard
    deviation is derived from the residuals of that first fit (plus an
    optional systematic error), and each set is refitted.  The returned
    parameters are the mean of the individual fits; their errors are
    `nsigma` times the spread of the fits.

    You can choose the confidence interval that you want for your
    parameter estimates:
        1sigma corresponds to 68.3% confidence interval
        2sigma corresponds to 95.44% confidence interval

    Parameters
    ----------
    datax, datay : array_like
        Data to fit.
    function : callable, optional
        Model f(x, *params); defaults to a linear function a*x+b.
    yerr_systematic : float, optional
        Systematic error added in quadrature to the residual scatter.
    nsigma : int, optional
        Confidence scaling of the returned parameter errors.
    nsets : int, optional
        Number of randomized data sets to fit.

    Returns
    -------
    (pfit, perr) : tuple of ndarray
        Mean fit parameters and their nsigma errors.
    """
    def _poly1(x, a, b):
        """Linear Function"""
        return (a*x+b)

    if not function:
        function = _poly1

    # Fit first time to estimate the residual scatter
    pfit, _ = curve_fit(function, datax, datay)
    residuals = function(datax, *pfit) - datay
    # Noise level of the synthetic sets is the residual std.  It must
    # NOT be scaled by nsigma here: the confidence scaling is applied
    # to the parameter spread below, and applying it twice would
    # inflate the reported errors.
    sigma_res = np.std(residuals)
    sigma_err_total = np.sqrt(sigma_res**2 + yerr_systematic**2)

    # nsets random data sets are generated and fitted
    ps = []
    for _ in range(nsets):
        randomDelta = np.random.normal(0., sigma_err_total, len(datay))
        randomdataY = datay + randomDelta
        randomfit, _ = curve_fit(function, datax, randomdataY)
        ps.append(randomfit)
    ps = np.array(ps)

    mean_pfit = np.mean(ps, axis=0)
    err_pfit = nsigma * np.std(ps, axis=0)
    return mean_pfit, err_pfit
# %%========================PLOT===============================================
......@@ -587,7 +585,7 @@ def plothist(path, name, strength, data_mut):
# function for plotting histograms:
def histplot(axrow, coef, coh, tit_mu, tit_C):
global histfit_coh, lnspc, statscoef, statscoh
global histfit_coh, lnspc, statscoh
# ==============FRICTION COEFFICIENT========================
axrow[0].hist(coef[~np.isnan(coef)],
bins=nbins,
......@@ -687,7 +685,7 @@ def plotts(path, name, exp_data, normal_stress):
label=cur_norm)
# Extract labels, sort them and only display once per repetition
handles, labels = ax3.get_legend_handles_labels()
labels_num = [int(float(l)) for l in labels]
labels_num = [int(float(lbl)) for lbl in labels]
sort_ind = np.argsort(labels_num)
handles = np.take_along_axis(np.array(handles), sort_ind, axis=0)
labels = np.take_along_axis(np.array(labels), sort_ind, axis=0)
......@@ -713,6 +711,7 @@ def plotts(path, name, exp_data, normal_stress):
edgecolor='w')
plt.close()
# %%=================PLOT VST ANALYSIS=========================================
def plotVST(path, data, pfit, perr, name_fit):
""" Plots VST analysis data """
......@@ -741,20 +740,30 @@ def plotVST(path, data, pfit, perr, name_fit):
# Displacement vs. shear velocity:
ax2.plot(displ, vel, 'r', lw=1)
ax2.set_xlabel('Shear displacement $d$ [mm]')
ax2.set_ylabel('Shear velocity $v$ [$\mathregular{mm s^{-1}}$]', color='r')
ax2.set_ylabel(
r'Shear velocity $v$ [$\mathregular{mm s^{-1}}$]',
color='r'
)
ax2.set_yscale('log')
ax2.spines['right'].set_color('r')
ax2.spines['left'].set_color('royalblue')
ax2.tick_params('y', which='both', colors='r')
ax2.set_xlim(0, np.nanmax(displ)+ np.nanmax(displ)/100)
ax2.set_xlim(0, np.nanmax(displ) + np.nanmax(displ)/100)
# shear velocity vs. fricton:
ax3.plot(vel, fric, 'k.', ms=2)
ax3.plot(vel, fitfric(np.log10(vel), *pfit), 'g-', label='curve fit: '+ name_fit)
ax3.plot(
vel, fitfric(np.log10(vel), *pfit),
'g-',
label='curve fit: ' + name_fit
)
ax3.set_xscale('log')
ax3.set_xlabel('Shear velocity $v$ [$\mathregular{mm s^{-1}}$]')
ax3.set_xlabel(r'Shear velocity $v$ [$\mathregular{mm s^{-1}}$]')
ax3.set_ylabel('Friction')
ax3.set_xlim(np.nanmin(vel)-(np.nanmin(vel)/5), np.nanmax(vel)+np.nanmax(vel)/5)
ax3.set_xlim(
np.nanmin(vel)-(np.nanmin(vel)/5),
np.nanmax(vel)+np.nanmax(vel)/5
)
ax3.legend(fontsize=8,
facecolor='w',
edgecolor='k',
......@@ -793,7 +802,6 @@ def saveTS(path, name, exp_data, normal_stress):
ts_data = np.vstack((ts_data, next_str))
ts_data = ts_data.T
# create file and save it
with open(path+name+'_ts.txt', 'w+') as f:
csvout = csv.writer(f,
......@@ -858,7 +866,7 @@ def saveFric(path, name, fricmut, fricstd):
'\t' + \
str(np.around(fricstd[2][3], 4)) + \
'\n'
write_f = f.write(f_string)
f.write(f_string)
f.closed
with open(path+name+'_fricmut.txt', 'w') as f:
f.write(header+'\n')
......@@ -892,7 +900,7 @@ def saveFric(path, name, fricmut, fricstd):
str(np.around(fricmut[2][2], 4)) + \
'\t' + \
str(np.around(fricmut[2][3], 4))+'\n'
write_f = f.write(f_string)
f.write(f_string)
f.closed
......@@ -951,11 +959,6 @@ def dilation(path, name, lid_pos, exp_data, prop_file, cfg):
marker = ['o', '^', 's', 'h']
label2 = ['Peak', 'Dynamic', 'Reactivation', 'Initial']
try:
compact_density = [float(d[3]) for d in props['data']]
except expression as identifier:
pass
fig, (ax1, ax2) = plt.subplots(ncols=2,
sharey=True,
figsize=(10, 5))
......@@ -978,7 +981,7 @@ def dilation(path, name, lid_pos, exp_data, prop_file, cfg):
ax2.plot(tden, density,
marker=marker[j],
color=linecolor[j])
all_dens = np.concatenate((densities, tardens))
# all_dens = np.concatenate((densities, tardens))
fig.legend()
ax1.set_ylabel('Density during shearing (kg/m$^3$)')
ax1.set_xlabel('Normal stress (Pa)')
......
......@@ -3,7 +3,7 @@
# @Author: M. Rudolf, M. Warsitzka
# @Date: 2019-02-20 12:00:00
# @Last Modified by: M. Rudolf
# @Last Modified time: 2020-09-23 13:20:58
# @Last Modified time: 2020-09-24 14:54:27
"""
RST_pick_GUI.py
......@@ -250,12 +250,12 @@ class RST_pick_GUI(tk.Tk):
except ValueError as _:
Vars[item] = json.loads(self.params[item].get())
self.cfg['parameters'][item] = self.params[item].get()
self.cfg ['options']['plot_ts'] = str(self.plot_ts.get())
self.cfg ['options']['save_ts'] = str(self.save_ts.get())
self.cfg ['options']['rst'] = str(self.rst.get())
self.cfg ['options']['rev_pick'] = str(self.rev_pick.get())
self.cfg ['options']['is_VST'] = str(self.is_VST.get())
self.cfg ['options']['pred_norm'] = str(self.pred_norm.get())
self.cfg['options']['plot_ts'] = str(self.plot_ts.get())
self.cfg['options']['save_ts'] = str(self.save_ts.get())
self.cfg['options']['rst'] = str(self.rst.get())
self.cfg['options']['rev_pick'] = str(self.rev_pick.get())
self.cfg['options']['is_VST'] = str(self.is_VST.get())
self.cfg['options']['pred_norm'] = str(self.pred_norm.get())
Vars['options'] = dict()
for opt in self.cfg['options'].keys():
Vars['options'][opt] = self.cfg['options'][opt]
......@@ -290,20 +290,24 @@ class RST_pick_GUI(tk.Tk):
for cur_dat in exp_data
]
else:
eval_data = [rfnc.eval_shearstress(cur_dat, Vars)
for cur_dat in exp_data]
eval_data = [
rfnc.eval_shearstress(cur_dat, Vars)
for cur_dat in exp_data
]
print('Automatic analysis successful')
else:
print('Automatic analysis skipped, initiating manual picking')
eval_data = [rfnc.eval_shearstress(cur_dat, Vars, review='manual')
for cur_dat in exp_data]
eval_data = [
rfnc.eval_shearstress(cur_dat, Vars, review='manual')
for cur_dat in exp_data
]
# Create lists of parameters for analysis
normal_stress = [ev[0] for ev in eval_data]
peak_stress = [ev[1][1] for ev in eval_data]
dyn_stress = [ev[2][1] for ev in eval_data]
stat_stress = [ev[3][1] for ev in eval_data]
dilation = [ev[4] for ev in eval_data]
# dilation = [ev[4] for ev in eval_data]
weak = [ev[5] for ev in eval_data]
weak_p = [ev[6] for ev in eval_data]
p_ind = [ev[7] for ev in eval_data]
......@@ -335,7 +339,7 @@ class RST_pick_GUI(tk.Tk):
data_mut = (peakdata_mut, dyndata_mut, reactdata_mut)
fric_std = (peakfric_std, dynfric_std, reactfric_std)
# ===================PLOT DATA======================================
# ===================PLOT DATA=====================================
rfnc.plotstd(path_out, projectname, strength, fric_std)
rfnc.plothist(path_out, projectname, strength, data_mut)
print('>>> Friction data plotted')
......@@ -344,18 +348,31 @@ class RST_pick_GUI(tk.Tk):
rfnc.plotts(path_out, projectname, exp_data, normal_stress)
print('>>> Time series data plotted')
# ====================Save DATA=====================================
# ====================Save DATA===================================
if self.rst.get() == 1:
rfnc.saveStrength(path_out, projectname, strength)
rfnc.saveFric(path_out, projectname, fric_mut, fric_std)
lid_pos = rfnc.savelidpos(path_out, projectname, p_ind, exp_data)
lid_pos = rfnc.savelidpos(
path_out,
projectname,
p_ind,
exp_data
)
print('>>> Friction data saved')
prop_file = [path_in+f for f in os.listdir(path_in)
if f.endswith('csv')]
prop_file = [
path_in + f
for f in os.listdir(path_in)
if f.endswith('csv')
]
if prop_file:
print('>>> Prop file detected! Starting dilation analysis...')
rfnc.dilation(path_out, projectname, lid_pos, exp_data,
prop_file[0], self.cfg)
print(
'>>> Prop file detected! Starting dilation analysis...'
)
rfnc.dilation(
path_out, projectname,
lid_pos, exp_data,
prop_file[0], self.cfg
)
else:
print('>>> There is no prop file! Going on as usual.')
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment