Commit 771a57a0 authored by Michael Rudolf's avatar Michael Rudolf
Browse files

Small updates and fixes:

 - Statistical calculations now use distribution fitting.
 - Error propagation with the `uncertainties` package.
 - Downsampling now obeys the configured options.
 - No downsampling for VST tests
 - Added a batch script for multiple folders and some other tests.
 - Updated Setup instructions.
parent 13288ae3
......@@ -13,13 +13,13 @@ __DATE__: 20-Feb-2019
'''
import os
import time
import h5py
import matplotlib as mpl
import nptdms
import numpy as np
import os
import time
from matplotlib import pyplot as plt
from scipy import optimize
......@@ -99,7 +99,7 @@ def picker_plot(exp):
def fit_bootstrap(p0, datax, datay, function, yerr_systematic=0.0):
errfunc = lambda p, x, y: function(x, *p) - y
def errfunc(p, x, y): return function(x, *p) - y
# Fit first time
pfit, perr = optimize.leastsq(errfunc,
......@@ -115,7 +115,7 @@ def fit_bootstrap(p0, datax, datay, function, yerr_systematic=0.0):
# 100 random data sets are generated and fitted
ps = []
for i in range(100):
for i in range(200):
randomDelta = np.random.normal(0., sigma_err_total, len(datay))
randomdataY = datay + randomDelta
......
......@@ -9,11 +9,13 @@ data of this data publication.
# %%=============IMPORT=====================================================
import os
from os.path import splitext
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from os.path import splitext
# %%==============PARAMETERS=================================================
A = 0.022619 # area of shear zone (= surface area of lid) (m^2)
li = 0.0776 # inner lever (center cell to center of shear zone)
......
......@@ -21,11 +21,11 @@ AppUpdatesURL={#MyAppURL}
DefaultDirName={autopf}\{#MyAppName}
DefaultGroupName={#MyAppName}
AllowNoIcons=yes
LicenseFile=D:\nextcloud\GitRepos\rst-evaluation\LICENSE.txt
LicenseFile=C:\Users\mrudolf\Nextcloud\GitRepos\rst-evaluation\LICENSE
; Uncomment the following line to run in non administrative install mode (install for current user only.)
;PrivilegesRequired=lowest
PrivilegesRequiredOverridesAllowed=dialog
OutputDir=D:\nextcloud\GitRepos\rst-evaluation\installer
OutputDir=C:\Users\mrudolf\Nextcloud\GitRepos\rst-evaluation\installer
OutputBaseFilename=setup_RST_pick_GUI
Compression=lzma
SolidCompression=yes
......@@ -61,8 +61,8 @@ Name: "ukrainian"; MessagesFile: "compiler:Languages\Ukrainian.isl"
Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked
[Files]
Source: "D:\nextcloud\GitRepos\rst-evaluation\dist\RST_pick_GUI\{#MyAppExeName}"; DestDir: "{app}"; Flags: ignoreversion
Source: "D:\nextcloud\GitRepos\rst-evaluation\dist\RST_pick_GUI\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "C:\Users\mrudolf\Nextcloud\GitRepos\rst-evaluation\dist\RST_pick_GUI\{#MyAppExeName}"; DestDir: "{app}"; Flags: ignoreversion
Source: "C:\Users\mrudolf\Nextcloud\GitRepos\rst-evaluation\dist\RST_pick_GUI\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
; NOTE: Don't use "Flags: ignoreversion" on any shared system files
[Icons]
......@@ -72,4 +72,3 @@ Name: "{autodesktop}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; Tasks: de
[Run]
Filename: "{app}\{#MyAppExeName}"; Description: "{cm:LaunchProgram,{#StringChange(MyAppName, '&', '&&')}}"; Flags: nowait postinstall skipifsilent
......@@ -2,10 +2,10 @@
:: for windows.
@ECHO OFF
:: Compute requirements.txt
pipreqs --no-pin --force --print RSTpicking >> requirements.txt
pipreqs --no-pin --print FileConversion >> requirements.txt
pipreqs --no-pin --print ManualPicking >> requirements.txt
pipreqs --no-pin --print RSTAnalysis >> requirements.txt
pipreqs --no-pin --force --print rstevaluation >> requirements.txt
@REM pipreqs --no-pin --print FileConversion >> requirements.txt
@REM pipreqs --no-pin --print ManualPicking >> requirements.txt
@REM pipreqs --no-pin --print RSTAnalysis >> requirements.txt
:: Removes duplicates in requirements.txt
setlocal disableDelayedExpansion
......@@ -35,6 +35,6 @@ del "%sorted%"
:: Create executable
python -m pip install pyinstaller
pyinstaller --clean -y --icon "D:\nextcloud\GitRepos\rst-evaluation\RSTpicking\images\rst-evaluation_x256.ico" --add-data "D:\nextcloud\GitRepos\rst-evaluation\RSTpicking\images\rst-evaluation_x256.ico;images" "RSTpicking\RST_pick_GUI.py"
pyinstaller --clean -y --icon "rstevaluation\images\rst-evaluation_x256.ico" --add-data "rstevaluation\images\rst-evaluation_x256.ico;images" "rstevaluation\rstevaluation\RST_pick_GUI.py"
:: Create installer (requires InnoSetup on $PATH)
iscc "RST_pick_GUI.iss"
""" Advanced data analysis functions for ringshear tester data """
import numpy as np
import scipy.optimize as spopt
import scipy.stats as stats
from uncertainties.core import ufloat
# =======================MUTUAL LINEAR REGRESSION==============================
......@@ -16,17 +18,18 @@ def rst_analmut(x, y):
[
(y[k+j]-y[k])/(x[k+j]-x[k])
for k in range(len(x)) for j in range(len(x)-k)
if not (x[k+j]-x[k]) == 0
]
)
C = np.array([(y-m*x) for y, m, x in zip(ys, M, xs)])
M = M[np.nonzero(np.isfinite(M))]
C = C[np.nonzero(np.isfinite(C))]
M_avg = np.mean(M)
M_std = np.std(M)
C_avg = np.mean(C)
C_std = np.std(C)
fric_mut = (M_avg, M_std, C_avg, C_std)
# M = M[np.nonzero(np.isfinite(M))]
# C = C[np.nonzero(np.isfinite(C))]
statsM = stats.norm.fit([d.n for d in M])
statsC = stats.norm.fit([d.n for d in C])
uncM = ufloat(statsM[0], 2*statsM[1])
uncC = ufloat(statsC[0], 2*statsC[1])
fric_mut = (uncM, uncC)
data_mut = (M, C)
return fric_mut, data_mut
......@@ -57,8 +60,17 @@ def rst_analstd(x, y):
using 100 randomized sets of data.
"""
x = np.array(x)
y = np.array(y)
pfit, perr = fit_bootstrap(x, y, fitfric)
ynv = np.array([d.nominal_value for d in y])
yerr = np.array([d.std_dev for d in y])
# pfit, perr = fit_bootstrap(x, ynv, fitfric, yerr_systematic=10,
# yerr=yerr)
pfit, pcov = spopt.curve_fit(
fitfric,
x, ynv,
sigma=yerr
)
perr = np.sqrt(np.diag(pcov))
y_fit = fitfric(x, *pfit)
return (pfit[0], perr[0], pfit[1], perr[1], y_fit)
......@@ -98,7 +110,8 @@ def vst_analysis(data):
# %%======================BOOTSTRAP LINEAR REGRESSION==========================
def fit_bootstrap(datax, datay, function=None,
yerr_systematic=0.0, nsigma=2, nsets=100):
yerr_systematic=0.0, nsigma=2, nsets=100,
yerr=None):
"""
Does a bootstrap fit of datax and datay by fitting a number of nsets. Each
set contains normal distributed noise derived from the standard deviation
......@@ -115,7 +128,7 @@ def fit_bootstrap(datax, datay, function=None,
return function(x, *p) - y
# Fit first time
pfit, _ = spopt.curve_fit(function, datax, datay)
pfit, _ = spopt.curve_fit(function, datax, datay, sigma=yerr)
# Get the stdev of the residuals
residuals = errfunc(pfit, datax, datay)
......@@ -128,7 +141,8 @@ def fit_bootstrap(datax, datay, function=None,
for _ in range(nsets):
randomDelta = np.random.normal(0., sigma_err_total, len(datay))
randomdataY = datay + randomDelta
randomfit, _ = spopt.curve_fit(function, datax, randomdataY)
randomfit, _ = spopt.curve_fit(
function, datax, randomdataY, sigma=yerr)
ps.append(randomfit)
ps = np.array(ps)
mean_pfit = np.mean(ps, axis=0)
......
......@@ -11,6 +11,7 @@ import shutil
import nptdms
import numpy as np
import uncertainties as unc
from tqdm import tqdm
from . import data as rstdat
......@@ -41,14 +42,18 @@ def convert(path, file_in, config):
(data['shearstress'] * (var['li']*var['A'])) / var['lo']
)
nsamples = var['nsamples']
if len(data['time']) > nsamples:
if (
len(data['time']) > nsamples and
config.getboolean('parameters', 'downsample_filter') and
not config.getboolean('options', 'is_VST')
):
freq = 1/data['time'][1]
R = int(freq / 5)
data['velocity'] = rstdat.downsample_data(data['velocity'], R)
data['shearforce'] = rstdat.downsample_data(data['shearforce'], R)
data['normalforce'] = rstdat.downsample_data(data['normalforce'], R)
data['time'] = np.linspace(0, data['time'][-1], len(data['velocity']))
print('Data to large... Downsampled to 5 Hz.')
print('Data too large... Downsampled to 5 Hz.')
# Corrections
data['time'] = data['time']-data['time'][0]
......@@ -278,15 +283,12 @@ def saveStrength(path, name, strength):
""" Saves the picked datapoints. """
label = ['_peak', '_dynamic', '_reactivation']
for i in range(0, 3):
Stren = np.vstack((np.round(strength[0], 2),
np.round(strength[i+1], 2)))
Stren = np.vstack((strength[0], strength[i+1]))
header_stress = 'Normal stress [Pa]'+'\t'+'Shear strength [Pa]'
with open(path+name+label[i]+'.txt', "w") as f:
with open(os.path.join(path, name+label[i]+'.txt'), "w") as f:
f.write(header_stress+'\n')
for row in Stren.T:
csv.writer(f,
delimiter='\t',
lineterminator='\n').writerow(row)
for strdat in Stren.T:
f.write('{:}\t{:fP}'.format(strdat[0], strdat[1]))
f.closed
......@@ -301,70 +303,56 @@ def saveFric(path, name, fricmut, fricstd):
'Cohesion [Pa]' + \
'\t' + \
'Std deviation (Coh.) [Pa]'
with open(path+name+'_fricstd.txt', 'w') as f:
with open(os.path.join(path, name+'_fricstd.txt'), 'w') as f:
f.write(header+'\n')
f_string = ''
f_string += 'Peak friction:' + \
'\t' + \
str(np.around(fricstd[0][0], 4)) + \
'\t'+str(np.around(fricstd[0][1], 4)) + \
'\t'+str(np.around(fricstd[0][2], 4)) + \
'\t'+str(np.around(fricstd[0][3], 4)) + \
mferr = unc.ufloat(fricstd[0][0], fricstd[0][1])
cferr = unc.ufloat(fricstd[0][2], fricstd[0][3])
f_string += (
'Peak friction:' +
'\t{:fP}'.format(mferr) +
'\t{:fP}'.format(cferr) +
'\n'
f_string += 'Dynamic friction:' + \
'\t' + \
str(np.around(fricstd[1][0], 4)) + \
'\t' + \
str(np.around(fricstd[1][1], 4)) + \
'\t' + \
str(np.around(fricstd[1][2], 4)) + \
'\t' + \
str(np.around(fricstd[1][3], 4)) + \
)
mferr = unc.ufloat(fricstd[1][0], fricstd[1][1])
cferr = unc.ufloat(fricstd[1][2], fricstd[1][3])
f_string += (
'Dynamic friction:' +
'\t{:fP}'.format(mferr) +
'\t{:fP}'.format(cferr) +
'\n'
f_string += 'Reactivation friction:' + \
'\t' + \
str(np.around(fricstd[2][0], 4)) + \
'\t' + \
str(np.around(fricstd[2][1], 4)) + \
'\t' + \
str(np.around(fricstd[2][2], 4)) + \
'\t' + \
str(np.around(fricstd[2][3], 4)) + \
)
mferr = unc.ufloat(fricstd[2][0], fricstd[2][1])
cferr = unc.ufloat(fricstd[2][2], fricstd[2][3])
f_string += (
'Reactivation friction:' +
'\t{:fP}'.format(mferr) +
'\t{:fP}'.format(cferr) +
'\n'
)
f.write(f_string)
f.closed
with open(path+name+'_fricmut.txt', 'w') as f:
with open(os.path.join(path, name+'_fricmut.txt'), 'w') as f:
f.write(header+'\n')
f_string = ''
f_string += 'Peak friction:' + \
'\t' + \
str(np.around(fricmut[0][0], 4)) + \
'\t' + \
str(np.around(fricmut[0][1], 4)) + \
'\t' + \
str(np.around(fricmut[0][2], 4)) + \
'\t' + \
str(np.around(fricmut[0][3], 4)) + \
f_string += (
'Peak friction:' +
'\t{:fP}'.format(fricmut[0][0]) +
'\t{:fP}'.format(fricmut[0][1]) +
'\n'
f_string += 'Dynamic friction:' + \
'\t' + \
str(np.around(fricmut[1][0], 4)) + \
'\t' + \
str(np.around(fricmut[1][1], 4)) + \
'\t' + \
str(np.around(fricmut[1][2], 4)) + \
'\t' + \
str(np.around(fricmut[1][3], 4)) + \
)
f_string += (
'Dynamic friction:' +
'\t{:fP}'.format(fricmut[1][0]) +
'\t{:fP}'.format(fricmut[1][1]) +
'\n'
f_string += 'Reactivation friction:' + \
'\t' + \
str(np.around(fricmut[2][0], 4)) + \
'\t' + \
str(np.around(fricmut[2][1], 4)) + \
'\t' + \
str(np.around(fricmut[2][2], 4)) + \
'\t' + \
str(np.around(fricmut[2][3], 4))+'\n'
)
f_string += (
'Reactivation friction:' +
'\t{:fP}'.format(fricmut[2][0]) +
'\t{:fP}'.format(fricmut[2][1]) +
'\n'
)
f.write(f_string)
f.closed
......@@ -387,7 +375,7 @@ def savelidpos(path, name, p_ind, exp_data):
rows.sort(key=operator.itemgetter(0))
with open(path+name+'_lidpos.txt', 'w+') as f:
with open(os.path.join(path, name+'_lidpos.txt'), 'w+') as f:
w = csv.writer(
f,
delimiter=',')
......@@ -416,7 +404,11 @@ def check_tdms(file_list, config):
if cur_len > max_len:
max_len = cur_len
if max_len > config.getint('parameters', 'nsamples'):
if (
max_len > config.getint('parameters', 'nsamples') and
config.getboolean('parameters', 'downsample_filter') and
not config.getboolean('options', 'is_VST')
):
print(
'Converting folder. Raw data files will be moved to raw_data.'
)
......@@ -431,3 +423,7 @@ def check_tdms(file_list, config):
shutil.move(file_path, new_path)
new_file_path = rstdat.downsample_file(new_path)
shutil.move(new_file_path, file_path)
def main():
join
......@@ -405,12 +405,12 @@ class RST_pick_GUI(tk.Tk):
'li': 0.0776, # inner lever (center cell-center shear zone)
'lo': 0.1250, # outer lever (center cell-hinge point)
'vel': 30, # shear velocity (mm/min)
'prec': 500, # precision for normal stress in filenames
'prec': 10, # precision for normal stress in filenames
'velnoise': 1e-2,
'stressnoise': 0.025,
'cellweight': 2185.3, # weight of shear cell
'smoothwindow': 5, # Smoothing Factor for plots
'pred_norms': [500, 1000, 2000, 4000, 8000, 16000, 20000],
'pred_norms': [500, 1000, 2000, 4000, 8000, 16000],
'nsamples': 10000, # Maximum number of samples to use
'downsample_filter': 0, # Use downsampling filter or not.
}
......
......@@ -9,7 +9,9 @@ import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import uncertainties as unc
from scipy.signal import medfilt
from uncertainties import unumpy as unp
# %%==================EVALUATION SHEAR CURVE DATA==============================
......@@ -28,7 +30,6 @@ def eval_shearstress(cur_dat, config, review=None):
'manual':
skips automatic picking and opens a manual picking dialog
"""
global picks
print('Picking %s' % cur_dat['name'])
if not review:
......@@ -67,7 +68,6 @@ def eval_shearstress(cur_dat, config, review=None):
# Calculate relative weakening (after Ritter et al., 2016)
weak = 1-(dyn[1]/peak[1])
weak_p = (peak[1]/dyn[1])-1
return (normal, peak, dyn, stat, deltad, weak, weak_p, p_ind)
......@@ -78,6 +78,15 @@ def _auto_pick(cur_dat, config):
cur_dat['shearstress'],
config.getint('parameters', 'smoothwindow')
)
shear_smth = unp.uarray(
shear_smth,
np.full_like(
shear_smth,
# estimated accuracy of machine (+-10 Pa)
# and error of sensor (0.5%)
np.sqrt(10**2 + (shear_smth*0.005)**2)
)
)
# Searching for velocity changes to define data range
vel_above = np.nonzero(
......@@ -145,22 +154,30 @@ def _manual_pick(cur_dat, config,
try:
(sel_dat, sel) = _select_data(cur_dat)
pdata = pick_func[pick](cur_dat['shearstress'][sel_dat])
picked_val = unc.ufloat(
pdata, np.sqrt(10**2+pdata*.005**2)
)
goodPick = True
except ValueError:
pass
ax.plot(cur_dat['displacement'][sel_dat],
ax.plot(
cur_dat['displacement'][sel_dat],
cur_dat['shearstress'][sel_dat],
color=pick_col[pick])
color=pick_col[pick]
)
plt.pause(0.001)
ax.hlines(pdata,
ax.hlines(
picked_val.nominal_value,
np.min(cur_dat['displacement']),
np.max(cur_dat['displacement']),
color=pick_col[pick])
picks[pick] = [pdata, sel]
color=pick_col[pick]
)
picks[pick] = [picked_val, sel]
plt.pause(0.001)
plt.pause(1)
plt.close()
picks['allauto'] = False
return picks
......@@ -187,7 +204,7 @@ def _evaluate_picks(cur_dat, picks):
p_ind['static'] = picks['static'][1]
# Calculate dilation for peak
i_d1 = np.argmin(cur_dat['liddispl'][:picks['peak'][1]])
i_d1 = np.argmin(cur_dat['liddispl'][:picks['peak'][1]+1])
d1 = cur_dat['liddispl'][i_d1]
try: # Sometimes autodetect finds two peaks at the same spot
i_d2 = np.argmax(
......@@ -232,7 +249,17 @@ def _pick_base_plot(cur_dat, config):
ax.plot(
cur_dat['displacement'],
cur_dat['shearstress'],
label='original data'
label='original data',
color='C0'
)
stddev = np.sqrt((cur_dat['shearstress']*.005)**2 + 10**2)
ax.fill_between(
cur_dat['displacement'],
cur_dat['shearstress']-stddev,
cur_dat['shearstress']+stddev,
color='C0',
alpha=.5,
label='2$\sigma$ accuracy'
)
shear_smth = medfilt(
cur_dat['shearstress'], config.getint('parameters', 'smoothwindow')
......@@ -241,14 +268,33 @@ def _pick_base_plot(cur_dat, config):
cur_dat['displacement'],
shear_smth,
':',
label='filtered data'
label='filtered data',
color='C1'
)
ax.set_ylim(0)
ax.set_xlim(
np.min(cur_dat['displacement']),
np.max(cur_dat['displacement'])
)
ax.set_title('Normal stress: %i' % normstress)
if config.getint('options', 'pred_norm'):
diffstress = normstress - np.mean(cur_dat['normalstress'])
ax.set_title(
'Normal stress: %i\n(difference to measured=%i) Pa' % (
normstress, diffstress
)
)
if (np.abs(diffstress)/normstress) > 0.05:
ax.annotate(
'! Predefined normal stress difference > 5 % !',
(.5, .9),
xycoords='axes fraction',
horizontalalignment='center',
color='r'
)
else:
ax.set_title(
'Normal stress: %i Pa' % normstress
)
return (fig, ax)
......@@ -265,18 +311,24 @@ def _review_picks(fig, ax, picks, cur_dat, config):
# col_list = [base_col_list[i] for i in range(len(picks))]
(peak, dyn, stat, _, _) = _evaluate_picks(cur_dat, picks)
# fig.show()
pnt_peak = ax.plot(
peak[0], peak[1], 's',
pnt_peak = ax.errorbar(
peak[0], peak[1].nominal_value,
yerr=peak[1].std_dev,
marker='s',
color='r',
label='peak'
)
pnt_dyn = ax.plot(
dyn[0], dyn[1], 's',
pnt_dyn = ax.errorbar(
dyn[0], dyn[1].nominal_value,
yerr=dyn[1].std_dev,
marker='s',
color='y',
label='dyn'
)
pnt_stat = ax.plot(
stat[0], stat[1], 's',
pnt_stat = ax.errorbar(
stat[0], stat[1].nominal_value,
yerr=stat[1].std_dev,
marker='s',
color='b',
label='stat'
)
......@@ -314,24 +366,45 @@ def _review_picks(fig, ax, picks, cur_dat, config):
btn['peak']()
(peak, _, _, _, _) = _evaluate_picks(cur_dat, picks)
for p in pnt_peak:
if isinstance(p, mpl.lines.Line2D):
p.set_xdata(peak[0])
p.set_ydata(peak[1])
p.set_ydata(peak[1].nominal_value)
elif isinstance(p, tuple) and p:
new_segs = [np.array([
[peak[0], peak[1].nominal_value + peak[1].std_dev],
[peak[0], peak[1].nominal_value - peak[1].std_dev]
])]
p[0].set_segments(new_segs)
def _cb_dyn(event):
nonlocal pnt_dyn
btn['dyn']()
(_, dyn, _,