From 83c67348be306b34e1783ba44972675c9e33df30 Mon Sep 17 00:00:00 2001 From: Paul Brossier Date: Mon, 20 Feb 2006 11:37:53 +0000 Subject: [PATCH] added bench-window, bench-delay, benchonset added bench-window, bench-delay, benchonset --- python/test/bench/onset/Makefile.am | 34 +++- python/test/bench/onset/bench-delay | 72 ++++++++ python/test/bench/onset/bench-onset | 100 +++-------- python/test/bench/onset/bench-window | 63 +++++++ python/test/bench/onset/benchonset.py | 235 ++++++++++++++++++++++++++ 5 files changed, 425 insertions(+), 79 deletions(-) create mode 100755 python/test/bench/onset/bench-delay create mode 100755 python/test/bench/onset/bench-window create mode 100644 python/test/bench/onset/benchonset.py diff --git a/python/test/bench/onset/Makefile.am b/python/test/bench/onset/Makefile.am index 0720deb8..5528f165 100644 --- a/python/test/bench/onset/Makefile.am +++ b/python/test/bench/onset/Makefile.am @@ -2,22 +2,42 @@ export BASEDIR=../../../.. export PYTHONPATH=$(BASEDIR)/python export LD_LIBRARY_PATH=$(BASEDIR)/src/.libs:$(BASEDIR)/ext/.libs -SOURCE = \ - /archives/samples/DB/PercussivePhrases/CM18/Samba_Audio \ +DETAILSOURCE = \ /var/tmp/Onset-Mirex2005/poly_pitched \ /var/tmp/Onset-Mirex2005/solo_bars_and_bells \ - /var/tmp/Onset-Mirex2005/solo_brass \ /var/tmp/Onset-Mirex2005/solo_drums \ /var/tmp/Onset-Mirex2005/solo_plucked_strings \ /var/tmp/Onset-Mirex2005/solo_singing_voice \ /var/tmp/Onset-Mirex2005/solo_sustained_strings \ /var/tmp/Onset-Mirex2005/solo_winds \ - /var/tmp/Onset-Mirex2005/complex \ - /var/tmp/Onset-Mirex2005 + /var/tmp/Onset-Mirex2005/complex -test-aubiocut: $(patsubst %, %.aubiocut, $(SOURCE)) +SOURCE = /var/tmp/Onset-Mirex2005 + +TESTSOURCE = \ + /var/tmp/Onset-Mirex2005/solo_bars_and_bells \ + /var/tmp/Onset-Mirex2005/solo_winds \ + /archives/samples/DB/PercussivePhrases/CM18/Samba_Audio + +test-aubiocut: $(patsubst %, %.aubiocut, $(TESTSOURCE)) +test-aubiodelay: $(patsubst %, %.aubiodelay, $(TESTSOURCE)) +test-aubiowindow: $(patsubst %, %.aubiowindow, $(TESTSOURCE)) + +final-aubiocut: $(patsubst %, %.aubiocut, $(DETAILSOURCE) $(SOURCE)) +final-aubiodelay: $(patsubst %, %.aubiodelay, $(SOURCE)) +final-aubiowindow: $(patsubst %, %.aubiowindow, $(SOURCE)) %.aubiocut: % rm -f `basename $<`.aubiocut ./bench-onset $< | tee `basename $<`.aubiocut - diff `basename $<`.aubiocut.ref `basename $<`.aubiocut + -diff `basename $<`.aubiocut.ref `basename $<`.aubiocut + +%.aubiodelay: % + rm -f `basename $@` + ./bench-delay $< | tee `basename $@` + -diff `basename $@`.ref `basename $@` + +%.aubiowindow: % + rm -f `basename $@` + ./bench-window $< | tee `basename $@` + -diff `basename $@`.ref `basename $@` diff --git a/python/test/bench/onset/bench-delay b/python/test/bench/onset/bench-delay new file mode 100755 index 00000000..03febb08 --- /dev/null +++ b/python/test/bench/onset/bench-delay @@ -0,0 +1,72 @@ +#! /usr/bin/python + +from aubio.bench.node import * +from aubio.tasks import * + +from benchonset import mmean, stdev, benchonset + +class mybenchonset(benchonset): + + def run_bench(self,modes=['dual'],thresholds=[0.5]): + from os.path import dirname,basename + self.modes = modes + self.thresholds = thresholds + self.pretty_titles() + for mode in self.modes: + d = [] + outplot = "_-_".join(("delay",mode, + basename(self.datadir) )) + + self.params.onsetmode = mode + self.params.threshold = thresholds[0] + + self.params.localmin = False + self.params.delay = 0. 
+ + self.dir_exec() + self.dir_eval() + self.pretty_print() + self.plotdiffs(d,plottitle="Causal") + + self.params.localmin = True + self.params.delay = 0. + self.dir_exec() + self.dir_eval() + self.pretty_print() + self.plotdiffs(d,plottitle="Local min") + + self.params.localmin = False + self.params.delay = 6. + self.dir_exec() + self.dir_eval() + self.pretty_print() + self.plotdiffs(d,plottitle="Fixed delay") + + self.plotplotdiffs(d) + self.plotplotdiffs(d,outplot=outplot,extension="png") + self.plotplotdiffs(d,outplot=outplot,extension="ps") + self.plotplotdiffs(d,outplot=outplot,extension="svg") + + +if __name__ == "__main__": + import sys + if len(sys.argv) > 1: datapath = sys.argv[1] + else: print "ERR: a path is required"; sys.exit(1) + modes = ['complex', 'energy', 'phase', 'specdiff', 'kl', 'mkl', 'dual'] + #modes = [ 'complex' ] + thresholds = [ 0.5] + #thresholds = [1.5] + + #datapath = "%s%s" % (DATADIR,'/onset/DB/*/') + respath = '/var/tmp/DB-testings' + + benchonset = mybenchonset(datapath,respath,checkres=True,checkanno=True) + benchonset.params = taskparams() + benchonset.task = taskonset + benchonset.valuesdict = {} + + try: + #benchonset.auto_learn2(modes=modes) + benchonset.run_bench(modes=modes,thresholds=thresholds) + except KeyboardInterrupt: + sys.exit(1) diff --git a/python/test/bench/onset/bench-onset b/python/test/bench/onset/bench-onset index 96303afb..11b42836 100755 --- a/python/test/bench/onset/bench-onset +++ b/python/test/bench/onset/bench-onset @@ -1,80 +1,18 @@ #! /usr/bin/python -from aubio.bench.node import * from aubio.tasks import * +from benchonset import mmean, stdev, benchonset - - -def mmean(l): - return sum(l)/float(len(l)) - -def stdev(l): - smean = 0 - lmean = mmean(l) - for i in l: - smean += (i-lmean)**2 - smean *= 1. 
/ len(l) - return smean**.5 - -class benchonset(bench): - - """ list of values to store per file """ - valuenames = ['orig','missed','Tm','expc','bad','Td'] - """ list of lists to store per file """ - valuelists = ['l','labs'] - """ list of values to print per dir """ - printnames = [ 'mode', 'thres', 'dist', 'prec', 'recl', - 'Ttrue', 'Tfp', 'Tfn', 'Tm', 'Td', - 'aTtrue', 'aTfp', 'aTfn', 'aTm', 'aTd', - 'mean', 'smean', 'amean', 'samean'] - - """ per dir """ - formats = {'mode': "%12s" , 'thres': "%5.4s", - 'dist': "%5.4s", 'prec': "%5.4s", 'recl': "%5.4s", - 'Ttrue': "%5.4s", 'Tfp': "%5.4s", 'Tfn': "%5.4s", - 'Tm': "%5.4s", 'Td': "%5.4s", - 'aTtrue':"%5.4s", 'aTfp': "%5.4s", 'aTfn': "%5.4s", - 'aTm': "%5.4s", 'aTd': "%5.4s", - 'mean': "%5.40s", 'smean': "%5.40s", - 'amean': "%5.40s", 'samean': "%5.40s"} - - def dir_eval(self): - """ evaluate statistical data over the directory """ - totaltrue = sum(self.v['expc'])-sum(self.v['bad'])-sum(self.v['Td']) - totalfp = sum(self.v['bad'])+sum(self.v['Td']) - totalfn = sum(self.v['missed'])+sum(self.v['Tm']) - self.P = 100*float(totaltrue)/max(totaltrue + totalfp,1) - self.R = 100*float(totaltrue)/max(totaltrue + totalfn,1) - if self.R < 0: self.R = 0 - self.F = 2.* self.P*self.R / max(float(self.P+self.R),1) - N = float(len(self.reslist)) - self.v['mode'] = self.params.onsetmode - self.v['thres'] = self.params.threshold - self.v['thres'] = "%2.3f" % self.params.threshold - self.v['dist'] = "%2.3f" % self.F - self.v['prec'] = "%2.3f" % self.P - self.v['recl'] = "%2.3f" % self.R - self.v['Ttrue'] = totaltrue - self.v['Tfp'] = totalfp - self.v['Tfn'] = totalfn - self.v['aTtrue'] = totaltrue/N - self.v['aTfp'] = totalfp/N - self.v['aTfn'] = totalfn/N - self.v['aTm'] = sum(self.v['Tm'])/N - self.v['aTd'] = sum(self.v['Td'])/N - self.v['Tm'] = sum(self.v['Tm']) - self.v['Td'] = sum(self.v['Td']) - self.v['mean'] = mmean(self.v['l']) - self.v['smean'] = stdev(self.v['l']) - self.v['amean'] = mmean(self.v['labs']) - self.v['samean'] = stdev(self.v['labs']) +class mybenchonset(benchonset): def run_bench(self,modes=['dual'],thresholds=[0.5]): - self.modes = modes + from os.path import dirname,basename self.thresholds = thresholds self.pretty_titles() - for mode in self.modes: + d,e,f = [],[],[] + for mode in modes: + self.vlist = [] self.params.onsetmode = mode for threshold in self.thresholds: self.params.threshold = threshold @@ -82,6 +20,20 @@ class benchonset(bench): self.dir_eval() self.pretty_print() #print self.v + self.vlist.append(self.v) + self.plotroc(d) + self.plotfmeas(e) + self.plotpr(f) + #print vlist + #self.plotplotroc(d) + #self.plotplotfmeas(e) + #self.plotplotpr(f) + outplot = basename(self.datadir) + for ext in ("png","svg","ps"): + self.plotplotroc(d,outplot=outplot,extension=ext) + self.plotplotfmeas(e,outplot=outplot,extension=ext) + self.plotplotpr(f,outplot=outplot,extension=ext) + def auto_learn(self,modes=['dual'],thresholds=[0.1,1.5]): """ simple dichotomia like algorithm to optimise threshold """ @@ -106,6 +58,10 @@ class benchonset(bench): lessF = self.F for i in range(steps): + self.params.localmin = True + self.params.delay = 1. 
+ self.dir_exec() + self.dir_eval() self.params.threshold = ( lesst + topt ) * .5 self.dir_exec() self.dir_eval() @@ -169,15 +125,15 @@ if __name__ == "__main__": import sys if len(sys.argv) > 1: datapath = sys.argv[1] else: print "ERR: a path is required"; sys.exit(1) - modes = ['complex', 'energy', 'phase', 'specdiff', 'kl', 'mkl', 'dual'] - #modes = [ 'mkl' ] + modes = ['complex', 'energy', 'phase', 'hfc', 'specdiff', 'kl', 'mkl', 'dual'] thresholds = [ 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2] - #thresholds = [1.5] + #modes = [ 'hfc' ] + #thresholds = [0.1, 1.5] #datapath = "%s%s" % (DATADIR,'/onset/DB/*/') respath = '/var/tmp/DB-testings' - benchonset = benchonset(datapath,respath,checkres=True,checkanno=True) + benchonset = mybenchonset(datapath,respath,checkres=True,checkanno=True) benchonset.params = taskparams() benchonset.task = taskonset benchonset.valuesdict = {} diff --git a/python/test/bench/onset/bench-window b/python/test/bench/onset/bench-window new file mode 100755 index 00000000..faec0cdf --- /dev/null +++ b/python/test/bench/onset/bench-window @@ -0,0 +1,63 @@ +#! /usr/bin/python + +from aubio.tasks import * + +from benchonset import mmean, stdev, plotdiffs, plotplotdiffs, benchonset + +class mybenchonset(benchonset): + + def run_bench(self,modes=['dual'],thresholds=[0.5]): + from os.path import dirname,basename + self.thresholds = thresholds + self.pretty_titles() + for mode in modes: + + self.params.onsetmode = mode + self.params.threshold = thresholds[0] + self.params.localmin = False + + for delay in (0., 4.): + d = [] + outplot = "_-_".join(("window",mode,"delay-%s" % delay, + basename(self.datadir) )) + self.params.delay = delay + + for buf in (2048, 1024, 512): + for hop in (buf/2, buf/4): + self.params.bufsize = buf + self.params.hopsize = hop + self.params.step = float(self.params.hopsize)/float(self.params.samplerate) + + self.dir_exec() + self.dir_eval() + self.pretty_print() + plotdiffs(self.v,d,plottitle="%s %s" % (buf,hop)) + + plotplotdiffs(d) + plotplotdiffs(d,outplot=outplot,extension="png") + plotplotdiffs(d,outplot=outplot,extension="ps") + plotplotdiffs(d,outplot=outplot,extension="svg") + + +if __name__ == "__main__": + import sys + if len(sys.argv) > 1: datapath = sys.argv[1] + else: print "ERR: a path is required"; sys.exit(1) + modes = ['complex', 'energy', 'phase', 'specdiff', 'kl', 'mkl', 'dual'] + #modes = [ 'complex' ] + thresholds = [ 0.5] + #thresholds = [1.5] + + #datapath = "%s%s" % (DATADIR,'/onset/DB/*/') + respath = '/var/tmp/DB-testings' + + benchonset = mybenchonset(datapath,respath,checkres=True,checkanno=True) + benchonset.params = taskparams() + benchonset.task = taskonset + benchonset.valuesdict = {} + + try: + #benchonset.auto_learn2(modes=modes) + benchonset.run_bench(modes=modes,thresholds=thresholds) + except KeyboardInterrupt: + sys.exit(1) diff --git a/python/test/bench/onset/benchonset.py b/python/test/bench/onset/benchonset.py new file mode 100644 index 00000000..c312cf35 --- /dev/null +++ b/python/test/bench/onset/benchonset.py @@ -0,0 +1,235 @@ +#! /usr/bin/python + +from aubio.bench.node import * +from os.path import dirname,basename + +def mmean(l): + return sum(l)/max(float(len(l)),1) + +def stdev(l): + smean = 0 + if not len(l): return smean + lmean = mmean(l) + for i in l: + smean += (i-lmean)**2 + smean *= 1. 
/ len(l) + return smean**.5 + +class benchonset(bench): + + """ list of values to store per file """ + valuenames = ['orig','missed','Tm','expc','bad','Td'] + """ list of lists to store per file """ + valuelists = ['l','labs'] + """ list of values to print per dir """ + printnames = [ 'mode', 'thres', 'dist', 'prec', 'recl', + 'GD', 'FP', + 'Torig', 'Ttrue', 'Tfp', 'Tfn', 'TTm', 'TTd', + 'aTtrue', 'aTfp', 'aTfn', 'aTm', 'aTd', + 'mean', 'smean', 'amean', 'samean'] + + """ per dir """ + formats = {'mode': "%12s" , 'thres': "%5.4s", + 'dist': "%5.4s", 'prec': "%5.4s", 'recl': "%5.4s", + 'Torig': "%5.4s", 'Ttrue': "%5.4s", 'Tfp': "%5.4s", 'Tfn': "%5.4s", + 'TTm': "%5.4s", 'TTd': "%5.4s", + 'aTtrue':"%5.4s", 'aTfp': "%5.4s", 'aTfn': "%5.4s", + 'aTm': "%5.4s", 'aTd': "%5.4s", + 'mean': "%5.40s", 'smean': "%5.40s", + 'amean': "%5.40s", 'samean': "%5.40s", + "GD": "%5.4s", "FP": "%5.4s", + "GDm": "%5.4s", "FPd": "%5.4s"} + + def dir_eval(self): + """ evaluate statistical data over the directory """ + v = self.v + + v['mode'] = self.params.onsetmode + v['thres'] = self.params.threshold + + v['Torig'] = sum(v['orig']) + v['TTm'] = sum(v['Tm']) + v['TTd'] = sum(v['Td']) + v['Texpc'] = sum(v['expc']) + v['Tbad'] = sum(v['bad']) + v['Tmissed'] = sum(v['missed']) + v['aTm'] = mmean(v['Tm']) + v['aTd'] = mmean(v['Td']) + + v['mean'] = mmean(v['l']) + v['smean'] = stdev(v['l']) + + v['amean'] = mmean(v['labs']) + v['samean'] = stdev(v['labs']) + + # old type calculations + # good detection rate + v['GD'] = 100.*(v['Torig']-v['Tmissed']-v['TTm'])/v['Torig'] + # false positive rate + v['FP'] = 100.*(v['Tbad']+v['TTd'])/v['Torig'] + # good detection counting merged detections as good + v['GDm'] = 100.*(v['Torig']-v['Tmissed'])/v['Torig'] + # false positives counting doubled as good + v['FPd'] = 100.*v['Tbad']/v['Torig'] + + # mirex type annotations + totaltrue = v['Texpc']-v['Tbad']-v['TTd'] + totalfp = v['Tbad']+v['TTd'] + totalfn = v['Tmissed']+v['TTm'] + self.v['Ttrue'] = totaltrue + self.v['Tfp'] = totalfp + self.v['Tfn'] = totalfn + # average over the number of annotation files + N = float(len(self.reslist)) + self.v['aTtrue'] = totaltrue/N + self.v['aTfp'] = totalfp/N + self.v['aTfn'] = totalfn/N + + # F-measure + self.P = 100.*float(totaltrue)/max(totaltrue + totalfp,1) + self.R = 100.*float(totaltrue)/max(totaltrue + totalfn,1) + #if self.R < 0: self.R = 0 + self.F = 2.* self.P*self.R / max(float(self.P+self.R),1) + self.v['dist'] = self.F + self.v['prec'] = self.P + self.v['recl'] = self.R + + def plotroc(self,d,plottitle=""): + import Gnuplot, Gnuplot.funcutils + gd = [] + fp = [] + for i in self.vlist: + gd.append(i['GD']) + fp.append(i['FP']) + d.append(Gnuplot.Data(fp, gd, with='linespoints', + title="%s %s" % (plottitle,i['mode']) )) + + def plotplotroc(self,d,outplot=0,extension='ps'): + import Gnuplot, Gnuplot.funcutils + from sys import exit + g = Gnuplot.Gnuplot(debug=0, persist=1) + if outplot: + if extension == 'ps': ext, extension = '.ps' , 'postscript' + elif extension == 'png': ext, extension = '.png', 'png' + elif extension == 'svg': ext, extension = '.svg', 'svg' + else: exit("ERR: unknown plot extension") + g('set terminal %s' % extension) + g('set output \'roc-%s%s\'' % (outplot,ext)) + xmax = 30 #max(fp) + ymin = 50 + g('set xrange [0:%f]' % xmax) + g('set yrange [%f:100]' % ymin) + # grid set + g('set grid') + g('set xtics 0,5,%f' % xmax) + g('set ytics %f,5,100' % ymin) + g('set key 27,65') + #g('set format \"%g\"') + g.title(basename(self.datadir)) + g.xlabel('false positives 
(%)') + g.ylabel('correct detections (%)') + g.plot(*d) + + def plotpr(self,d,plottitle=""): + import Gnuplot, Gnuplot.funcutils + x = [] + y = [] + for i in self.vlist: + x.append(i['prec']) + y.append(i['recl']) + d.append(Gnuplot.Data(x, y, with='linespoints', + title="%s %s" % (plottitle,i['mode']) )) + + def plotplotpr(self,d,outplot=0,extension='ps'): + import Gnuplot, Gnuplot.funcutils + from sys import exit + g = Gnuplot.Gnuplot(debug=0, persist=1) + if outplot: + if extension == 'ps': ext, extension = '.ps' , 'postscript' + elif extension == 'png': ext, extension = '.png', 'png' + elif extension == 'svg': ext, extension = '.svg', 'svg' + else: exit("ERR: unknown plot extension") + g('set terminal %s' % extension) + g('set output \'pr-%s%s\'' % (outplot,ext)) + g.title(basename(self.datadir)) + g.xlabel('Recall (%)') + g.ylabel('Precision (%)') + g.plot(*d) + + def plotfmeas(self,d,plottitle=""): + import Gnuplot, Gnuplot.funcutils + x,y = [],[] + for i in self.vlist: + x.append(i['thres']) + y.append(i['dist']) + d.append(Gnuplot.Data(x, y, with='linespoints', + title="%s %s" % (plottitle,i['mode']) )) + + def plotplotfmeas(self,d,outplot="",extension='ps', title="F-measure"): + import Gnuplot, Gnuplot.funcutils + from sys import exit + g = Gnuplot.Gnuplot(debug=0, persist=1) + if outplot: + if extension == 'ps': terminal = 'postscript' + elif extension == 'png': terminal = 'png' + elif extension == 'svg': terminal = 'svg' + else: exit("ERR: unknown plot extension") + g('set terminal %s' % terminal) + g('set output \'fmeas-%s.%s\'' % (outplot,extension)) + g.xlabel('threshold \\delta') + g.ylabel('F-measure (%)') + g('set xrange [0:1.2]') + g('set yrange [0:100]') + g.title(basename(self.datadir)) + # grid set + #g('set grid') + #g('set xtics 0,5,%f' % xmax) + #g('set ytics %f,5,100' % ymin) + #g('set key 27,65') + #g('set format \"%g\"') + g.plot(*d) + + def plotdiffs(self,d,plottitle=""): + import Gnuplot, Gnuplot.funcutils + v = self.v + l = v['l'] + mean = v['mean'] + smean = v['smean'] + amean = v['amean'] + samean = v['samean'] + val = [] + per = [0] * 100 + for i in range(0,100): + val.append(i*.001-.05) + for j in l: + if abs(j-val[i]) <= 0.001: + per[i] += 1 + total = v['Torig'] + for i in range(len(per)): per[i] /= total/100. + + d.append(Gnuplot.Data(val, per, with='fsteps', + title="%s %s" % (plottitle,v['mode']) )) + #d.append('mean=%f,sigma=%f,eps(x) title \"\"'% (mean,smean)) + #d.append('mean=%f,sigma=%f,eps(x) title \"\"'% (amean,samean)) + + + def plotplotdiffs(self,d,outplot=0,extension='ps'): + import Gnuplot, Gnuplot.funcutils + from sys import exit + g = Gnuplot.Gnuplot(debug=0, persist=1) + if outplot: + if extension == 'ps': ext, extension = '.ps' , 'postscript' + elif extension == 'png': ext, extension = '.png', 'png' + elif extension == 'svg': ext, extension = '.svg', 'svg' + else: exit("ERR: unknown plot extension") + g('set terminal %s' % extension) + g('set output \'diffhist-%s%s\'' % (outplot,ext)) + g('eps(x) = 1./(sigma*(2.*3.14159)**.5) * exp ( - ( x - mean ) ** 2. / ( 2. * sigma ** 2. ))') + g.title(basename(self.datadir)) + g.xlabel('delay to hand-labelled onset (s)') + g.ylabel('% number of correct detections / ms ') + g('set xrange [-0.05:0.05]') + g('set yrange [0:50]') + g.plot(*d) + + -- 2.26.2
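
All of the summary numbers printed by the new benchmarks come out of benchonset.dir_eval. As a quick reference, here is a minimal standalone sketch of that arithmetic; it is not part of the patch, and the meaning of the per-file counters is inferred from the variable names ('orig' annotated onsets, 'expc' detections, 'missed' undetected onsets, 'bad' spurious detections, 'Tm'/'Td' merged and doubled detections). The example counts are made up, and, like the original, the sketch assumes at least one annotated onset.

# Sketch of the summary statistics computed in benchonset.dir_eval.
def summarize(files):
    """files: list of per-file dicts with keys orig, expc, missed, bad, Tm, Td."""
    Torig   = sum(f['orig']   for f in files)   # hand-labelled onsets
    Texpc   = sum(f['expc']   for f in files)   # reported detections
    Tmissed = sum(f['missed'] for f in files)
    Tbad    = sum(f['bad']    for f in files)
    TTm     = sum(f['Tm']     for f in files)
    TTd     = sum(f['Td']     for f in files)

    # "old type" rates, relative to the number of annotated onsets
    GD = 100. * (Torig - Tmissed - TTm) / Torig   # good detection rate
    FP = 100. * (Tbad + TTd) / Torig              # false positive rate

    # MIREX-style precision / recall / F-measure
    true = Texpc - Tbad - TTd
    fp   = Tbad + TTd
    fn   = Tmissed + TTm
    P = 100. * true / max(true + fp, 1)
    R = 100. * true / max(true + fn, 1)
    F = 2. * P * R / max(P + R, 1)
    return GD, FP, P, R, F

if __name__ == '__main__':
    # one hypothetical file: 100 annotated onsets, 95 detections,
    # 10 missed, 5 spurious, 2 merged, 1 doubled
    print("GD=%.1f%% FP=%.1f%% P=%.1f%% R=%.1f%% F=%.1f%%" %
          summarize([{'orig': 100, 'expc': 95, 'missed': 10,
                      'bad': 5, 'Tm': 2, 'Td': 1}]))

With the patch applied, these figures are produced by running e.g. "make test-aubiodelay" or "./bench-window <datadir>"; the Makefile rules tee each report into a file named after the corpus and diff it against the stored .ref file.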
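
The delay histograms plotted by bench-delay and bench-window come from benchonset.plotdiffs. The binning it performs is compact but a little cryptic, so here is a sketch of the same computation (again not part of the patch): the signed localization errors stored in v['l'] (detection time minus hand-labelled time, in seconds) are counted in overlapping +/-1 ms windows centred every millisecond from -50 ms to +49 ms, and each count is expressed as a percentage of the annotated onsets.

# Sketch of the binning used by benchonset.plotdiffs.
def diff_histogram(errors, total_onsets):
    centers, percent = [], []
    for i in range(100):
        x = i * 0.001 - 0.05                  # bin centres from -50 ms to +49 ms
        n = len([e for e in errors if abs(e - x) <= 0.001])
        centers.append(x)
        percent.append(100. * n / total_onsets)
    return centers, percent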