--- /dev/null
-#!/usr/bin/python
++#!/usr/bin/env python
+
+ import hooke.hooke
+
+ hooke.hooke.main()
<?xml version="1.0" ?>\r
<!-- To comment something, put dashes and ! like here -->\r
<config>\r
- <!-- Internal variabls. -->\r
- <display ext="1" colour_ext="None" ret="1" colour_ret="None" correct="1" colour_correct="None" contact_point="0" medfilt="0" xaxes="0" yaxes="0" flatten="1" fit_function="wlc" temperature="301" auto_fit_points="50" auto_slope_span="20" auto_delta_force="10" auto_fit_nm="5" auto_min_p="0.005" auto_max_p="10" baseline_clicks="0" auto_left_baseline="20" auto_right_baseline="20" force_multiplier="1" fc_showphase="0" fc_showimposed="0" fc_interesting="0" tccd_threshold="0" tccd_coincident="0" centerzero="1"/>\r
\r
- <!-- \r
- The following section defines your own work directory. Substitute your work directory.\r
- -->\r
- <workdir>\r
- insert directory\r
- </workdir>\r
+ \r
+ <!--\r
+ This section defines the Hooke installation. confpath is hardcoded,\r
+ since it's to find the config file(s) before your read them.\r
+ -->\r
+ <install>\r
+ <docpath>\r
+ ./doc/\r
+ </docpath>\r
+ </install>\r
+ \r
+ <!-- Internal variables. -->\r
+ <display\r
+ ext="1"\r
+ colour_ext="None"\r
+ ret="1"\r
+ colour_ret="None"\r
+ correct="1"\r
+ colour_correct="None"\r
+ contact_point="0"\r
+ medfilt="0"\r
+ xaxes="0"\r
+ yaxes="0"\r
+ flatten="1"\r
+ fit_function="wlc"\r
+ temperature="301"\r
+ auto_fit_points="50"\r
+ auto_slope_span="20"\r
+ auto_delta_force="10"\r
+ auto_fit_nm="5"\r
+ auto_min_p="0.005"\r
+ auto_max_p="10"\r
+ baseline_clicks="0"\r
+ auto_left_baseline="20"\r
+ auto_right_baseline="20"\r
+ force_multiplier="1"\r
+ fc_showphase="0"\r
+ fc_showimposed="0"\r
+ fc_interesting="0"\r
+ tccd_threshold="0"\r
- tccd_coincident="0"/>\r
++ tccd_coincident="0"\r
++ centerzero="1"\r
++/>\r
\r
<!--\r
The following section defines the default playlist to load at startup.\r
- -->\r
+ -->\r
<defaultlist>\r
- test.hkp\r
+ ./hooke/test/test.hkp\r
</defaultlist>\r
\r
<!--\r
This section defines which plugins have to be loaded by Hooke.\r
- -->\r
+ -->\r
<plugins>\r
<fit/>\r
+ <curvetools/>\r
<procplots/>\r
<flatfilts/>\r
<generalclamp/>\r
<pcluster/>\r
<generaltccd/>\r
<multidistance/>\r
- <cut/>\r
- <multifit/> \r
+ <jumpstat/>\r
+ <review/>\r
++ <multifit/>\r
</plugins>\r
\r
<!--\r
This section defines which drivers have to be loaded by Hooke.\r
-->\r
<drivers>\r
- <picoforce/>\r
- <!-- picoforcealt/ -->\r
+ <!--picoforce/-->\r
+ <picoforcealt/>\r
<hemingclamp/>\r
<csvdriver/>\r
<!-- tutorialdriver/ -->\r
<jpk/>\r
<mfp1dexport/>\r
<mcs/>\r
- <mfp3d/> \r
+ <!-- hdf5/ -->\r
++ <mfp3d/>\r
</drivers>\r
\r
<!--\r
<multiplier/>\r
<clamp/>\r
<threshold/>\r
-- <coincident/>\r
++ <coincident/>
+ <centerzero/>\r
</plotmanips>\r
\r
</config>\r
- \r
--- /dev/null
- import libhookecurve as lhc
- import libhooke as lh
+#!/usr/bin/env python
+
+'''
+hdf5.py
+
+Driver for text-exported HDF5 files from Igor pro
+
+Alberto Gomez-Casado (c) 2010
+Massimo Sandal (c) 2009
+'''
+
++from .. import libhookecurve as lhc
++from .. import libhooke as lh
+
+class hdf5Driver(lhc.Driver):
+
+ def __init__(self, filename):
+
+ self.filename=filename
+ self.filedata=open(filename,'rU')
+ self.lines=list(self.filedata.readlines())
+ self.filedata.close()
+
+ self.filetype='hdf5'
+ self.experiment='smfs'
+
+ def close_all(self):
+ self.filedata.close()
+
+ def is_me(self):
+ self.raw_header=self.lines[0]
+
+ if 'IGP-HDF5-Hooke' in self.raw_header:
+ return True
+ else:
+ return False
+
+ def _read_columns(self):
+
+ self.raw_columns=self.lines[4:]
+
+ kline=None
+ for line in self.lines:
+ if line[:7]=='SpringC':
+ kline=line
+ break
+
+ kline=kline.split(':')
+
+ self.k=float(kline[1])
+
+
+ xext=[]
+ xret=[]
+ yext=[]
+ yret=[]
+ for line in self.raw_columns:
+ spline=line.split()
+ xext.append(float(spline[0]))
+ yext.append(float(spline[1]))
+ xret.append(float(spline[2]))
+ yret.append(float(spline[3]))
+
+ return [[xext,yext],[xret,yret]]
+
+ def deflection(self):
+ self.data=self._read_columns()
+ return self.data[0][1],self.data[1][1]
+
+
+ def default_plots(self):
+ main_plot=lhc.PlotObject()
+ defl_ext,defl_ret=self.deflection()
+ yextforce=[i*self.k for i in defl_ext]
+ yretforce=[i*self.k for i in defl_ret]
+ main_plot.add_set(self.data[0][0],yextforce)
+ main_plot.add_set(self.data[1][0],yretforce)
+ main_plot.normalize_vectors()
+ main_plot.units=['Z','force'] #FIXME: if there's an header saying something about the time count, should be used
+ main_plot.destination=0
+ main_plot.title=self.filename
+ #main_plot.colors=['red','blue']
+ return [main_plot]
--- /dev/null
- import libhookecurve as lhc
+#!/usr/bin/env python
+
+'''
+mfp3d.py
+
+Driver for MFP-3D files.
+
+Copyright 2010 by Dr. Rolf Schmidt (Concordia University, Canada)
+This driver is based on the work of R. Naud and A. Seeholzer (see below)
+to read Igor binary waves. Code used with permission.
+
+Modified for usage with Hooke CLI by Alberto Gomez-Casado (University of Twente, The Netherlands)
+
+This program is released under the GNU General Public License version 2.
+'''
+
+# DEFINITION:
+# Reads Igor's (Wavemetric) binary wave format, .ibw, files.
+#
+# ALGORITHM:
+# Parsing proper to version 2, 3, or version 5 (see Technical notes TN003.ifn:
+# http://mirror.optus.net.au/pub/wavemetrics/IgorPro/Technical_Notes/) and data
+# type 2 or 4 (non complex, single or double precision vector, real values).
+#
+# AUTHORS:
+# Matlab version: R. Naud August 2008 (http://lcn.epfl.ch/~naud/Home.html)
+# Python port: A. Seeholzer October 2008
+#
+# VERSION: 0.1
+#
+# COMMENTS:
+# Only tested for version 2 Igor files for now, testing for 3 and 5 remains to be done.
+# More header data could be passed back if wished. For significance of ignored bytes see
+# the technical notes linked above.
+
+import numpy
+import os.path
+import struct
+
++from .. import libhookecurve as lhc
+
+
+__version__='0.0.0.20100310'
+
+
+class DataChunk(list):
+ #Dummy class to provide ext and ret methods to the data list.
+
+ def ext(self):
+ halflen=(len(self)/2)
+ return self[0:halflen]
+
+ def ret(self):
+ halflen=(len(self)/2)
+ return self[halflen:]
+
+class mfp3dDriver(lhc.Driver):
+
+ #Construction and other special methods
+
+ def __init__(self,filename):
+ '''
+ constructor method
+ '''
+
+ self.textfile =file(filename)
+ self.binfile=file(filename,'rb')
+ #unnecesary, but some other part of the program expects these to be open
+
+ self.forcechunk=0
+ self.distancechunk=1
+ #TODO eliminate the need to set chunk numbers
+
+ self.filepath=filename
+ self.debug=True
+
+ self.data = []
+ self.note = []
+ self.retract_velocity = None
+ self.spring_constant = None
+ self.filename = filename
+
+ self.filedata = open(filename,'rU')
+ self.lines = list(self.filedata.readlines())
+ self.filedata.close()
+
+ self.filetype = 'mfp3d'
+ self.experiment = 'smfs'
+
+
+ def _get_data_chunk(self,whichchunk):
+
+ data = None
+ f = open(self.filename, 'rb')
+ ####################### ORDERING
+ # machine format for IEEE floating point with big-endian
+ # byte ordering
+ # MacIgor use the Motorola big-endian 'b'
+ # WinIgor use Intel little-endian 'l'
+ # If the first byte in the file is non-zero, then the file is a WinIgor
+ firstbyte = struct.unpack('b', f.read(1))[0]
+ if firstbyte == 0:
+ format = '>'
+ else:
+ format = '<'
+ ####################### CHECK VERSION
+ f.seek(0)
+ version = struct.unpack(format+'h', f.read(2))[0]
+ ####################### READ DATA AND ACCOMPANYING INFO
+ if version == 2 or version == 3:
+ # pre header
+ wfmSize = struct.unpack(format+'i', f.read(4))[0] # The size of the WaveHeader2 data structure plus the wave data plus 16 bytes of padding.
+ noteSize = struct.unpack(format+'i', f.read(4))[0] # The size of the note text.
+ if version==3:
+ formulaSize = struct.unpack(format+'i', f.read(4))[0]
+ pictSize = struct.unpack(format+'i', f.read(4))[0] # Reserved. Write zero. Ignore on read.
+ checksum = struct.unpack(format+'H', f.read(2))[0] # Checksum over this header and the wave header.
+ # wave header
+ dtype = struct.unpack(format+'h', f.read(2))[0]
+ if dtype == 2:
+ dtype = numpy.float32(.0).dtype
+ elif dtype == 4:
+ dtype = numpy.double(.0).dtype
+ else:
+ assert False, "Wave is of type '%i', not supported" % dtype
+ dtype = dtype.newbyteorder(format)
+
+ ignore = f.read(4) # 1 uint32
+ bname = self._flatten(struct.unpack(format+'20c', f.read(20)))
+ ignore = f.read(4) # 2 int16
+ ignore = f.read(4) # 1 uint32
+ dUnits = self._flatten(struct.unpack(format+'4c', f.read(4)))
+ xUnits = self._flatten(struct.unpack(format+'4c', f.read(4)))
+ npnts = struct.unpack(format+'i', f.read(4))[0]
+ amod = struct.unpack(format+'h', f.read(2))[0]
+ dx = struct.unpack(format+'d', f.read(8))[0]
+ x0 = struct.unpack(format+'d', f.read(8))[0]
+ ignore = f.read(4) # 2 int16
+ fsValid = struct.unpack(format+'h', f.read(2))[0]
+ topFullScale = struct.unpack(format+'d', f.read(8))[0]
+ botFullScale = struct.unpack(format+'d', f.read(8))[0]
+ ignore = f.read(16) # 16 int8
+ modDate = struct.unpack(format+'I', f.read(4))[0]
+ ignore = f.read(4) # 1 uint32
+ # Numpy algorithm works a lot faster than struct.unpack
+ data = numpy.fromfile(f, dtype, npnts)
+
+ elif version == 5:
+ # pre header
+ checksum = struct.unpack(format+'H', f.read(2))[0] # Checksum over this header and the wave header.
+ wfmSize = struct.unpack(format+'i', f.read(4))[0] # The size of the WaveHeader2 data structure plus the wave data plus 16 bytes of padding.
+ formulaSize = struct.unpack(format+'i', f.read(4))[0]
+ noteSize = struct.unpack(format+'i', f.read(4))[0] # The size of the note text.
+ dataEUnitsSize = struct.unpack(format+'i', f.read(4))[0]
+ dimEUnitsSize = struct.unpack(format+'4i', f.read(16))
+ dimLabelsSize = struct.unpack(format+'4i', f.read(16))
+ sIndicesSize = struct.unpack(format+'i', f.read(4))[0]
+ optionSize1 = struct.unpack(format+'i', f.read(4))[0]
+ optionSize2 = struct.unpack(format+'i', f.read(4))[0]
+
+ # header
+ ignore = f.read(4)
+ CreationDate = struct.unpack(format+'I',f.read(4))[0]
+ modData = struct.unpack(format+'I',f.read(4))[0]
+ npnts = struct.unpack(format+'i',f.read(4))[0]
+ # wave header
+ dtype = struct.unpack(format+'h',f.read(2))[0]
+ if dtype == 2:
+ dtype = numpy.float32(.0).dtype
+ elif dtype == 4:
+ dtype = numpy.double(.0).dtype
+ else:
+ assert False, "Wave is of type '%i', not supported" % dtype
+ dtype = dtype.newbyteorder(format)
+
+ ignore = f.read(2) # 1 int16
+ ignore = f.read(6) # 6 schar, SCHAR = SIGNED CHAR? ignore = fread(fid,6,'schar'); #
+ ignore = f.read(2) # 1 int16
+ bname = self._flatten(struct.unpack(format+'32c',f.read(32)))
+ ignore = f.read(4) # 1 int32
+ ignore = f.read(4) # 1 int32
+ ndims = struct.unpack(format+'4i',f.read(16)) # Number of of items in a dimension -- 0 means no data.
+ sfA = struct.unpack(format+'4d',f.read(32))
+ sfB = struct.unpack(format+'4d',f.read(32))
+ dUnits = self._flatten(struct.unpack(format+'4c',f.read(4)))
+ xUnits = self._flatten(struct.unpack(format+'16c',f.read(16)))
+ fsValid = struct.unpack(format+'h',f.read(2))
+ whpad3 = struct.unpack(format+'h',f.read(2))
+ ignore = f.read(16) # 2 double
+ ignore = f.read(40) # 10 int32
+ ignore = f.read(64) # 16 int32
+ ignore = f.read(6) # 3 int16
+ ignore = f.read(2) # 2 char
+ ignore = f.read(4) # 1 int32
+ ignore = f.read(4) # 2 int16
+ ignore = f.read(4) # 1 int32
+ ignore = f.read(8) # 2 int32
+
+ data = numpy.fromfile(f, dtype, npnts)
+ note_str = f.read(noteSize)
+ note_lines = note_str.split('\r')
+ self.note = {}
+ for line in note_lines:
+ if ':' in line:
+ key, value = line.split(':', 1)
+ self.note[key] = value
+ self.retract_velocity = float(self.note['Velocity'])
+ self.spring_constant = float(self.note['SpringConstant'])
+ else:
+ assert False, "Fileversion is of type '%i', not supported" % dtype
+ data = []
+
+ f.close()
+ if len(data) > 0:
+ #we have 3 columns: deflection, LVDT, raw
+ #TODO detect which is each one
+ count = npnts / 3
+ lvdt = data[:count]
+ deflection = data[count:2 * count]
+ #every column contains data for extension and retraction
+ #we assume the same number of points for each
+ #we could possibly extract this info from the note
+ count = npnts / 6
+
+ forcechunk=deflection*self.spring_constant
+ distancechunk=lvdt
+
+ if whichchunk==self.forcechunk:
+ return forcechunk
+ if whichchunk==self.distancechunk:
+ return distancechunk
+ else:
+ return None
+
+ def _force(self):
+ #returns force vector
+ Kspring=self.spring_constant
+ return DataChunk([(meter*Kspring) for meter in self._deflection()])
+
+ def _deflection(self):
+ #for internal use (feeds _force)
+ deflect=self.data_chunks[self.forcechunk]/self.spring_constant
+ return deflect
+
+ def _flatten(self, tup):
+ out = ''
+ for ch in tup:
+ out += ch
+ return out
+
+ def _Z(self):
+ return DataChunk(self.data_chunks[self.distancechunk])
+
+ def is_me(self):
+ if len(self.lines) < 34:
+ return False
+
+ name, extension = os.path.splitext(self.filename)
+ if extension == '.ibw':
+ for line in self.lines:
+ if line.startswith('ForceNote:'):
+ self.data_chunks=[self._get_data_chunk(num) for num in [0,1,2]]
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ def close_all(self):
+ '''
+ Explicitly closes all files
+ '''
+ self.textfile.close()
+ self.binfile.close()
+
+ def default_plots(self):
+ '''
+ creates the default PlotObject
+ '''
+ force=self._force()
+ zdomain=self._Z()
+ main_plot=lhc.PlotObject()
+ main_plot.vectors=[[zdomain.ext(), force.ext()],[zdomain.ret(), force.ret()]]
+ main_plot.normalize_vectors()
+ main_plot.units=['meters','newton']
+ main_plot.destination=0
+ main_plot.title=self.filepath
+
+
+ return [main_plot]
+
+ def deflection(self):
+ #interface for correct plotmanip and others
+ deflectionchunk=DataChunk(self._deflection())
+ return deflectionchunk.ext(),deflectionchunk.ret()
#!/usr/bin/env python\r
+# -*- coding: utf-8 -*-\r
\r
'''\r
HOOKE - A force spectroscopy review & analysis tool\r
This program is released under the GNU General Public License version 2.\r
'''\r
\r
- from libhooke import HOOKE_VERSION\r
- from libhooke import WX_GOOD\r
+ from .libhooke import HOOKE_VERSION, WX_GOOD\r
\r
import os\r
\r
import matplotlib.numerix as nx\r
import scipy as sp\r
\r
- from threading import *\r
+ from threading import Thread\r
import Queue\r
\r
- from hooke_cli import HookeCli\r
- from libhooke import *\r
- import libhookecurve as lhc\r
+ from .hooke_cli import HookeCli\r
+ from .libhooke import HookeConfig, ClickedPoint\r
+ from . import libhookecurve as lhc\r
\r
#import file versions, just to know with what we're working...\r
from hooke_cli import __version__ as hookecli_version\r
plugin_gui_namespaces=[]\r
for plugin_name in config['plugins']:\r
try:\r
- plugin=__import__(plugin_name)\r
+ hooke_module=__import__('hooke.plugin.'+plugin_name)\r
+ plugin = getattr(hooke_module.plugin, plugin_name)\r
try:\r
- eval('CLI_PLUGINS.append(plugin.'+plugin_name+'Commands)') #take Command plugin classes\r
- plugin_commands_namespaces.append(dir(eval('plugin.'+plugin_name+'Commands')))\r
+ #take Command plugin classes\r
+ commands = getattr(plugin, plugin_name+'Commands')\r
+ CLI_PLUGINS.append(commands)\r
+ plugin_commands_namespaces.append(dir(commands))\r
except:\r
pass\r
try:\r
- eval('GUI_PLUGINS.append(plugin.'+plugin_name+'Gui)') #take Gui plugin classes\r
- plugin_gui_namespaces.append(dir(eval('plugin.'+plugin_name+'Gui')))\r
+ #take Gui plugin classes\r
+ gui = getattr(plugin, plugin_name+'Gui')\r
+ GUI_PLUGINS.append(gui)\r
+ plugin_gui_namespaces.append(dir(gui))\r
except:\r
pass\r
except ImportError:\r
print 'Imported plugin ',plugin_name\r
\r
#eliminate names common to all namespaces\r
- for i in range(len(plugin_commands_namespaces)):\r
- plugin_commands_namespaces[i]=[item for item in plugin_commands_namespaces[i] if (item != '__doc__' and item != '__module__' and item != '_plug_init')]\r
+ for i,namespace in enumerate(plugin_commands_namespaces):\r
+ plugin_commands_namespaces[i] = \\r
+ filter(lambda item : not (item.startswith('__')\r
+ or item == '_plug_init'),\r
+ namespace)\r
#check for conflicts in namespaces between plugins\r
#FIXME: only in commands now, because I don't have Gui plugins to check\r
#FIXME: how to check for plugin-defined variables (self.stuff) ??\r
for item in namespace:\r
if item in plugin_commands_names:\r
i=plugin_commands_names.index(item) #we exploit the fact index gives the *first* occurrence of a name...\r
- print 'Error. Plugin ',plugin_name,' defines a function already defined by ',whatplugin_defines[i],'!'\r
+ print 'Error. Plugin %s defines a function %s already defined by %s!' \\r
+ % (plugin_name, item, whatplugin_defines[i])\r
print 'This should not happen. Please disable one or both plugins and contact the plugin authors to solve the conflict.'\r
print 'Hooke cannot continue.'\r
exit()\r
LOADED_DRIVERS=[]\r
for driver_name in config['drivers']:\r
try:\r
- driver=__import__(driver_name)\r
+ hooke_module=__import__('hooke.driver.'+driver_name)\r
+ driver = getattr(hooke_module.driver, driver_name)\r
try:\r
- eval('FILE_DRIVERS.append(driver.'+driver_name+'Driver)')\r
+ FILE_DRIVERS.append(getattr(driver, driver_name+'Driver'))\r
except:\r
pass\r
except ImportError:\r
self.figures=[control.get_figure() for control in self.controls]\r
self.axes=[figure.gca() for figure in self.figures]\r
\r
+ for i in range(len(self.axes)):\r
+ self.axes[i].xaxis.set_major_formatter(EngrFormatter())\r
+ self.axes[i].yaxis.set_major_formatter(EngrFormatter(2))\r
+\r
+\r
self.cpanels[1].Hide()\r
self.mainpanel.splitter.Initialize(self.cpanels[0])\r
\r
#make sure we execute _plug_init() for every command line plugin we import\r
for plugin_name in config['plugins']:\r
try:\r
- plugin=__import__(plugin_name)\r
+ hooke_module=__import__('hooke.plugin.'+plugin_name)\r
+ plugin = getattr(hooke_module.plugin, plugin_name)\r
try:\r
eval('plugin.'+plugin_name+'Gui._plug_init(self)')\r
pass\r
ylim=self.axes[dest].get_ylim() \r
self.axes[dest].set_ylim((ylim[1],ylim[0])) \r
\r
+ for i in range(len(self.axes)):\r
+ self.axes[i].xaxis.set_major_formatter(EngrFormatter())\r
+ self.axes[i].yaxis.set_major_formatter(EngrFormatter(2))\r
+\r
+\r
self.controls[dest].draw()\r
\r
\r
'''\r
if dest==None:\r
dest=self.current_plot_dest\r
-\r
- plot=None\r
- for aplot in self.plots:\r
- if aplot.destination == dest:\r
- plot=aplot\r
- return plot\r
+ try:\r
+ plot=None\r
+ for aplot in self.plots:\r
+ if aplot.destination == dest:\r
+ plot=aplot\r
+ return plot\r
+ except:\r
+ print "No curve available"\r
+ return None\r
\r
def _replot(self):\r
'''\r
# This is a recipe to a the screen.\r
# Modify the following variables as necessary.\r
#aBitmap = wx.Image(name = "wxPyWiki.jpg").ConvertToBitmap()\r
- aBitmap=wx.Image(name='hooke.jpg').ConvertToBitmap()\r
+ aBitmap=wx.Image(name=os.path.join(\r
+ config['install']['docpath'],\r
+ 'hooke.jpg')).ConvertToBitmap()\r
splashStyle = wx.SPLASH_CENTRE_ON_SCREEN | wx.SPLASH_TIMEOUT\r
splashDuration = 2000 # milliseconds\r
splashCallback = None\r
#------------------------------------------------------------------------------\r
\r
def main():\r
- \r
- #save the directory where Hooke is located\r
- config['hookedir']=os.getcwd()\r
- \r
- #now change to the working directory.\r
- try:\r
- os.chdir(config['workdir'])\r
- except OSError:\r
- print "Warning: Invalid work directory."\r
- \r
app=wx.PySimpleApp()\r
\r
def make_gui_class(*bases):\r
my_cmdline=CliThread(main_frame, list_of_events)\r
my_cmdline.start()\r
\r
- \r
app.MainLoop()\r
\r
- main()\r
+ if __name__ == '__main__':\r
+ main()\r
#!/usr/bin/env python
+# -*- coding: utf-8 -*-
'''
hooke_cli.py
This program is released under the GNU General Public License version 2.
'''
-
- from libhooke import * #FIXME
- import libhookecurve as lhc
-
- import libinput as linp
- import liboutlet as lout
-
- from libhooke import WX_GOOD
- from libhooke import HOOKE_VERSION
+ from .libhooke import HOOKE_VERSION, WX_GOOD
import wxversion
wxversion.select(WX_GOOD)
from sys import version as python_version
import platform
+ from .libhooke import PlaylistXML
+ from . import libhookecurve as lhc
+ from . import libinput as linp
+ from . import liboutlet as lout
+
+
+ class HookeCli(cmd.Cmd, object):
- class HookeCli(cmd.Cmd):
-
def __init__(self,frame,list_of_events,events_from_gui,config,drivers):
cmd.Cmd.__init__(self)
-
+
self.prompt = 'hooke: '
-
-
self.current_list=[] #the playlist we're using
-
- self.current=None #the current curve under analysis.
+ self.current=None #the current curve under analysis.
self.plots=None
'''
The actual hierarchy of the "current curve" is a bit complex:
-
+
self.current = the lhc.HookeCurve container object of the current curve
self.current.curve = the current "real" curve object as defined in the filetype driver class
self.current.curve.default_plots() = the default plots of the filetype driver.
-
- The plot objects obtained by mean of self.current.curve.default_plots()
+
+ The plot objects obtained by mean of self.current.curve.default_plots()
then undergoes modifications by the plotmanip
- modifier functions. The modified plot is saved in self.plots and used if needed by other functions.
+ modifier functions. The modified plot is saved in self.plots and used if needed by other functions.
'''
-
-
self.pointer=0 #a pointer to navigate the current list
-
+
#Things that come from outside
self.frame=frame #the wx frame we refer to
self.list_of_events=list_of_events #a list of wx events we use to interact with the GUI
self.events_from_gui=events_from_gui #the Queue object we use to have messages from the GUI
self.config=config #the configuration dictionary
self.drivers=drivers #the file format drivers
-
+
#get plot manipulation functions
plotmanip_functions=[]
for object_name in dir(self):
self.plotmanip[nameindex] = item
else:
pass
-
-
+
+
self.playlist_saved=0 #Did we save the playlist?
self.playlist_name='' #Name of playlist
self.notes_saved=1 #Did we save the notes?
#create outlet
self.outlet=lout.Outlet()
-
+
#Data that must be saved in the playlist, related to the whole playlist (not individual curves)
- self.playlist_generics={}
-
+ self.playlist_generics={}
+
#make sure we execute _plug_init() for every command line plugin we import
for plugin_name in self.config['plugins']:
try:
#load default list, if possible
self.do_loadlist(self.config['defaultlist'])
-
+
#HELPER FUNCTIONS
#Everything sending an event should be here
def _measure_N_points(self, N, whatset=1):
except Empty:
pass
return points
-
+
def _get_displayed_plot(self,dest=0):
'''
returns the currently displayed plot.
if displayed_plot:
break
return displayed_plot
-
+
def _send_plot(self,plots):
'''
sends a plot to the GUI
'''
wx.PostEvent(self.frame, self.list_of_events['plot_graph'](plots=plots))
return
-
+
def _find_plotmanip(self, name):
'''
returns a plot manipulator function from its name
'''
return self.plotmanip[self.config['plotmanips'].index(name)]
-
+
def _clickize(self, xvector, yvector, index):
'''
- returns a ClickedPoint() object from an index and vectors of x, y coordinates
+ returns a ClickedPoint() object from an index and vectors of x, y coordinates
'''
point=ClickedPoint()
point.index=index
point.absolute_coords=xvector[index],yvector[index]
point.find_graph_coords(xvector,yvector)
return point
-
+
#HERE COMMANDS BEGIN
-
+
def help_set(self):
print '''
SET
value=None
else:
value=args[1]
-
+
self.config[key]=value
self.do_plot(0)
-
+
#PLAYLIST MANAGEMENT AND NAVIGATION
#------------------------------------
-
+
def help_loadlist(self):
print '''
LOADLIST
#checking for args: if nothing is given as input, we warn and exit.
while len(args)==0:
args=linp.safeinput('File to load?')
-
+
arglist=args.split()
play_to_load=arglist[0]
-
+
#We assume a Hooke playlist has the extension .hkp
if play_to_load[-4:] != '.hkp':
play_to_load+='.hkp'
-
- try:
+
+ try:
playxml=PlaylistXML()
self.current_list, self.playlist_generics=playxml.load(play_to_load)
self.current_playxml=playxml
except IOError:
- print 'File not found.'
+ print 'File not found.', play_to_load
return
-
- print 'Loaded %s curves' %len(self.current_list)
-
+
+ print 'Loaded %s curves from %s' \
+ % (len(self.current_list), play_to_load)
+
if 'pointer' in self.playlist_generics.keys():
self.pointer=int(self.playlist_generics['pointer'])
else:
#if no pointer is found, set the current curve as the first curve of the loaded playlist
self.pointer=0
print 'Starting at curve ',self.pointer
-
+
self.current=self.current_list[self.pointer]
-
+
#resets saved/notes saved state
self.playlist_saved=0
self.playlist_name=''
- self.notes_saved=0
-
+ self.notes_saved=0
+
self.do_plot(0)
-
-
+
+
def help_genlist(self):
print '''
GENLIST
are all equivalent syntax.
------------
Syntax: genlist [input files]
-
+
'''
def do_genlist(self,args):
#args list is: input path, output name
if len(args)==0:
args=linp.safeinput('Input files?')
-
- arglist=args.split()
+
+ arglist=args.split()
list_path=arglist[0]
-
+
#if it's a directory, is like /directory/*.*
#FIXME: probably a bit kludgy.
- if os.path.isdir(list_path):
+ if os.path.isdir(list_path):
if platform.system == 'Windows':
SLASH="\\"
else:
SLASH="/"
if list_path[-1] == SLASH:
- list_path=list_path+'*.*'
- else:
- list_path=list_path+SLASH+'*.*'
-
- #expanding correctly the input list with the glob module :)
+ list_path=list_path+'*'
+ else:
+ list_path=list_path+SLASH+'*'
+
+ #expanding correctly the input list with the glob module :)
list_files=glob.glob(list_path)
list_files.sort()
for item in list_files:
try:
if os.path.isfile(item):
- self.current_list.append(lhc.HookeCurve(os.path.abspath(item)))
+ self.current_list.append(lhc.HookeCurve(os.path.abspath(item)))
except:
pass
-
- self.pointer=0
+
+ self.pointer=0
if len(self.current_list)>0:
self.current=self.current_list[self.pointer]
else:
print 'Empty list!'
return
-
+
#resets saved/notes saved state
self.playlist_saved=0
self.playlist_name=''
- self.notes_saved=0
-
+ self.notes_saved=0
+
self.do_plot(0)
-
-
+
+
def do_savelist(self,args):
'''
SAVELIST
'''
while len(args)==0:
args=linp.safeinput('Output file?',['savedlist.txt'])
-
+
output_filename=args
-
+
self.playlist_generics['pointer']=self.pointer
-
+
#autocomplete filename if not specified
if output_filename[-4:] != '.hkp':
output_filename+='.hkp'
-
+
playxml=PlaylistXML()
playxml.export(self.current_list, self.playlist_generics)
- playxml.save(output_filename)
-
+ playxml.save(output_filename)
+
#remembers we have saved playlist
self.playlist_saved=1
-
+
def help_addtolist(self):
print '''
ADDTOLIST
print 'You must give the input filename you want to add'
self.help_addtolist()
return
-
+
filenames=glob.glob(args)
-
+
for filename in filenames:
self.current_list.append(lhc.HookeCurve(os.path.abspath(filename)))
#we need to save playlist
self.playlist_saved=0
-
+
def help_printlist(self):
print '''
PRINTLIST
def do_printlist(self,args):
for item in self.current_list:
print item.path
-
-
+
+
def help_jump(self):
print '''
JUMP
Syntax: jump {$curve}
If the curve is not in the current playlist, it politely asks if we want to add it.
- '''
+ '''
def do_jump(self,filename):
'''
jumps to the curve with the given filename.
if the filename is not in the playlist, it asks if we must add it or not.
'''
-
+
if filename=='':
filename=linp.safeinput('Jump to?')
-
+
filepath=os.path.abspath(filename)
print filepath
-
+
c=0
item_not_found=1
while item_not_found:
try:
-
+
if self.current_list[c].path == filepath:
self.pointer=c
self.current=self.current_list[self.pointer]
item_not_found=0
self.do_plot(0)
else:
- c+=1
+ c+=1
except IndexError:
#We've found the end of the list.
answer=linp.safeinput('Curve not found in playlist. Add it to list?',['y'])
self.current=self.current_list[-1]
self.pointer=(len(current_list)-1)
self.do_plot(0)
-
+
item_not_found=0
-
-
+
+
def do_index(self,args):
'''
INDEX
-----
Syntax: index
'''
- print self.pointer+1, 'of', len(self.current_list)
-
-
+ print self.pointer+1, 'of', len(self.current_list)
+
+
def help_next(self):
print '''
NEXT
print 'No curve file loaded, currently!'
print 'This should not happen, report to http://code.google.com/p/hooke'
return
-
+
if self.pointer == (len(self.current_list)-1):
self.pointer=0
print 'Playlist finished; back to first curve.'
else:
self.pointer+=1
-
+
self.current=self.current_list[self.pointer]
self.do_plot(0)
-
-
+
+
def help_n(self):
self.help_next()
def do_n(self,args):
self.do_next(args)
-
+
def help_previous(self,args):
print '''
PREVIOUS
return
if self.pointer == 0:
self.pointer=(len(self.current_list)-1)
- print 'Start of playlist; jump to last curve.'
+ print 'Start of playlist; jump to last curve.'
else:
self.pointer-=1
-
+
self.current=self.current_list[self.pointer]
self.do_plot(args)
-
-
+
+
def help_p(self):
self.help_previous()
def do_p(self,args):
self.do_previous(args)
-
+
#PLOT INTERACTION COMMANDS
- #-------------------------------
+ #-------------------------------
def help_plot(self):
print '''
PLOT
Syntax: plot
'''
def do_plot(self,args):
-
- self.current.identify(self.drivers)
+ if self.current.identify(self.drivers) == False:
+ return
self.plots=self.current.curve.default_plots()
try:
self.plots=self.current.curve.default_plots()
print 'Unexpected error occurred in do_plot().'
print e
return
-
+
#apply the plotmanip functions eventually present
nplots=len(self.plots)
c=0
while c<nplots:
for function in self.plotmanip: #FIXME: something strange happens about self.plotmanip[0]
self.plots[c]=function(self.plots[c], self.current)
-
+
self.plots[c].xaxes=self.config['xaxes'] #FIXME: in the future, xaxes and yaxes should be set per-plot
self.plots[c].yaxes=self.config['yaxes']
-
+
c+=1
self._send_plot(self.plots)
-
+
def _delta(self, set=1):
'''
calculates the difference between two clicked points
unitx=self.plots[points[0].dest].units[0]
unity=self.plots[points[0].dest].units[1]
return dx,unitx,dy,unity
-
+
def do_delta(self,args):
'''
DELTA
-
+
Measures the delta X and delta Y between two points.
----
Syntax: delta
dx,unitx,dy,unity=self._delta()
print str(dx)+' '+unitx
print str(dy)+' '+unity
-
+
def _point(self, set=1):
'''calculates the coordinates of a single clicked point'''
print 'Click one point'
point=self._measure_N_points(N=1, whatset=set)
-
+
x=point[0].graph_coords[0]
y=point[0].graph_coords[1]
unitx=self.plots[point[0].dest].units[0]
unity=self.plots[point[0].dest].units[1]
return x,unitx,y,unity
-
+
def do_point(self,args):
'''
POINT
-
+
Returns the coordinates of a point on the graph.
----
Syntax: point
print str(x)+' '+unitx
print str(y)+' '+unity
to_dump='point '+self.current.path+' '+str(x)+' '+unitx+', '+str(y)+' '+unity
- self.outlet.push(to_dump)
-
-
+ self.outlet.push(to_dump)
+
+
def do_close(self,args=None):
'''
CLOSE
to_close=1
else:
to_close=1
-
+
close_plot=self.list_of_events['close_plot']
wx.PostEvent(self.frame, close_plot(to_close=to_close))
-
+
def do_show(self,args=None):
'''
SHOW
Shows both plots.
- '''
+ '''
show_plots=self.list_of_events['show_plots']
wx.PostEvent(self.frame, show_plots())
-
-
-
+
+
+
#PLOT EXPORT AND MANIPULATION COMMANDS
def help_export(self):
print '''
'''
def do_export(self,args):
#FIXME: the bottom plot doesn't have the title
-
+
dest=0
-
+
if len(args)==0:
#FIXME: We have to go into the libinput stuff and fix it, for now here's a dummy replacement...
#name=linp.safeinput('Filename?',[self.current.path+'.png'])
args=args.split()
name=args[0]
if len(args) > 1:
- dest=int(args[1])
-
+ dest=int(args[1])
+
export_image=self.list_of_events['export_image']
wx.PostEvent(self.frame, export_image(name=name, dest=dest))
-
-
+
+
def help_txt(self):
print '''
TXT
X1 , Y1 , X2 , Y2 , X3 , Y3 ...
-------------
- Syntax: txt [filename] {plot to export} or
- txt [filename] all
- all : To save all the curves in different windows in a single file.
+ Syntax: txt [filename] {plot to export}
'''
def do_txt(self,args):
-
+
def transposed2(lists, defval=0):
'''
transposes a list of lists, i.e. from [[a,b,c],[x,y,z]] to [[a,x],[b,y],[c,z]] without losing
'''
if not lists: return []
return map(lambda *row: [elem or defval for elem in row], *lists)
-
+
whichplot=0
args=args.split()
if len(args)==0:
else:
filename=linp.checkalphainput(args[0],self.current.path+'.txt',[])
try:
- if args[1]=="all":
- whichplot="all"
- else:
- whichplot=int(args[1])
+ whichplot=int(args[1])
except:
pass
-
- if whichplot!="all":
- try:
- outofplot=self.plots[whichplot].vectors
- except:
- print "Plot index out of range."
- return 0
- columns=[]
- for dataset in self.plots[whichplot].vectors:
- for i in range(0,len(dataset)):
- columns.append([])
- for value in dataset[i]:
- #columns[-1].append(str(value*(10**9)))
- columns[-1].append(str(value))
- rows=transposed2(columns, 'nan')
- rows=[' , '.join(item) for item in rows]
- text='\n'.join(rows)
-
- txtfile=open(filename,'w+')
- #Save units of measure in header
- txtfile.write('X:'+self.plots[whichplot].units[0]+'\n')
- txtfile.write('Y:'+self.plots[whichplot].units[1]+'\n')
- txtfile.write(text)
- txtfile.close()
- else:
- columns=[]
- for wp in range(len(self.plots)):
- for dataset in self.plots[wp].vectors:
- for i in range(0,len(dataset)):
- columns.append([])
- for value in dataset[i]:
- #columns[-1].append(str(value*(10**9)))
- columns[-1].append(str(value))
- rows=transposed2(columns, 'nan')
- rows=[' , '.join(item) for item in rows]
- text='\n'.join(rows)
-
- txtfile=open(filename,'w+')
- #Save units of measure in header
- for i in range(len(self.plots)):
- txtfile.write('X:'+self.plots[i].units[0]+'\n')
- txtfile.write('Y:'+self.plots[i].units[1]+'\n')
- txtfile.write(text)
- txtfile.close()
-
-
- columns=[]
++ try:
++ outofplot=self.plots[whichplot].vectors
++ except:
++ print "Plot index out of range."
++ return 0
++
++ columns=[]
+ for dataset in self.plots[whichplot].vectors:
+ for i in range(0,len(dataset)):
+ columns.append([])
+ for value in dataset[i]:
+ columns[-1].append(str(value))
+
+ rows=transposed2(columns, 'nan')
+ rows=[' , '.join(item) for item in rows]
+ text='\n'.join(rows)
+
+ txtfile=open(filename,'w+')
+ #Save units of measure in header
+ txtfile.write('X:'+self.plots[whichplot].units[0]+'\n')
+ txtfile.write('Y:'+self.plots[whichplot].units[1]+'\n')
+ txtfile.write(text)
+ txtfile.close()
+
+
#LOGGING, REPORTING, NOTETAKING
-
+
def do_note_old(self,args):
'''
NOTE_OLD
**deprecated**: Use note instead. Will be removed in 0.9
-
+
Writes or displays a note about the current curve.
If [anything] is empty, it displays the note, otherwise it adds a note.
The note is then saved in the playlist if you issue a savelist command
---------------
- Syntax: note_old [anything]
+ Syntax: note_old [anything]
'''
if args=='':
args=args.decode('ascii','ignore')
if len(args)==0:
args='?'
-
+
self.current_list[self.pointer].notes=args
self.notes_saved=0
-
-
+
+
def do_note(self,args):
'''
NOTE
-
+
Writes or displays a note about the current curve.
If [anything] is empty, it displays the note, otherwise it adds a note.
The note is then saved in the playlist if you issue a savelist command.
---------------
- Syntax: note_old [anything]
+        Syntax: note [anything]
'''
if args=='':
print self.current_list[self.pointer].notes
else:
if self.notes_filename == None:
+ if not os.path.exists(os.path.realpath('output')):
+ os.mkdir('output')
self.notes_filename=raw_input('Notebook filename? ')
+ self.notes_filename=os.path.join(os.path.realpath('output'),self.notes_filename)
title_line='Notes taken at '+time.asctime()+'\n'
- f=open(self.notes_filename,'w')
+ f=open(self.notes_filename,'a')
f.write(title_line)
f.close()
-
- #bypass UnicodeDecodeError troubles
+
+ #bypass UnicodeDecodeError troubles
try:
args=args.decode('ascii')
except:
if len(args)==0:
args='?'
self.current_list[self.pointer].notes=args
-
+
f=open(self.notes_filename,'a+')
note_string=(self.current.path+' | '+self.current.notes+'\n')
f.write(note_string)
f.close()
-
+
def help_notelog(self):
print '''
NOTELOG
Writes a log of the notes taken during the session for the current
playlist
- --------------
+ --------------
Syntax notelog [filename]
- '''
+ '''
def do_notelog(self,args):
-
+
if len(args)==0:
args=linp.safeinput('Notelog filename?',['notelog.txt'])
-
+
note_lines='Notes taken at '+time.asctime()+'\n'
for item in self.current_list:
if len(item.notes)>0:
#FIXME: file path should be truncated...
note_string=(item.path+' | '+item.notes+'\n')
note_lines+=note_string
-
+
try:
f=open(args,'a+')
f.write(note_lines)
except IOError, (ErrorNumber, ErrorMessage):
print 'Error: notes cannot be saved. Catched exception:'
print ErrorMessage
-
+
self.notes_saved=1
def help_copylog(self):
Syntax copylog [directory]
'''
def do_copylog(self,args):
-
+
if len(args)==0:
args=linp.safeinput('Destination directory?') #TODO default
-
+
mydir=os.path.abspath(args)
if not os.path.isdir(mydir):
print 'Destination is not a directory.'
return
-
+
for item in self.current_list:
if len(item.notes)>0:
try:
self.outlet.delete(args)
#OS INTERACTION COMMANDS
- #-----------------
+ #-----------------
def help_dir(self):
print '''
DIR, LS
ls [path]
'''
def do_dir(self,args):
-
+
if len(args)==0:
args='*'
print glob.glob(args)
-
+
def help_ls(self):
self.help_dir(self)
def do_ls(self,args):
self.do_dir(args)
-
+
def help_pwd(self):
print '''
PWD
Syntax: pwd
'''
def do_pwd(self,args):
- print os.getcwd()
-
+ print os.getcwd()
+
def help_cd(self):
print '''
CD
os.chdir(mypath)
except OSError:
print 'I cannot access that directory.'
-
-
+
+
def help_system(self):
print '''
SYSTEM
'''
pass
def do_system(self,args):
- waste=os.system(args)
-
+ waste=os.system(args)
+
def do_debug(self,args):
'''
this is a dummy command where I put debugging things
'''
print self.config['plotmanips']
pass
-
+
def help_current(self):
print '''
CURRENT
'''
def do_current(self,args):
print self.current.path
-
+
def do_info(self,args):
'''
INFO
for set in plot.vectors:
lengths=[len(item) for item in set]
print 'Data set size: ',lengths
-
+
def do_version(self,args):
'''
VERSION
------
Prints the current version and codename, plus library version. Useful for debugging.
- '''
+ '''
print 'Hooke '+__version__+' ('+__codename__+')'
print 'Released on: '+__releasedate__
print '---'
print 'Platform: '+str(platform.uname())
print '---'
print 'Loaded plugins:',self.config['loaded_plugins']
-
+
def help_exit(self):
print '''
EXIT, QUIT
------
Syntax: exit
Syntax: quit
- '''
+ '''
def do_exit(self,args):
we_exit='N'
-
+
if (not self.playlist_saved) or (not self.notes_saved):
we_exit=linp.safeinput('You did not save your playlist and/or notes. Exit?',['n'])
else:
we_exit=linp.safeinput('Exit?',['y'])
-
+
if we_exit[0].upper()=='Y':
wx.CallAfter(self.frame.Close)
sys.exit(0)
else:
return
-
+
def help_quit(self):
self.help_exit()
def do_quit(self,args):
self.do_exit(args)
-
-
-
if __name__ == '__main__':
mycli=HookeCli(0)
mycli.cmdloop()
#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
'''
libhooke.py
This program is released under the GNU General Public License version 2.
'''
-
-
- import libhookecurve as lhc
-
import scipy
- import scipy.signal
- import scipy.optimize
- import scipy.stats
import numpy
import xml.dom.minidom
import os
+ import os.path
import string
import csv
+from matplotlib.ticker import ScalarFormatter
+
+ from . import libhookecurve as lhc
+
HOOKE_VERSION=['0.8.3_devel', 'Seinei', '2008-04-16']
- WX_GOOD=['2.6','2.8']
-
- class PlaylistXML:
+ WX_GOOD=['2.6','2.8']
+
+ class PlaylistXML(object):
'''
This module allows for import/export of an XML playlist into/out of a list of HookeCurve objects
'''
-
+
def __init__(self):
-
+
self.playlist=None #the DOM object representing the playlist data structure
self.playpath=None #the path of the playlist XML file
self.plaything=None
self.hidden_attributes=['curve'] #This list contains hidden attributes that we don't want to go into the playlist.
-
+
def export(self, list_of_hooke_curves, generics):
'''
Creates an initial playlist from a list of files.
<element path="/my/file/path/"/ attribute="attribute">
<element path="...">
</playlist>
- '''
-
+ '''
+
#create the output playlist, a simple XML document
impl=xml.dom.minidom.getDOMImplementation()
#create the document DOM object and the root element
newdoc=impl.createDocument(None, "playlist",None)
top_element=newdoc.documentElement
-
+
#save generics variables
playlist_generics=newdoc.createElement("generics")
top_element.appendChild(playlist_generics)
for key in generics.keys():
newdoc.createAttribute(key)
playlist_generics.setAttribute(key,str(generics[key]))
-
+
#save curves and their attributes
for item in list_of_hooke_curves:
#playlist_element=newdoc.createElement("curve")
for key in item.__dict__:
if not (key in self.hidden_attributes):
newdoc.createAttribute(key)
- playlist_element.setAttribute(key,str(item.__dict__[key]))
-
+ playlist_element.setAttribute(key,str(item.__dict__[key]))
+
self.playlist=newdoc
-
+
def load(self,filename):
'''
loads a playlist file
'''
myplay=file(filename)
self.playpath=filename
-
+
#the following 3 lines are needed to strip newlines. otherwise, since newlines
#are XML elements too (why?), the parser would read them (and re-save them, multiplying
#newlines...)
the_file=myplay.read()
the_file_lines=the_file.split('\n')
the_file=''.join(the_file_lines)
-
- self.playlist=xml.dom.minidom.parseString(the_file)
-
+
+ self.playlist=xml.dom.minidom.parseString(the_file)
+
#inner parsing functions
def handlePlaylist(playlist):
list_of_files=playlist.getElementsByTagName("element")
generics=playlist.getElementsByTagName("generics")
return handleFiles(list_of_files), handleGenerics(generics)
-
+
def handleGenerics(generics):
generics_dict={}
if len(generics)==0:
return generics_dict
-
+
for attribute in generics[0].attributes.keys():
generics_dict[attribute]=generics[0].getAttribute(attribute)
return generics_dict
-
+
def handleFiles(list_of_files):
new_playlist=[]
for myfile in list_of_files:
#rebuild a data structure from the xml attributes
- the_curve=lhc.HookeCurve(myfile.getAttribute('path'))
- for attribute in myfile.attributes.keys(): #extract attributes for the single curve
+ the_curve=lhc.HookeCurve(
+ os.path.join(os.path.dirname(self.playpath),
+ myfile.getAttribute('path')))
+ for attribute in myfile.attributes.keys():
+ #extract attributes for the single curve
+ if attribute == 'path':
+ continue # we already added this attribute
the_curve.__dict__[attribute]=myfile.getAttribute(attribute)
new_playlist.append(the_curve)
-
+
return new_playlist #this is the true thing returned at the end of this function...(FIXME: clarity)
-
+
return handlePlaylist(self.playlist)
-
+
def save(self,output_filename):
'''
except IOError:
print 'libhooke.py : Cannot save playlist. Wrong path or filename'
return
-
+
self.playlist.writexml(outfile,indent='\n')
outfile.close()
+ def config_file_path(filename, config_dir=None):
+ if config_dir == None:
+ config_dir = os.path.abspath(
+ os.path.join(os.path.dirname(os.path.dirname(__file__)), 'conf'))
+ return os.path.join(config_dir, filename)
- class HookeConfig:
+ class HookeConfig(object):
'''
Handling of Hooke configuration file
-
+
Mostly based on the simple-yet-useful examples of the Python Library Reference
about xml.dom.minidom
-
+
FIXME: starting to look a mess, should require refactoring
'''
-
- def __init__(self):
+
+ def __init__(self, config_dir=None):
self.config={}
+ self.config['install']={}
self.config['plugins']=[]
self.config['drivers']=[]
self.config['plotmanips']=[]
-
+ self.config_dir = config_dir
+
def load_config(self, filename):
- myconfig=file(filename)
-
+ myconfig=file(config_file_path(filename, config_dir=self.config_dir))
+
#the following 3 lines are needed to strip newlines. otherwise, since newlines
#are XML elements too, the parser would read them (and re-save them, multiplying
#newlines...)
the_file=myconfig.read()
the_file_lines=the_file.split('\n')
the_file=''.join(the_file_lines)
-
- self.config_tree=xml.dom.minidom.parseString(the_file)
-
+
+ self.config_tree=xml.dom.minidom.parseString(the_file)
+
def getText(nodelist):
#take the text from a nodelist
#from Python Library Reference 13.7.2
if node.nodeType == node.TEXT_NODE:
rc += node.data
return rc
-
+
def handleConfig(config):
+ install_elements=config.getElementsByTagName("install")
display_elements=config.getElementsByTagName("display")
plugins_elements=config.getElementsByTagName("plugins")
drivers_elements=config.getElementsByTagName("drivers")
- workdir_elements=config.getElementsByTagName("workdir")
defaultlist_elements=config.getElementsByTagName("defaultlist")
plotmanip_elements=config.getElementsByTagName("plotmanips")
+ handleInstall(install_elements)
handleDisplay(display_elements)
handlePlugins(plugins_elements)
handleDrivers(drivers_elements)
- handleWorkdir(workdir_elements)
handleDefaultlist(defaultlist_elements)
handlePlotmanip(plotmanip_elements)
-
+
+ def handleInstall(install_elements):
+ for install in install_elements:
+ for node in install.childNodes:
+ if node.nodeType == node.TEXT_NODE:
+ continue
+ path = os.path.abspath(getText(node.childNodes).strip())
+ self.config['install'][str(node.tagName)] = path
+
def handleDisplay(display_elements):
for element in display_elements:
for attribute in element.attributes.keys():
self.config[attribute]=element.getAttribute(attribute)
-
+
def handlePlugins(plugins):
for plugin in plugins[0].childNodes:
try:
self.config['drivers'].append(str(driver.tagName))
except: #if we allow fancy formatting of xml, there is a text node, so tagName fails for it...
pass
-
+
def handlePlotmanip(plotmanips):
for plotmanip in plotmanips[0].childNodes:
try:
self.config['plotmanips'].append(str(plotmanip.tagName))
except: #if we allow fancy formatting of xml, there is a text node, so tagName fails for it...
pass
-
- def handleWorkdir(workdir):
- '''
- default working directory
- '''
- wdir=getText(workdir[0].childNodes)
- self.config['workdir']=wdir.strip()
-
+
def handleDefaultlist(defaultlist):
'''
default playlist
'''
dflist=getText(defaultlist[0].childNodes)
self.config['defaultlist']=dflist.strip()
-
+
handleConfig(self.config_tree)
#making items in the dictionary more machine-readable
for item in self.config.keys():
self.config[item]=None
else:
pass
-
+
return self.config
-
-
+
+
def save_config(self, config_filename):
print 'Not Implemented.'
- pass
+ pass
-class ClickedPoint(object):
+class EngrFormatter(ScalarFormatter):
+    """A variation of the standard ScalarFormatter that uses only multiples
+of three in the mantissa (engineering notation). A fixed number of decimals
+can be displayed with the optional parameter `ndec`. If `ndec` is None
+(default), the number of decimals is inferred from the current ticks.
+ """
+ def __init__(self, ndec=None, useOffset=True, useMathText=False):
+ ScalarFormatter.__init__(self, useOffset, useMathText)
+ if ndec is None or ndec < 0:
+ self.format = None
+ elif ndec == 0:
+ self.format = "%d"
+ else:
+ self.format = "%%1.%if" % ndec
+ #........................
+
+ def _set_orderOfMagnitude(self, mrange):
+ """Sets the order of magnitude."""
+ locs = numpy.absolute(self.locs)
+ if self.offset:
+ oom = numpy.floor(numpy.log10(mrange))
+ else:
+ if locs[0] > locs[-1]:
+ val = locs[0]
+ else:
+ val = locs[-1]
+ if val == 0:
+ oom = 0
+ else:
+ oom = numpy.floor(numpy.log10(val))
+ if oom <= -3:
+ self.orderOfMagnitude = 3*(oom//3)
+ elif oom <= -1:
+ self.orderOfMagnitude = -3
+ elif oom >= 4:
+ self.orderOfMagnitude = 3*(oom//3)
+ else:
+ self.orderOfMagnitude = 0
+
+
+ #........................
+ def _set_format(self):
+ """Sets the format string to format all ticklabels."""
+ # set the format string to format all the ticklabels
+ locs = (numpy.array(self.locs)-self.offset) / 10**self.orderOfMagnitude+1e-15
+ sigfigs = [len(str('%1.3f'% loc).split('.')[1].rstrip('0')) \
+ for loc in locs]
+ sigfigs.sort()
+ if self.format is None:
+ self.format = '%1.' + str(sigfigs[-1]) + 'f'
+ if self._usetex or self._useMathText: self.format = '$%s$'%self.format
+
+
+
+class ClickedPoint:
'''
this class defines what a clicked point on the curve plot is
'''
def __init__(self):
-
+
self.is_marker=None #boolean ; decides if it is a marker
self.is_line_edge=None #boolean ; decides if it is the edge of a line (unused)
self.absolute_coords=(None,None) #(float,float) ; the absolute coordinates of the clicked point on the graph
self.graph_coords=(None,None) #(float,float) ; the coordinates of the plot that are nearest in X to the clicked point
self.index=None #integer ; the index of the clicked point with respect to the vector selected
self.dest=None #0 or 1 ; 0=top plot 1=bottom plot
-
-
+
+
def find_graph_coords_old(self, xvector, yvector):
'''
Given a clicked point on the plot, finds the nearest point in the dataset (in X) that
corresponds to the clicked point.
OLD & DEPRECATED - to be removed
'''
-
+
#FIXME: a general algorithm using min() is needed!
- print '---DEPRECATED FIND_GRAPH_COORDS_OLD---'
+ #print '---DEPRECATED FIND_GRAPH_COORDS_OLD---'
best_index=0
best_dist=10**9 #should be more than enough given the scale
-
+
for index in scipy.arange(1,len(xvector),1):
dist=((self.absolute_coords[0]-xvector[index])**2)+(100*((self.absolute_coords[1]-yvector[index])))**2
#TODO, generalize? y coordinate is multiplied by 100 due to scale differences in the plot
if dist<best_dist:
best_index=index
best_dist=dist
-
+
self.index=best_index
self.graph_coords=(xvector[best_index],yvector[best_index])
return
-
+
def find_graph_coords(self,xvector,yvector):
'''
Given a clicked point on the plot, finds the nearest point in the dataset that
dists=[]
for index in scipy.arange(1,len(xvector),1):
dists.append(((self.absolute_coords[0]-xvector[index])**2)+((self.absolute_coords[1]-yvector[index])**2))
-
+
self.index=dists.index(min(dists))
self.graph_coords=(xvector[self.index],yvector[self.index])
#-----------------------------------------
- #CSV-HELPING FUNCTIONS
-
+ #CSV-HELPING FUNCTIONS
+
def transposed2(lists, defval=0):
'''
transposes a list of lists, i.e. from [[a,b,c],[x,y,z]] to [[a,x],[b,y],[c,z]] without losing
'''
if not lists: return []
return map(lambda *row: [elem or defval for elem in row], *lists)
-
+
def csv_write_dictionary(f, data, sorting='COLUMNS'):
'''
Writes a CSV file from a dictionary, with keys as first column or row
Keys are in "random" order.
-
+
Keys should be strings
Values should be lists or other iterables
'''
writer.writerow(keys)
for item in t_values:
writer.writerow(item)
-
+
if sorting=='ROWS':
print 'Not implemented!' #FIXME: implement it.
- #-----------------------------------------
-
+ #-----------------------------------------
+
def debug():
'''
debug stuff from latest rewrite of hooke_playlist.py
- #!/usr/bin/env python
# -*- coding: utf-8 -*-
- from libhooke import WX_GOOD, ClickedPoint
+ from hooke.libhooke import WX_GOOD, ClickedPoint
+
import wxversion
wxversion.select(WX_GOOD)
from wx import PostEvent
warnings.simplefilter('ignore',np.RankWarning)
- class autopeakCommands:
-
+ class autopeakCommands(object):
+
def do_autopeak(self,args):
'''
AUTOPEAK
- measures peak maximum forces with a baseline
- measures slope in proximity of peak maximum
Requires flatten plotmanipulator , fit.py plugin , flatfilts.py plugin with convfilt
-
+
Syntax:
- autopeak [rebase] [pl=value] [manual=value] [t=value] [noauto] [reclick]
-
+ autopeak [rebase] [pl=value] [t=value] [noauto] [reclick]
+
rebase : Re-asks baseline interval
-
- pl=[value] : Use a fixed persistent length for the fit. If pl is not given,
- the fit will be a 2-variable
+
+        pl=[value] : Use a fixed persistence length for the fit. If pl is not given,
+ the fit will be a 2-variable
fit. DO NOT put spaces between 'pl', '=' and the value.
- The value must be in meters.
+ The value must be in meters.
Scientific notation like 0.35e-9 is fine.
- manual=[value]: Allow to choose the peaks to analyze. It need, as a value, the number
- of the peaks to analyze. NOTE: It have to be used with the manual selection of the baseline.
-
t=[value] : Use a user-defined temperature. The value must be in
kelvins; by default it is 293 K.
DO NOT put spaces between 't', '=' and the value.
-
- noauto : allows for clicking the contact point by
+
+ noauto : allows for clicking the contact point by
hand (otherwise it is automatically estimated) the first time.
If subsequent measurements are made, the same contact point
clicked the first time is used
-
+
reclick : redefines by hand the contact point, if noauto has been used before
but the user is unsatisfied of the previously choosen contact point.
-
+
usepoints : fit interval by number of points instead than by nanometers
-
+
noflatten : does not use the "flatten" plot manipulator
-
+
When you first issue the command, it will ask for the filename. If you are giving the filename
of an existing file, autopeak will resume it and append measurements to it. If you are giving
a new filename, it will create the file and append to it until you close Hooke.
-
-
+
+
Useful variables (to set with SET command):
---
fit_function = type of function to use for elasticity. If "wlc" worm-like chain is used, if "fjc" freely jointed
chain is used
-
+
temperature= temperature of the system for wlc/fjc fit (in K)
-
+
auto_slope_span = number of points on which measure the slope, for slope
-
+
auto_fit_nm = number of nm to fit before the peak maximum, for WLC/FJC (if usepoints false)
auto_fit_points = number of points to fit before the peak maximum, for WLC/FJC (if usepoints true)
-
+
baseline_clicks = -1: no baseline, f=0 at the contact point (whether hand-picked or automatically found)
0: automatic baseline
1: decide baseline with a single click and length defined in auto_left_baseline
2: let user click points of baseline
auto_left_baseline = length in nm to use as baseline from the right point (if baseline_clicks=0 , 1)
auto_right_baseline = distance in nm of peak-most baseline point from last peak (if baseline_clicks = 0)
-
+
auto_min_p ; auto_max_p = Minimum and maximum persistence length (if using WLC) or Kuhn length (if using FJC)
outside of which the peak is automatically discarded (in nm)
'''
-
+
- #MACROS.
- #FIXME: to move outside function
- def fit_interval_nm(start_index,plot,nm,backwards):
- '''
- Calculates the number of points to fit, given a fit interval in nm
- start_index: index of point
- plot: plot to use
- backwards: if true, finds a point backwards.
- '''
- whatset=1 #FIXME: should be decidable
- x_vect=plot.vectors[1][0]
-
- c=0
- i=start_index
- start=x_vect[start_index]
- maxlen=len(x_vect)
- while abs(x_vect[i]-x_vect[start_index])*(10**9) < nm:
- if i==0 or i==maxlen-1: #we reached boundaries of vector!
- return c
-
- if backwards:
- i-=1
- else:
- i+=1
- c+=1
- return c
-
- def pickup_contact_point():
- '''macro to pick up the contact point by clicking'''
- contact_point=self._measure_N_points(N=1, whatset=1)[0]
- contact_point_index=contact_point.index
- self.wlccontact_point=contact_point
- self.wlccontact_index=contact_point.index
- self.wlccurrent=self.current.path
- return contact_point, contact_point_index
-
- def find_current_peaks(noflatten):
- #Find peaks.
- defplot=self.current.curve.default_plots()[0]
- if not noflatten:
- flatten=self._find_plotmanip('flatten') #Extract flatten plotmanip
- defplot=flatten(defplot, self.current, customvalue=1) #Flatten curve before feeding it to has_peaks
- peak_location,peak_size=self.has_peaks(defplot, self.convfilt_config['mindeviation'])
- return peak_location, peak_size
-
#default fit etc. variables
pl_value=None
T=self.config['temperature']
-
+
slope_span=int(self.config['auto_slope_span'])
delta_force=10
rebase=False #if true=we select rebase
noflatten=False #if true=we avoid flattening
-
+
#initialize output data vectors
c_lengths=[]
p_lengths=[]
sigma_p_lengths=[]
forces=[]
slopes=[]
- qstds=[]
- manualpoints=0
#pick up plot
displayed_plot=self._get_displayed_plot(0)
-
+
#COMMAND LINE PARSING
#--Using points instead of nm interval
if 'usepoints' in args.split():
usepoints=False
#--Recalculate baseline
if 'rebase' in args or (self.basecurrent != self.current.path):
- rebase=True
-
+ rebase=True
+
if 'noflatten' in args:
noflatten=True
-
+
#--Custom persistent length / custom temperature
for arg in args.split():
#look for a persistent length argument.
pl_expression=arg.split('=')
pl_value=float(pl_expression[1]) #actual value
#look for a T argument. FIXME: spaces are not allowed between 'pl' and value
- if ('t=' in arg[0:3]) or ('T=' in arg[0:3]):
+ if ('t=' in arg[0:2]) or ('T=' in arg[0:2]):
t_expression=arg.split('=')
T=float(t_expression[1])
- if ('manual=' in arg):
- print "Manual selection of the interesting peaks."
- manualpoints_expression=arg.split('=')
- manualpoints=int(manualpoints_expression[1])
#--Contact point arguments
if 'reclick' in args.split():
print 'Click contact point'
- contact_point, contact_point_index = pickup_contact_point()
+ contact_point, contact_point_index = self.pickup_contact_point()
elif 'noauto' in args.split():
if self.wlccontact_index==None or self.wlccurrent != self.current.path:
print 'Click contact point'
- contact_point , contact_point_index = pickup_contact_point()
+ contact_point , contact_point_index = self.pickup_contact_point()
else:
contact_point=self.wlccontact_point
contact_point_index=self.wlccontact_index
cindex=self.find_contact_point()
contact_point=self._clickize(displayed_plot.vectors[1][0], displayed_plot.vectors[1][1], cindex)
#--END COMMAND LINE PARSING--
-
-
- peak_location, peak_size = find_current_peaks(noflatten)
-
+
+
-
- if not manualpoints:
- peak_location, peak_size = self.find_current_peaks(noflatten)
- else:
- peak_location=[]
- for i in range(manualpoints):
- print "Select manually the peaks:"
- peak_location.append( (self._measure_N_points(N=1, whatset=1)[0]).index )
++ peak_location, peak_size = self.find_current_peaks(noflatten)
+
if len(peak_location) == 0:
print 'No peaks to fit.'
return
-
+
fitplot=copy.deepcopy(displayed_plot)
-
+
#Pick up force baseline
if rebase:
- clicks=self.config['baseline_clicks']
- if clicks==0:
- self.basepoints=[]
- base_index_0=peak_location[-1]+fit_interval_nm(peak_location[-1], displayed_plot, self.config['auto_right_baseline'],False)
- self.basepoints.append(self._clickize(displayed_plot.vectors[1][0],displayed_plot.vectors[1][1],base_index_0))
- base_index_1=self.basepoints[0].index+fit_interval_nm(self.basepoints[0].index, displayed_plot, self.config['auto_left_baseline'],False)
- self.basepoints.append(self._clickize(displayed_plot.vectors[1][0],displayed_plot.vectors[1][1],base_index_1))
- elif clicks>0:
- print 'Select baseline'
- if clicks==1:
- self.basepoints=self._measure_N_points(N=1, whatset=whatset)
- base_index_1=self.basepoints[0].index+fit_interval_nm(self.basepoints[0].index, displayed_plot, self.config['auto_left_baseline'], False)
- self.basepoints.append(self._clickize(displayed_plot.vectors[1][0],displayed_plot.vectors[1][1],base_index_1))
- else:
- self.basepoints=self._measure_N_points(N=2, whatset=whatset)
-
- self.basecurrent=self.current.path
-
+ self.basepoints=self.baseline_points(peak_location, displayed_plot)
+
boundaries=[self.basepoints[0].index, self.basepoints[1].index]
boundaries.sort()
to_average=displayed_plot.vectors[1][1][boundaries[0]:boundaries[1]] #y points to average
avg=np.mean(to_average)
-
+
clicks=self.config['baseline_clicks']
if clicks==-1:
try:
avg=displayed_plot.vectors[1][1][contact_point_index]
except:
avg=displayed_plot.vectors[1][1][cindex]
-
+
for peak in peak_location:
#WLC FITTING
#define fit interval
if not usepoints:
- fit_points=fit_interval_nm(peak, displayed_plot, self.config['auto_fit_nm'], True)
+ fit_points=self.fit_interval_nm(peak, displayed_plot, self.config['auto_fit_nm'], True)
peak_point=self._clickize(displayed_plot.vectors[1][0],displayed_plot.vectors[1][1],peak)
other_fit_point=self._clickize(displayed_plot.vectors[1][0],displayed_plot.vectors[1][1],peak-fit_points)
-
+
#points for the fit
points=[contact_point, peak_point, other_fit_point]
-
+
if abs(peak_point.index-other_fit_point.index) < 2:
continue
-
+
if self.config['fit_function']=='wlc':
-
- params, yfit, xfit, fit_errors, qstd = self.wlc_fit(points, displayed_plot.vectors[1][0], displayed_plot.vectors[1][1], pl_value, T, return_errors=True)
+
+ params, yfit, xfit, fit_errors = self.wlc_fit(points, displayed_plot.vectors[1][0], displayed_plot.vectors[1][1], pl_value, T, return_errors=True)
elif self.config['fit_function']=='fjc':
- params, yfit, xfit, fit_errors, qstd = self.fjc_fit(points, displayed_plot.vectors[1][0], displayed_plot.vectors[1][1], pl_value, T, return_errors=True)
+ params, yfit, xfit, fit_errors = self.fjc_fit(points, displayed_plot.vectors[1][0], displayed_plot.vectors[1][1], pl_value, T, return_errors=True)
else:
print 'Unknown fit function'
print 'Please set fit_function as wlc or fjc'
return
-
-
+
+
#Measure forces
delta_to_measure=displayed_plot.vectors[1][1][peak-delta_force:peak+delta_force]
y=min(delta_to_measure)
- #save force values (pN)
+ #save force values (pN)
#Measure slopes
slope=self.linefit_between(peak-slope_span,peak)[0]
-
-
+
+
#check fitted data and, if right, add peak to the measurement
#FIXME: code duplication
if len(params)==1: #if we did choose 1-value fit
sigma_c_lengths.append(fit_errors[0]*(1.0e+9))
forces.append(abs(y-avg)*(1.0e+12))
slopes.append(slope)
- qstds.append(qstd)
#Add WLC fit lines to plot
fitplot.add_set(xfit,yfit)
if len(fitplot.styles)==0:
p_leng=params[1]*(1.0e+9)
#check if persistent length makes sense. otherwise, discard peak.
if p_leng>self.config['auto_min_p'] and p_leng<self.config['auto_max_p']:
- p_lengths.append(p_leng)
+ p_lengths.append(p_leng)
c_lengths.append(params[0]*(1.0e+9))
sigma_c_lengths.append(fit_errors[0]*(1.0e+9))
sigma_p_lengths.append(fit_errors[1]*(1.0e+9))
forces.append(abs(y-avg)*(1.0e+12))
slopes.append(slope)
- qstds.append(qstd)
-
+
#Add WLC fit lines to plot
fitplot.add_set(xfit,yfit)
if len(fitplot.styles)==0:
fitplot.colors.append(None)
else:
pass
-
-
+
+
#add basepoints to fitplot
- fitplot.add_set([self.basepoints[0].graph_coords[0],self.basepoints[1].graph_coords[0]],[self.basepoints[0].graph_coords[1],self.basepoints[1].graph_coords[1]])
+ fitplot.add_set([self.basepoints[0].graph_coords[0],self.basepoints[1].graph_coords[0]],[self.basepoints[0].graph_coords[1],self.basepoints[1].graph_coords[1]])
fitplot.styles.append('scatter')
fitplot.colors.append(None)
-
+
#Show wlc fits and peak locations
self._send_plot([fitplot])
- #self.do_peaks('')
-
+
print 'Using fit function: ',self.config['fit_function']
print 'Measurements for all peaks detected:'
- print 'contour (nm)', self.print_prec(c_lengths,1)
- print 'sigma contour (nm)',self.print_prec(sigma_c_lengths,2)
- print 'p (nm)',self.print_prec(p_lengths,3)
- print 'sigma p (nm)',self.print_prec(sigma_p_lengths,3)
- print 'forces (pN)',self.print_prec(forces,1)
- print 'slopes (N/m)',self.print_prec(slopes,3)
-
+ print 'contour (nm)', c_lengths
+ print 'sigma contour (nm)',sigma_c_lengths
+ print 'p (nm)',p_lengths
+ print 'sigma p (nm)',sigma_p_lengths
+ print 'forces (pN)',forces
+ print 'slopes (N/m)',slopes
+
controller=False
while controller==False:
#Ask the user what peaks to ignore from analysis.
print 'Bad input.'
controller=False
- #Clean data vectors from ignored peaks
+ #Clean data vectors from ignored peaks
#FIXME:code duplication
c_lengths=[item for item in c_lengths if item != None]
p_lengths=[item for item in p_lengths if item != None]
forces=[item for item in forces if item != None]
- slopes=[item for item in slopes if item != None]
- sigma_c_lengths=[item for item in sigma_c_lengths if item != None]
- sigma_p_lengths=[item for item in sigma_p_lengths if item != None]
-
+ slopes=[item for item in slopes if item != None]
+ sigma_c_lengths=[item for item in sigma_c_lengths if item != None]
+ sigma_p_lengths=[item for item in sigma_p_lengths if item != None]
+
print 'Measurements for chosen peaks:'
- print 'contour (nm)', self.print_prec(c_lengths,1)
- print 'sigma contour (nm)',self.print_prec(sigma_c_lengths,2)
- print 'p (nm)',self.print_prec(p_lengths,3)
- print 'sigma p (nm)',self.print_prec(sigma_p_lengths,3)
- print 'forces (pN)',self.print_prec(forces,1)
- print 'slopes (N/m)',self.print_prec(slopes,3)
-
+ print 'contour (nm)',c_lengths
+ print 'sigma contour (nm)',sigma_c_lengths
+ print 'p (nm)',p_lengths
+ print 'sigma p (nm)',sigma_p_lengths
+ print 'forces (pN)',forces
+ print 'slopes (N/m)',slopes
+
#Save file info
if self.autofile=='':
self.autofile=raw_input('Autopeak filename? (return to ignore) ')
if self.autofile=='':
print 'Not saved.'
return
-
+
if not os.path.exists(self.autofile):
f=open(self.autofile,'w+')
f.write('Analysis started '+time.asctime()+'\n')
f.write('----------------------------------------\n')
f.write('; Contour length (nm) ; Persistence length (nm) ; Max.Force (pN) ; Slope (N/m) ; Sigma contour (nm) ; Sigma persistence (nm)\n')
f.close()
-
+
print 'Saving...'
f=open(self.autofile,'a+')
-
+
f.write(self.current.path+'\n')
for i in range(len(c_lengths)):
f.write(' ; '+str(c_lengths[i])+' ; '+str(p_lengths[i])+' ; '+str(forces[i])+' ; '+str(slopes[i])+' ; '+str(sigma_c_lengths[i])+' ; '+str(sigma_p_lengths[i])+'\n')
-
+
f.close()
self.do_note('autopeak')
-
--- /dev/null
- from libhooke import WX_GOOD, ClickedPoint
+# -*- coding: utf-8 -*-
- def print_prec(self, arr, prec):
- try:
- nparr=np.array(arr)
- np.set_printoptions(precision=prec)
- #we remove the parentesis if the array is longer that 1
- if len(nparr)>1:
- strvals=str(nparr)[1:-1]
- return strvals
- except:
- return "Error in the array."
-
++from ..libhooke import WX_GOOD, ClickedPoint
++
+import wxversion
+wxversion.select(WX_GOOD)
+from wx import PostEvent
+import numpy as np
+import scipy as sp
+import copy
+import os.path
+import time
+
+
+class curvetoolsCommands:
+
+ def fit_interval_nm(self,start_index,plot,nm,backwards):
+ '''
+ Calculates the number of points to fit, given a fit interval in nm
+ start_index: index of point
+ plot: plot to use
+ backwards: if true, finds a point backwards.
+ '''
+ whatset=1 #FIXME: should be decidable
+ x_vect=plot.vectors[1][0]
+
+ c=0
+ i=start_index
+ start=x_vect[start_index]
+ maxlen=len(x_vect)
+ while abs(x_vect[i]-x_vect[start_index])*(10**9) < nm:
+ if i==0 or i==maxlen-1: #we reached boundaries of vector!
+ return c
+
+ if backwards:
+ i-=1
+ else:
+ i+=1
+ c+=1
+ return c
+
+
+
+ def find_current_peaks(self,noflatten, a=True, maxpeak=True):
+ #Find peaks.
+ if a==True:
+ a=self.convfilt_config['mindeviation']
+ try:
+ abs_devs=float(a)
+ except:
+ print "Bad input, using default."
+ abs_devs=self.convfilt_config['mindeviation']
+
+ defplot=self.current.curve.default_plots()[0]
+ if not noflatten:
+ flatten=self._find_plotmanip('flatten') #Extract flatten plotmanip
+ defplot=flatten(defplot, self.current, customvalue=1) #Flatten curve before feeding it to has_peaks
+ pk_location,peak_size=self.has_peaks(defplot, abs_devs, maxpeak)
+ return pk_location, peak_size
+
+
+ def pickup_contact_point(self,N=1,whatset=1):
+ '''macro to pick up the contact point by clicking'''
+ contact_point=self._measure_N_points(N=1, whatset=1)[0]
+ contact_point_index=contact_point.index
+ self.wlccontact_point=contact_point
+ self.wlccontact_index=contact_point.index
+ self.wlccurrent=self.current.path
+ return contact_point, contact_point_index
+
+
+
+ def baseline_points(self,peak_location, displayed_plot):
+ clicks=self.config['baseline_clicks']
+ if clicks==0:
+ self.basepoints=[]
+ base_index_0=peak_location[-1]+self.fit_interval_nm(peak_location[-1], displayed_plot, self.config['auto_right_baseline'],False)
+ self.basepoints.append(self._clickize(displayed_plot.vectors[1][0],displayed_plot.vectors[1][1],base_index_0))
+ base_index_1=self.basepoints[0].index+self.fit_interval_nm(self.basepoints[0].index, displayed_plot, self.config['auto_left_baseline'],False)
+ self.basepoints.append(self._clickize(displayed_plot.vectors[1][0],displayed_plot.vectors[1][1],base_index_1))
+ elif clicks>0:
+ print 'Select baseline'
+ if clicks==1:
+ self.basepoints=self._measure_N_points(N=1, whatset=1)
+ base_index_1=self.basepoints[0].index+self.fit_interval_nm(self.basepoints[0].index, displayed_plot, self.config['auto_left_baseline'], False)
+ self.basepoints.append(self._clickize(displayed_plot.vectors[1][0],displayed_plot.vectors[1][1],base_index_1))
+ else:
+ self.basepoints=self._measure_N_points(N=2, whatset=1)
+
+ self.basecurrent=self.current.path
+ return self.basepoints
+
+
+
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
'''
FIT
Non-standard Dependencies:
procplots.py (plot processing plugin)
'''
- from libhooke import WX_GOOD, ClickedPoint
-from hooke.libhooke import WX_GOOD, ClickedPoint
++from ..libhooke import WX_GOOD, ClickedPoint
+
import wxversion
wxversion.select(WX_GOOD)
#from wx import PostEvent
#from wx.lib.newevent import NewEvent
import scipy
import scipy.odr
+ import scipy.stats
import numpy as np
import copy
import Queue
events_from_fit=Queue.Queue() #GUI ---> CLI COMMUNICATION
- class fitCommands:
-
+ class fitCommands(object):
+
def _plug_init(self):
self.wlccurrent=None
self.wlccontact_point=None
self.wlccontact_index=None
-
+
+ def dist2fit(self):
+ '''Calculates the average distance from data to fit, scaled by the standard deviation
+ of the free cantilever area (thermal noise)
+ '''
+
+
+
def wlc_fit(self,clicked_points,xvector,yvector, pl_value, T=293, return_errors=False):
'''
Worm-like chain model fitting.
The function is the simple polynomial worm-like chain as proposed by C.Bustamante, J.F.Marko, E.D.Siggia
and S.Smith (Science. 1994 Sep 9;265(5178):1599-600.)
'''
-
+
'''
clicked_points[0] = contact point (calculated or hand-clicked)
clicked_points[1] and [2] are edges of chunk
'''
-
+
#STEP 1: Prepare the vectors to apply the fit.
-
-
if pl_value is not None:
pl_value=pl_value/(10**9)
-
+
#indexes of the selected chunk
first_index=min(clicked_points[1].index, clicked_points[2].index)
last_index=max(clicked_points[1].index, clicked_points[2].index)
-
+
#getting the chunk and reverting it
xchunk,ychunk=xvector[first_index:last_index],yvector[first_index:last_index]
xchunk.reverse()
- ychunk.reverse()
+ ychunk.reverse()
#put contact point at zero and flip around the contact point (the fit wants a positive growth for extension and force)
xchunk_corr_up=[-(x-clicked_points[0].graph_coords[0]) for x in xchunk]
ychunk_corr_up=[-(y-clicked_points[0].graph_coords[1]) for y in ychunk]
-
+
#make them arrays
xchunk_corr_up=scipy.array(xchunk_corr_up)
ychunk_corr_up=scipy.array(ychunk_corr_up)
-
-
+
+
#STEP 2: actually do the fit
-
+
#Find furthest point of chunk and add it a bit; the fit must converge
#from an excess!
xchunk_high=max(xchunk_corr_up)
xchunk_high+=(xchunk_high/10)
-
+
#Here are the linearized start parameters for the WLC.
#[lambd=1/Lo , pii=1/P]
-
+
p0=[(1/xchunk_high),(1/(3.5e-10))]
p0_plfix=[(1/xchunk_high)]
'''
ODR STUFF
fixme: remove these comments after testing
'''
-
-
+ def dist(px,py,linex,liney):
+ distancesx=scipy.array([(px-x)**2 for x in linex])
+ minindex=np.argmin(distancesx)
+ print px, linex[0], linex[-1]
+ return (py-liney[minindex])**2
+
def f_wlc(params,x,T=T):
'''
wlc function for ODR fitting
therm=Kb*T
y=(therm*pii/4.0) * (((1-(x*lambd))**-2) - 1 + (4*x*lambd))
return y
-
+
def f_wlc_plfix(params,x,pl_value=pl_value,T=T):
'''
wlc function for ODR fitting
therm=Kb*T
y=(therm*pii/4.0) * (((1-(x*lambd))**-2) - 1 + (4*x*lambd))
return y
-
+
#make the ODR fit
realdata=scipy.odr.RealData(xchunk_corr_up,ychunk_corr_up)
if pl_value:
else:
model=scipy.odr.Model(f_wlc)
o = scipy.odr.ODR(realdata, model, p0)
-
+
o.set_job(fit_type=2)
out=o.run()
fit_out=[(1/i) for i in out.beta]
-
+
#Calculate fit errors from output standard deviations.
#We must propagate the error because we fit the *inverse* parameters!
#The error = (error of the inverse)*(value**2)
for sd,value in zip(out.sd_beta, fit_out):
err_real=sd*(value**2)
fit_errors.append(err_real)
-
- def wlc_eval(x,params,pl_value,T):
+
+ def wlc_eval(x,params,pl_value,T):
'''
Evaluates the WLC function
'''
lambd, pii = params
else:
lambd = params
-
+
if pl_value:
pii=1/pl_value
-
+
Kb=(1.38065e-23) #boltzmann constant
therm=Kb*T #so we have thermal energy
-
+
return ( (therm*pii/4.0) * (((1-(x*lambd))**-2.0) - 1 + (4.0*x*lambd)) )
-
+
+
#STEP 3: plotting the fit
-
+
#obtain domain to plot the fit - from contact point to last_index plus 20 points
thule_index=last_index+10
if thule_index > len(xvector): #for rare cases in which we fit something at the END of whole curve.
xfit_chunk.reverse()
xfit_chunk_corr_up=[-(x-clicked_points[0].graph_coords[0]) for x in xfit_chunk]
xfit_chunk_corr_up=scipy.array(xfit_chunk_corr_up)
-
+
#the fitted curve: reflip, re-uncorrect
yfit=wlc_eval(xfit_chunk_corr_up, out.beta, pl_value,T)
yfit_down=[-y for y in yfit]
yfit_corr_down=[y+clicked_points[0].graph_coords[1] for y in yfit_down]
-
+
+
+ #calculate fit quality
+ qsum=0
+ yqeval=wlc_eval(xchunk_corr_up,out.beta,pl_value,T)
+ #we need to cut the extra from thuleindex
+ for qindex in np.arange(0,len(yqeval)):
+ qsum+=(yqeval[qindex]-ychunk_corr_up[qindex])**2
+ qstd=np.sqrt(qsum/len(ychunk_corr_up))
+
+
if return_errors:
- return fit_out, yfit_corr_down, xfit_chunk, fit_errors
+ return fit_out, yfit_corr_down, xfit_chunk, fit_errors, qstd
else:
- return fit_out, yfit_corr_down, xfit_chunk, None
-
+ return fit_out, yfit_corr_down, xfit_chunk, None, qstd
+
def fjc_fit(self,clicked_points,xvector,yvector, pl_value, T=293, return_errors=False):
'''
Freely-jointed chain function
clicked_points[0] is the contact point (calculated or hand-clicked)
clicked_points[1] and [2] are edges of chunk
'''
-
+
#STEP 1: Prepare the vectors to apply the fit.
if pl_value is not None:
pl_value=pl_value/(10**9)
-
+
#indexes of the selected chunk
first_index=min(clicked_points[1].index, clicked_points[2].index)
last_index=max(clicked_points[1].index, clicked_points[2].index)
-
+
#getting the chunk and reverting it
xchunk,ychunk=xvector[first_index:last_index],yvector[first_index:last_index]
xchunk.reverse()
- ychunk.reverse()
+ ychunk.reverse()
#put contact point at zero and flip around the contact point (the fit wants a positive growth for extension and force)
xchunk_corr_up=[-(x-clicked_points[0].graph_coords[0]) for x in xchunk]
ychunk_corr_up=[-(y-clicked_points[0].graph_coords[1]) for y in ychunk]
-
-
+
+
#make them arrays
xchunk_corr_up=scipy.array(xchunk_corr_up)
ychunk_corr_up=scipy.array(ychunk_corr_up)
-
-
+
+
#STEP 2: actually do the fit
-
+
#Find furthest point of chunk and add it a bit; the fit must converge
#from an excess!
xchunk_high=max(xchunk_corr_up)
xchunk_high+=(xchunk_high/10)
-
+
#Here are the linearized start parameters for the WLC.
#[lambd=1/Lo , pii=1/P]
-
+
p0=[(1/xchunk_high),(1/(3.5e-10))]
p0_plfix=[(1/xchunk_high)]
'''
ODR STUFF
fixme: remove these comments after testing
'''
+ def dist(px,py,linex,liney):
+ distancesx=scipy.array([(px-x)**2 for x in linex])
+ minindex=np.argmin(distancesx)
+ print minindex, px, linex[0], linex[-1]
+ return (py-liney[minindex])**2
+
def coth(z):
'''
hyperbolic cotangent
'''
return (np.exp(2*z)+1)/(np.exp(2*z)-1)
-
+
def x_fjc(params,f,T=T):
'''
fjc function for ODR fitting
lambd,pii=params
Kb=(1.38065e-23)
therm=Kb*T
-
+
#x=(therm*pii/4.0) * (((1-(x*lambd))**-2) - 1 + (4*x*lambd))
x=(1/lambd)*(coth(f*(1/pii)/therm) - (therm*pii)/f)
return x
-
+
def x_fjc_plfix(params,f,pl_value=pl_value,T=T):
'''
fjc function for ODR fitting
#y=(therm*pii/4.0) * (((1-(x*lambd))**-2) - 1 + (4*x*lambd))
x=(1/lambd)*(coth(f*(1/pii)/therm) - (therm*pii)/f)
return x
-
+
#make the ODR fit
realdata=scipy.odr.RealData(ychunk_corr_up,xchunk_corr_up)
if pl_value:
else:
model=scipy.odr.Model(x_fjc)
o = scipy.odr.ODR(realdata, model, p0)
-
+
o.set_job(fit_type=2)
out=o.run()
fit_out=[(1/i) for i in out.beta]
-
+
#Calculate fit errors from output standard deviations.
#We must propagate the error because we fit the *inverse* parameters!
#The error = (error of the inverse)*(value**2)
for sd,value in zip(out.sd_beta, fit_out):
err_real=sd*(value**2)
fit_errors.append(err_real)
-
- def fjc_eval(y,params,pl_value,T):
+
+ def fjc_eval(y,params,pl_value,T):
'''
Evaluates the WLC function
'''
lambd, pii = params
else:
lambd = params
-
+
if pl_value:
pii=1/pl_value
-
+
Kb=(1.38065e-23) #boltzmann constant
therm=Kb*T #so we have thermal energy
#return ( (therm*pii/4.0) * (((1-(x*lambd))**-2.0) - 1 + (4.0*x*lambd)) )
return (1/lambd)*(coth(y*(1/pii)/therm) - (therm*pii)/y)
-
-
+
+
++
#STEP 3: plotting the fit
#obtain domain to plot the fit - from contact point to last_index plus 20 points
thule_index=last_index+10
#or other buggy situations. Kludge to live with it now...
ychunk=yvector[:thule_index]
y_evalchunk=np.linspace(min(ychunk),max(ychunk),100)
-
+
yfit_down=[-y for y in y_evalchunk]
yfit_corr_down=[y+clicked_points[0].graph_coords[1] for y in yfit_down]
yfit_corr_down=scipy.array(yfit_corr_down)
-
+
#the fitted curve: reflip, re-uncorrect
xfit=fjc_eval(yfit_corr_down, out.beta, pl_value,T)
xfit=list(xfit)
xfit.reverse()
xfit_chunk_corr_up=[-(x-clicked_points[0].graph_coords[0]) for x in xfit]
-
+
#xfit_chunk_corr_up=scipy.array(xfit_chunk_corr_up)
#deltay=yfit_down[0]-yvector[clicked_points[0].index]
-
+
#This is a terrible, terrible kludge to find the point where it should normalize (and from where it should plot)
xxxdists=[]
for index in scipy.arange(1,len(xfit_chunk_corr_up),1):
- xxxdists.append((clicked_points[0].graph_coords[0]-xfit_chunk_corr_up[index])**2)
+ xxxdists.append((clicked_points[0].graph_coords[0]-xfit_chunk_corr_up[index])**2)
normalize_index=xxxdists.index(min(xxxdists))
#End of kludge
-
+
deltay=yfit_down[normalize_index]-clicked_points[0].graph_coords[1]
yfit_corr_down=[y-deltay for y in yfit_down]
-
+
+
+ #calculate fit quality
+ #creates dense y vector
+ yqeval=np.linspace(np.min(ychunk_corr_up)/2,np.max(ychunk_corr_up)*2,10*len(ychunk_corr_up))
+ #corresponding fitted x
+ xqeval=fjc_eval(yqeval,out.beta,pl_value,T)
+
+ qsum=0
+ for qindex in np.arange(0,len(ychunk_corr_up)):
+ qsum+=dist(xchunk_corr_up[qindex],ychunk_corr_up[qindex],xqeval,yqeval)
+ qstd=np.sqrt(qsum/len(ychunk_corr_up))
+
+
if return_errors:
- return fit_out, yfit_corr_down[normalize_index+1:], xfit_chunk_corr_up[normalize_index+1:], fit_errors
+ return fit_out, yfit_corr_down[normalize_index+1:], xfit_chunk_corr_up[normalize_index+1:], fit_errors, qstd
else:
- return fit_out, yfit_corr_down[normalize_index+1:], xfit_chunk_corr_up[normalize_index+1:], None
+ return fit_out, yfit_corr_down[normalize_index+1:], xfit_chunk_corr_up[normalize_index+1:], None, qstd
+
+ def efjc_fit(self,clicked_points,xvector,yvector, pl_value, T=293.0, return_errors=False):
+ '''
+ Extended Freely-jointed chain function
+ ref: F Oesterhelt, M Rief and H E Gaub, New Journal of Physics 1 (1999) 6.1–6.11
+ Please note that 2-parameter fitting (length and kl) usually does not converge, use fixed kl
+ '''
+ '''
+ clicked_points[0] is the contact point (calculated or hand-clicked)
+ clicked_points[1] and [2] are edges of chunk
+
+ '''
+ #Fixed parameters from reference
+ Kb=(1.38065e-2) #in pN.nm
+ Lp=0.358 #planar, nm
+ Lh=0.280 #helical, nm
+ Ks=150e3 #pN/nm
+
+
+ #STEP 1: Prepare the vectors to apply the fit.
+
+ #indexes of the selected chunk
+ first_index=min(clicked_points[1].index, clicked_points[2].index)
+ last_index=max(clicked_points[1].index, clicked_points[2].index)
+
+ #getting the chunk and reverting it
+ xchunk,ychunk=xvector[first_index:last_index],yvector[first_index:last_index]
+ xchunk.reverse()
+ ychunk.reverse()
+ #put contact point at zero and flip around the contact point (the fit wants a positive growth for extension and force)
+ xchunk_corr_up=[-(x-clicked_points[0].graph_coords[0]) for x in xchunk]
+ ychunk_corr_up=[-(y-clicked_points[0].graph_coords[1]) for y in ychunk]
+
+
+ #make them arrays
+ xchunk_corr_up=scipy.array(xchunk_corr_up)
+ ychunk_corr_up=scipy.array(ychunk_corr_up)
+
+ xchunk_corr_up_nm=xchunk_corr_up*1e9
+ ychunk_corr_up_pn=ychunk_corr_up*1e12
+
+
+ #STEP 2: actually do the fit
+
+ #Find furthest point of chunk and add it a bit; the fit must converge
+ #from an excess!
+ xchunk_high=max(xchunk_corr_up_nm)
+ xchunk_high+=(xchunk_high/10.0)
+
+ #Here are the linearized start parameters for the WLC.
+ #[Ns , pii=1/P]
+ #max number of monomers (all helical)for a contour length xchunk_high
+ excessNs=xchunk_high/(Lp)
+ p0=[excessNs,(1.0/(0.7))]
+ p0_plfix=[(excessNs)]
+
+ def dist(px,py,linex,liney):
+ distancesx=scipy.array([(px-x)**2 for x in linex])
+ minindex=np.argmin(distancesx)
+ return (py-liney[minindex])**2
+
+ def deltaG(f):
+ dG0=12.4242 #3kt in pN.nm
+ dL=0.078 #planar-helical
+ return dG0-f*dL
+
+ def Lfactor(f,T=T):
+ Lp=0.358 #planar, nm
+ Lh=0.280 #helical, nm
+ Kb=(1.38065e-2)
+ therm=Kb*T
+ dG=deltaG(f)
+
+ return Lp/(np.exp(dG/therm)+1)+Lh/(np.exp(-dG/therm)+1)
+
+ def coth(z):
+ '''
+ hyperbolic cotangent
+ '''
+ return 1.0/np.tanh(z)
+
+ def x_efjc(params,f,T=T,Ks=Ks):
+ '''
+ efjc function for ODR fitting
+ '''
+
+ Ns=params[0]
+ invkl=params[1]
+ Kb=(1.38065e-2)
+ therm=Kb*T
+ beta=(f/therm)/invkl
+
+ x=Ns*Lfactor(f)*(coth(beta)-1.0/beta)+Ns*f/Ks
+ return x
+
+ def x_efjc_plfix(params,f,kl_value=pl_value,T=T,Ks=Ks):
+ '''
+ efjc function for ODR fitting
+ '''
+
+ Ns=params
+ invkl=1.0/kl_value
+ Kb=(1.38065e-2)
+ therm=Kb*T
+ beta=(f/therm)/invkl
+
+ x=Ns*Lfactor(f)*(coth(beta)-1.0/beta)+Ns*f/Ks
+ return x
+
+ #make the ODR fit
+ realdata=scipy.odr.RealData(ychunk_corr_up_pn,xchunk_corr_up_nm)
+ if pl_value:
+ model=scipy.odr.Model(x_efjc_plfix)
+ o = scipy.odr.ODR(realdata, model, p0_plfix)
+ else:
+ print 'WARNING eFJC fit with free pl sometimes does not converge'
+ model=scipy.odr.Model(x_efjc)
+ o = scipy.odr.ODR(realdata, model, p0)
+
+ o.set_job(fit_type=2)
+ out=o.run()
+
+
+ Ns=out.beta[0]
+ Lc=Ns*Lp*1e-9
+ if len(out.beta)>1:
+ kfit=1e-9/out.beta[1]
+ kfitnm=1/out.beta[1]
+ else:
+ kfit=1e-9*pl_value
+ kfitnm=pl_value
+
+ fit_out=[Lc, kfit]
+
+ #Calculate fit errors from output standard deviations.
+ fit_errors=[]
+ fit_errors.append(out.sd_beta[0]*Lp*1e-9)
+ if len(out.beta)>1:
+ fit_errors.append(1e9*out.sd_beta[1]*kfit**2)
+
+
+
+ def efjc_eval(y,params,pl_value,T=T,Lfactor=Lfactor,Ks=Ks):
+ '''
+ Evaluates the eFJC function
+ '''
+ if not pl_value:
+ Ns, invkl = params
+ else:
+ Ns = params
+
+ if pl_value:
+ invkl=1.0/pl_value
+
+ Kb=(1.38065e-2) #boltzmann constant
+ therm=Kb*T #so we have thermal energy
+ beta=(y/therm)/invkl
+ x= Ns*Lfactor(y)*(coth(beta)-1.0/beta)+Ns*y/Ks
+
+ return x
+
+ #STEP 3: plotting the fit
+ #obtain domain to plot the fit - from contact point to last_index plus 20 points
+ thule_index=last_index+10
+ if thule_index > len(xvector): #for rare cases in which we fit something at the END of whole curve.
+ thule_index = len(xvector)
+ #reverse etc. the domain
+ ychunk=yvector[clicked_points[0].index:thule_index]
+ if len(ychunk)>0:
+ y_evalchunk=np.linspace(min(ychunk),max(ychunk),100)
+ else:
+ #Empty y-chunk. It happens whenever we set the contact point after a recognized peak,
+ #or other buggy situations. Kludge to live with it now...
+ ychunk=yvector[:thule_index]
+ y_evalchunk=np.linspace(min(ychunk),max(ychunk),100)
+
+ yfit_down=[-y for y in y_evalchunk]
+ yfit_corr_down=[y+clicked_points[0].graph_coords[1] for y in yfit_down]
+ yfit_corr_down=scipy.array(yfit_corr_down)
+
+ #the fitted curve: reflip, re-uncorrect
+ xfit=efjc_eval(1e12*yfit_corr_down, out.beta, pl_value,T)*1e-9
+ xfit=list(xfit)
+ xfit.reverse()
+ xfit_chunk_corr_up=[-(x-clicked_points[0].graph_coords[0]) for x in xfit]
+
+ #xfit_chunk_corr_up=scipy.array(xfit_chunk_corr_up)
+ #deltay=yfit_down[0]-yvector[clicked_points[0].index]
+
+ #This is a terrible, terrible kludge to find the point where it should normalize (and from where it should plot)
+ xxxdists=[]
+ for index in scipy.arange(1,len(xfit_chunk_corr_up),1):
+ xxxdists.append((clicked_points[0].graph_coords[0]-xfit_chunk_corr_up[index])**2)
+ normalize_index=xxxdists.index(min(xxxdists))
+ #End of kludge
+
+ deltay=yfit_down[normalize_index]-clicked_points[0].graph_coords[1]
+ yfit_corr_down=[y-deltay for y in yfit_down]
+
+ #calculate fit quality
+ #creates dense y vector
+ yqeval=np.linspace(np.min(ychunk_corr_up_pn)/2,np.max(ychunk_corr_up_pn)*2,10*len(ychunk_corr_up_pn))
+ #corresponding fitted x
+ xqeval=efjc_eval(yqeval,out.beta,pl_value)
+
+ qsum=0
+ for qindex in np.arange(0,len(ychunk_corr_up_pn)):
+ qsum+=dist(xchunk_corr_up_nm[qindex],ychunk_corr_up_pn[qindex],xqeval,yqeval)
+ qstd=1e-12*np.sqrt(qsum/len(ychunk_corr_up_pn))
+
+ if return_errors:
+ return fit_out, yfit_corr_down[normalize_index+1:], xfit_chunk_corr_up[normalize_index+1:], fit_errors, qstd
+ else:
+ return fit_out, yfit_corr_down[normalize_index+1:], xfit_chunk_corr_up[normalize_index+1:], None, qstd
+
+
+
+
def do_wlc(self,args):
'''
WLC
(fit.py plugin)
-
+
See the fit command
'''
self.do_fit(args)
-
+
def do_fjc(self,args):
'''
FJC
(fit.py plugin)
-
+
See the fit command
'''
self.do_fit(args)
-
+
def do_fit(self,args):
'''
FIT
First you have to click a contact point.
Then you have to click the two edges of the data you want to fit.
-
+
+ Fit quality compares the distance to the fit with the thermal noise (a good fit should be close to 1)
+
The fit function depends on the fit_function variable. You can set it with the command
"set fit_function wlc" or "set fit_function fjc" depending on the function you prefer.
-
- For WLC, the function is the simple polynomial worm-like chain as proposed by
- C.Bustamante, J.F.Marko, E.D.Siggia and S.Smith (Science. 1994
+
+ For WLC, the function is the simple polynomial worm-like chain as proposed by
+ C.Bustamante, J.F.Marko, E.D.Siggia and S.Smith (Science. 1994
Sep 9;265(5178):1599-600.)
-
- For FJC, ref:
+
+ For FJC, ref:
C.Ray and B.B. Akhremitchev; http://www.chem.duke.edu/~boris/research/force_spectroscopy/fit_efjc.pdf
+
+ For eFJC, ref:
+ F Oesterhelt, M Rief and H E Gaub, New Journal of Physics 1 (1999) 6.1–6.11 (section 4.2)
+ NOTE: use fixed pl for better results.
Arguments:
- pl=[value] : Use a fixed persistent length (WLC) or Kuhn length (FJC) for the fit. If pl is not given,
- the fit will be a 2-variable
+ pl=[value] : Use a fixed persistent length (WLC) or Kuhn length (FJC) for the fit. If pl is not given,
+ the fit will be a 2-variable
fit. DO NOT put spaces between 'pl', '=' and the value.
- The value must be in nanometers.
-
+ The value must be in nanometers.
+
t=[value] : Use a user-defined temperature. The value must be in
kelvins; by default it is 293 K.
DO NOT put spaces between 't', '=' and the value.
-
- noauto : allows for clicking the contact point by
+
+ noauto : allows for clicking the contact point by
hand (otherwise it is automatically estimated) the first time.
If subsequent measurements are made, the same contact point
clicked is used
-
+
reclick : redefines by hand the contact point, if noauto has been used before
but the user is unsatisfied of the previously choosen contact point.
---------
if ('t=' in arg[0:2]) or ('T=' in arg[0:2]):
t_expression=arg.split('=')
T=float(t_expression[1])
-
+
#use the currently displayed plot for the fit
displayed_plot=self._get_displayed_plot()
-
+
#handle contact point arguments correctly
if 'reclick' in args.split():
print 'Click contact point'
contact_point.absolute_coords=displayed_plot.vectors[1][0][cindex], displayed_plot.vectors[1][1][cindex]
contact_point.find_graph_coords(displayed_plot.vectors[1][0], displayed_plot.vectors[1][1])
contact_point.is_marker=True
-
+
print 'Click edges of chunk'
points=self._measure_N_points(N=2, whatset=1)
points=[contact_point]+points
+
try:
if self.config['fit_function']=='wlc':
- params, yfit, xfit, fit_errors = self.wlc_fit(points, displayed_plot.vectors[1][0], displayed_plot.vectors[1][1],pl_value,T, return_errors=True )
+ params, yfit, xfit, fit_errors,qstd = self.wlc_fit(points, displayed_plot.vectors[1][0], displayed_plot.vectors[1][1],pl_value,T, return_errors=True )
name_of_charlength='Persistent length'
elif self.config['fit_function']=='fjc':
- params, yfit, xfit, fit_errors = self.fjc_fit(points, displayed_plot.vectors[1][0], displayed_plot.vectors[1][1],pl_value,T, return_errors=True )
+ params, yfit, xfit, fit_errors,qstd = self.fjc_fit(points, displayed_plot.vectors[1][0], displayed_plot.vectors[1][1],pl_value,T, return_errors=True )
name_of_charlength='Kuhn length'
+ elif self.config['fit_function']=='efjc':
+ params, yfit, xfit, fit_errors,qstd = self.efjc_fit(points, displayed_plot.vectors[1][0], displayed_plot.vectors[1][1],pl_value,T, return_errors=True )
+ name_of_charlength='Kuhn length (e)'
else:
print 'No recognized fit function defined!'
- print 'Set your fit function to wlc or fjc.'
+ print 'Set your fit function to wlc, fjc or efjc.'
return
-
+
except:
print 'Fit not possible. Probably wrong interval -did you click two *different* points?'
return
-
+
#FIXME: print "Kuhn length" for FJC
print 'Fit function:',self.config['fit_function']
- print 'Contour length: %.2f nm' %(params[0]*(1.0e+9))
- to_dump='contour '+self.current.path+' %.2f nm'%(params[0]*(1.0e+9))
+ print 'Contour length: ',params[0]*(1.0e+9),' nm'
+ to_dump='contour '+self.current.path+' '+str(params[0]*(1.0e+9))+' nm'
self.outlet.push(to_dump)
if len(params)==2: #if we did choose 2-value fit
- print name_of_charlength+': %.2f nm' %(params[1]*(1.0e+9))
- to_dump='persistent '+self.current.path+' %.2f nm' %(params[1]*(1.0e+9))
+ print name_of_charlength+': ',params[1]*(1.0e+9),' nm'
+ to_dump='persistent '+self.current.path+' '+str(params[1]*(1.0e+9))+' nm'
self.outlet.push(to_dump)
-
+
if fit_errors:
fit_nm=[i*(10**9) for i in fit_errors]
- print 'Standard deviation (contour length) %.2f' %fit_nm[0]
+ print 'Standard deviation (contour length)', fit_nm[0]
if len(fit_nm)>1:
- print 'Standard deviation ('+name_of_charlength+') %.2f' %fit_nm[1]
+ print 'Standard deviation ('+name_of_charlength+')', fit_nm[1]
-
-
+
- print 'Fit quality: %.3f ' %(qstd/np.std(displayed_plot.vectors[1][1][-20:-1]))
++ print 'Fit quality: '+str(qstd/np.std(displayed_plot.vectors[1][1][-20:-1]))
+
+
#add the clicked points in the final PlotObject
clickvector_x, clickvector_y=[], []
for item in points:
clickvector_x.append(item.graph_coords[0])
clickvector_y.append(item.graph_coords[1])
-
+
#create a custom PlotObject to gracefully plot the fit along the curves
-
+
fitplot=copy.deepcopy(displayed_plot)
fitplot.add_set(xfit,yfit)
fitplot.add_set(clickvector_x,clickvector_y)
-
+
#FIXME: this colour/styles stuff must be solved at the root!
if fitplot.styles==[]:
fitplot.styles=[None,None,None,'scatter']
else:
fitplot.styles+=[None,'scatter']
-
+
if fitplot.colors==[]:
fitplot.colors=[None,None,None,None]
else:
fitplot.colors+=[None,None]
-
+
self._send_plot([fitplot])
-
-
+
+
#----------
-
-
+
+
def find_contact_point(self,plot=False):
'''
Finds the contact point on the curve.
-
+
The current algorithm (thanks to Francesco Musiani, francesco.musiani@unibo.it and Massimo Sandal) is:
- take care of the PicoForce trigger bug - exclude retraction portions with too high standard deviation
- fit the second half of the retraction curve to a line
- if the fit is not almost horizontal, take a smaller chunk and repeat
- otherwise, we have something horizontal
- so take the average of horizontal points and use it as a baseline
-
+
Then, start from the rise of the retraction curve and look at the first point below the
baseline.
-
+
FIXME: should be moved, probably to generalvclamp.py
'''
-
+
if not plot:
plot=self.plots[0]
-
+
outplot=self.subtract_curves(1)
xret=outplot.vectors[1][0]
ydiff=outplot.vectors[1][1]
yext=plot.vectors[0][1]
xret2=plot.vectors[1][0]
yret=plot.vectors[1][1]
-
+
#taking care of the picoforce trigger bug: we exclude portions of the curve that have too much
#standard deviation. yes, a lot of magic is here.
monster=True
else: #move away from the monster
monlength-=int(len(xret)/50)
finalength-=int(len(xret)/50)
-
-
+
+
#take half of the thing
endlength=int(len(xret)/2)
-
+
ok=False
-
+
while not ok:
xchunk=yext[endlength:monlength]
ychunk=yext[endlength:monlength]
if (abs(regr[1]) > 0.1) and ( endlength < len(xret)-int(len(xret)/6) ) :
endlength+=10
else:
- ok=True
-
-
+ ok=True
+
+
ymean=np.mean(ychunk) #baseline
-
+
index=0
point = ymean+1
-
+
#find the first point below the calculated baseline
while point > ymean:
try:
point=yret[index]
- index+=1
+ index+=1
except IndexError:
#The algorithm didn't find anything below the baseline! It should NEVER happen
- index=0
+ index=0
return index
-
+
return index
-
-
-
+
+
+
def find_contact_point2(self, debug=False):
'''
TO BE DEVELOPED IN THE FUTURE
Finds the contact point on the curve.
-
+
FIXME: should be moved, probably to generalvclamp.py
'''
-
+
#raw_plot=self.current.curve.default_plots()[0]
raw_plot=self.plots[0]
'''xext=self.plots[0].vectors[0][0]
yext=raw_plot.vectors[0][1]
xret2=raw_plot.vectors[1][0]
yret=raw_plot.vectors[1][1]
-
+
first_point=[xext[0], yext[0]]
last_point=[xext[-1], yext[-1]]
-
+
#regr=scipy.polyfit(first_point, last_point,1)[0:2]
diffx=abs(first_point[0]-last_point[0])
diffy=abs(first_point[1]-last_point[1])
-
+
#using polyfit results in numerical errors. good old algebra.
a=diffy/diffx
b=first_point[1]-(a*first_point[0])
baseline=scipy.polyval((a,b), xext)
-
+
ysub=[item-basitem for item,basitem in zip(yext,baseline)]
-
+
contact=ysub.index(min(ysub))
-
+
return xext,ysub,contact
-
+
#now, exploit a ClickedPoint instance to calculate index...
dummy=ClickedPoint()
dummy.absolute_coords=(x_intercept,y_intercept)
dummy.find_graph_coords(xret2,yret)
-
+
if debug:
return dummy.index, regr, regr_contact
else:
return dummy.index
-
-
+
+
def x_do_contact(self,args):
'''
DEBUG COMMAND to be activated in the future
'''
xext,ysub,contact=self.find_contact_point2(debug=True)
-
+
contact_plot=self.plots[0]
contact_plot.add_set(xext,ysub)
contact_plot.add_set([xext[contact]],[self.plots[0].vectors[0][1][contact]])
contact_plot.styles=[None,None,None,'scatter']
self._send_plot([contact_plot])
return
-
-
+
+
index,regr,regr_contact=self.find_contact_point2(debug=True)
print regr
print regr_contact
#nc_line=[(item*regr[0])+regr[1] for item in x_nc]
nc_line=scipy.polyval(regr,xret)
c_line=scipy.polyval(regr_contact,xret)
-
-
+
+
contact_plot=self.current.curve.default_plots()[0]
contact_plot.add_set(xret, nc_line)
contact_plot.add_set(xret, c_line)
#contact_plot.styles.append(None)
contact_plot.destination=1
self._send_plot([contact_plot])
-
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
'''
FLATFILTS
Other plugin dependencies:
procplots.py (plot processing plugin)
'''
- from libhooke import WX_GOOD
+
+ from hooke.libhooke import WX_GOOD
+
import wxversion
wxversion.select(WX_GOOD)
-
import xml.dom.minidom
-
import wx
import scipy
import numpy
from numpy import diff
-
#import pickle
- import libpeakspot as lps
- import libhookecurve as lhc
+ from .. import libpeakspot as lps
+ from .. import libhookecurve as lhc
- class flatfiltsCommands:
-
+ class flatfiltsCommands(object):
+
def _plug_init(self):
#configurate convfilt variables
convfilt_configurator=ConvfiltConfig()
-
- #different OSes have different path conventions
- if self.config['hookedir'][0]=='/':
- slash='/' #a Unix or Unix-like system
- else:
- slash='\\' #it's a drive letter, we assume it's Windows
-
- self.convfilt_config=convfilt_configurator.load_config(self.config['hookedir']+slash+'convfilt.conf')
-
+ self.convfilt_config=convfilt_configurator.load_config('convfilt.conf')
+
def do_flatfilt(self,args):
'''
FLATFILT
median_filter=7
min_npks=4
min_deviation=9
-
+
args=args.split(' ')
if len(args) == 2:
min_npks=int(args[0])
min_deviation=int(args[1])
else:
pass
-
+
print 'Processing playlist...'
notflat_list=[]
-
+
c=0
for item in self.current_list:
c+=1
-
+
try:
notflat=self.has_features(item, median_filter, min_npks, min_deviation)
print 'Curve',item.path, 'is',c,'of',len(self.current_list),': features are ',notflat
except:
notflat=False
print 'Curve',item.path, 'is',c,'of',len(self.current_list),': cannot be filtered. Probably unable to retrieve force data from corrupt file.'
-
+
if notflat:
item.features=notflat
item.curve=None #empty the item object, to further avoid memory leak
notflat_list.append(item)
-
+
if len(notflat_list)==0:
print 'Found nothing interesting. Check your playlist, could be a bug or criteria could be too much stringent'
return
self.current_list=notflat_list
self.current=self.current_list[self.pointer]
self.do_plot(0)
-
+
def has_features(self,item,median_filter,min_npks,min_deviation):
'''
decides if a curve is flat enough to be rejected from analysis: it sees if there
are at least min_npks points that are higher than min_deviation times the absolute value
of noise.
-
+
Algorithm original idea by Francesco Musiani, with my tweaks and corrections.
'''
retvalue=False
-
- item.identify(self.drivers)
+
+ item.identify(self.drivers)
#we assume the first is the plot with the force curve
#do the median to better resolve features from noise
flat_plot=self.plotmanip_median(item.curve.default_plots()[0], item, customvalue=median_filter)
- flat_vects=flat_plot.vectors
+ flat_vects=flat_plot.vectors
item.curve.close_all()
#needed to avoid *big* memory leaks!
del item.curve
del item
-
- #absolute value of derivate
+
+ #absolute value of derivate
yretdiff=diff(flat_vects[1][1])
yretdiff=[abs(value) for value in yretdiff]
#average of derivate values
c_pks+=1
else:
break
-
+
if c_pks>=min_npks:
retvalue = c_pks
-
+
del flat_plot, flat_vects, yretdiff
-
+
return retvalue
################################################################
#-----CONVFILT-------------------------------------------------
#-----Convolution-based peak recognition and filtering.
#Requires the libpeakspot.py library
-
- def has_peaks(self, plot, abs_devs=None):
+
+ def has_peaks(self, plot, abs_devs=None, maxpeak=True, window=10):
'''
Finds peak position in a force curve.
FIXME: should be moved in libpeakspot.py
'''
if abs_devs==None:
abs_devs=self.convfilt_config['mindeviation']
-
-
+
+
xret=plot.vectors[1][0]
yret=plot.vectors[1][1]
#Calculate convolution.
convoluted=lps.conv_dx(yret, self.convfilt_config['convolution'])
-
+
#surely cut everything before the contact point
cut_index=self.find_contact_point(plot)
#cut even more, before the blind window
blind_index+=1
cut_index+=blind_index
#do the dirty convolution-peak finding stuff
- noise_level=lps.noise_absdev(convoluted[cut_index:], self.convfilt_config['positive'], self.convfilt_config['maxcut'], self.convfilt_config['stable'])
- above=lps.abovenoise(convoluted,noise_level,cut_index,abs_devs)
+ noise_level=lps.noise_absdev(convoluted[cut_index:], self.convfilt_config['positive'], self.convfilt_config['maxcut'], self.convfilt_config['stable'])
+ above=lps.abovenoise(convoluted,noise_level,cut_index,abs_devs)
peak_location,peak_size=lps.find_peaks(above,seedouble=self.convfilt_config['seedouble'])
-
- #take the maximum
+
+ #take the minimum or the maximum of a peak
for i in range(len(peak_location)):
peak=peak_location[i]
- maxpk=min(yret[peak-10:peak+10])
- index_maxpk=yret[peak-10:peak+10].index(maxpk)+(peak-10)
- peak_location[i]=index_maxpk
-
+ valpk=min(yret[peak-window:peak+window]) #maximum in force (near the unfolding point)
+ index_pk=yret[peak-window:peak+window].index(valpk)+(peak-window)
+
+ if maxpeak==False:
+ valpk=max(yret[peak:peak+window]) #minimum in force, near the baseline
+ index_pk=yret[peak:peak+window].index(valpk)+(peak)
+
+# Let's explain that for the minimum. Imagine that we know there is a peak at position/region 100
+# and you have found its y-value. Now you look in the array, from 100-10 to 100+10 (if the window is 10).
+# This "100-10 to 100+10" is essentially a new array with its own index. In this array you have 20
+# elements, so the index of your y-value will be 10.
+# Now, to find the index in the TOTAL array you have to add the "position" of the "region" (which in this
+# case corresponds to 100) and also subtract the window size ---> (+100-10)
+
+ peak_location[i]=index_pk
+
return peak_location,peak_size
-
-
+
+
def exec_has_peaks(self,item,abs_devs):
'''
encapsulates has_peaks for the purpose of correctly treating the curve objects in the convfilt loop,
to avoid memory leaks
'''
- item.identify(self.drivers)
+ item.identify(self.drivers)
#we assume the first is the plot with the force curve
plot=item.curve.default_plots()[0]
-
+
if 'flatten' in self.config['plotmanips']:
#If flatten is present, use it for better recognition of peaks...
flatten=self._find_plotmanip('flatten') #extract flatten plot manipulator
plot=flatten(plot, item, customvalue=1)
-
+
peak_location,peak_size=self.has_peaks(plot,abs_devs)
#close all open files
item.curve.close_all()
del item.curve
del item
return peak_location, peak_size
-
+
#------------------------
#------commands----------
- #------------------------
+ #------------------------
def do_peaks(self,args):
'''
PEAKS
'''
if len(args)==0:
args=self.convfilt_config['mindeviation']
-
+
try:
abs_devs=float(args)
except:
print 'Wrong argument, using config value'
abs_devs=float(self.convfilt_config['mindeviation'])
-
+
defplots=self.current.curve.default_plots()[0] #we need the raw, uncorrected plots
-
+
if 'flatten' in self.config['plotmanips']:
flatten=self._find_plotmanip('flatten') #extract flatten plot manipulator
defplots=flatten(defplots, self.current)
else:
print 'You have the flatten plot manipulator not loaded. Enabling it could give you better results.'
-
+
peak_location,peak_size=self.has_peaks(defplots,abs_devs)
print 'Found '+str(len(peak_location))+' peaks.'
to_dump='peaks '+self.current.path+' '+str(len(peak_location))
self.outlet.push(to_dump)
#print peak_location
-
+
#if no peaks, we have nothing to plot. exit.
if len(peak_location)==0:
return
-
+
#otherwise, we plot the peak locations.
xplotted_ret=self.plots[0].vectors[1][0]
yplotted_ret=self.plots[0].vectors[1][1]
xgood=[xplotted_ret[index] for index in peak_location]
ygood=[yplotted_ret[index] for index in peak_location]
-
+
recplot=self._get_displayed_plot()
recplot.vectors.append([xgood,ygood])
if recplot.styles==[]:
else:
recplot.styles+=['scatter']
recplot.colors+=[None]
-
+
self._send_plot([recplot])
-
+
def do_convfilt(self,args):
'''
CONVFILT
If called without arguments, it uses default values.
'''
-
+
min_npks=self.convfilt_config['minpeaks']
min_deviation=self.convfilt_config['mindeviation']
-
+
args=args.split(' ')
if len(args) == 2:
min_npks=int(args[0])
min_deviation=int(args[1])
else:
pass
-
+
print 'Processing playlist...'
print '(Please wait)'
notflat_list=[]
-
+
c=0
for item in self.current_list:
c+=1
-
- try:
+
+ try:
peak_location,peak_size=self.exec_has_peaks(item,min_deviation)
if len(peak_location)>=min_npks:
isok='+'
except:
peak_location,peak_size=[],[]
print 'Curve',item.path, 'is',c,'of',len(self.current_list),': cannot be filtered. Probably unable to retrieve force data from corrupt file.'
-
+
if len(peak_location)>=min_npks:
item.peak_location=peak_location
item.peak_size=peak_size
item.curve=None #empty the item object, to further avoid memory leak
notflat_list.append(item)
+ for i in range(1000):
+ k=0
+
#Warn that no flattening had been done.
if not ('flatten' in self.config['plotmanips']):
print 'Flatten manipulator was not found. Processing was done without flattening.'
print 'Try to enable it in your configuration file for better results.'
-
+
if len(notflat_list)==0:
print 'Found nothing interesting. Check your playlist, could be a bug or criteria could be too much stringent'
return
self.current_list=notflat_list
self.current=self.current_list[self.pointer]
self.do_plot(0)
-
-
+
+
def do_setconv(self,args):
'''
SETCONV
print 'This is not an internal convfilt variable!'
print 'Run "setconv" without arguments to see a list of defined variables.'
return
-
+
if len(args)==1:
print self.convfilt_config[args[0]]
elif len(args)>1:
#########################
#HANDLING OF CONFIGURATION FILE
- class ConvfiltConfig:
+ class ConvfiltConfig(object):
'''
Handling of convfilt configuration file
-
+
Mostly based on the simple-yet-useful examples of the Python Library Reference
about xml.dom.minidom
-
+
FIXME: starting to look a mess, should require refactoring
'''
-
+
def __init__(self):
self.config={}
-
-
+
+
def load_config(self, filename):
- myconfig=file(filename)
+ myconfig=file(filename)
#the following 3 lines are needed to strip newlines. otherwise, since newlines
#are XML elements too, the parser would read them (and re-save them, multiplying
#newlines...)
the_file=myconfig.read()
the_file_lines=the_file.split('\n')
the_file=''.join(the_file_lines)
-
- self.config_tree=xml.dom.minidom.parseString(the_file)
-
+
+ self.config_tree=xml.dom.minidom.parseString(the_file)
+
def getText(nodelist):
#take the text from a nodelist
#from Python Library Reference 13.7.2
if node.nodeType == node.TEXT_NODE:
rc += node.data
return rc
-
+
def handleConfig(config):
noiseabsdev_elements=config.getElementsByTagName("noise_absdev")
convfilt_elements=config.getElementsByTagName("convfilt")
handleAbsdev(noiseabsdev_elements)
handleConvfilt(convfilt_elements)
-
+
def handleAbsdev(noiseabsdev_elements):
for element in noiseabsdev_elements:
for attribute in element.attributes.keys():
self.config[attribute]=element.getAttribute(attribute)
-
+
def handleConvfilt(convfilt_elements):
for element in convfilt_elements:
for attribute in element.attributes.keys():
self.config[attribute]=element.getAttribute(attribute)
-
+
handleConfig(self.config_tree)
#making items in the dictionary machine-readable
for item in self.config.keys():
self.config[item]=eval(self.config[item])
except NameError: #if it's an unreadable string, keep it as a string
pass
-
- return self.config
+
+ return self.config
- # -*- coding: utf-8 -*-
+#!/usr/bin/env python
++# -*- coding: iso-8859-1 -*-
+
'''
generalvclamp.py
Plugin regarding general velocity clamp measurements
'''
- from libhooke import WX_GOOD, ClickedPoint
+ from hooke.libhooke import WX_GOOD, ClickedPoint
import wxversion
wxversion.select(WX_GOOD)
from wx import PostEvent
warnings.simplefilter('ignore',np.RankWarning)
- class generalvclampCommands:
-
+ class generalvclampCommands(object):
+
def _plug_init(self):
self.basecurrent=None
self.basepoints=None
self.autofile=''
-
+
def do_distance(self,args):
'''
DISTANCE
return
else:
dx,unitx,dy,unity=self._delta(set=1)
- print "%.1f nm" %(dx*(10**9))
+ print str(dx*(10**9))+' nm'
to_dump='distance '+self.current.path+' '+str(dx*(10**9))+' nm'
self.outlet.push(to_dump)
Measure the force difference (in pN) between two points
---------------
Syntax: force
- '''
+ '''
if self.current.curve.experiment == 'clamp':
print 'This command makes no sense for a force clamp experiment.'
return
dx,unitx,dy,unity=self._delta(set=1)
- print " %.1f pN"%(dy*(10**12))
+ print str(dy*(10**12))+' pN'
to_dump='force '+self.current.path+' '+str(dy*(10**12))+' pN'
self.outlet.push(to_dump)
-
-
+
+
def do_forcebase(self,args):
'''
FORCEBASE
(generalvclamp.py)
Measures the difference in force (in pN) between a point and a baseline
took as the average between two points.
-
+
The baseline is fixed once for a given curve and different force measurements,
unless the user wants it to be recalculated
------------
'''
rebase=False #if true=we select rebase
maxpoint=False #if true=we measure the maximum peak
-
+
plot=self._get_displayed_plot()
whatset=1 #fixme: for all sets
if 'rebase' in args or (self.basecurrent != self.current.path):
rebase=True
if 'max' in args:
maxpoint=True
-
+
if rebase:
print 'Select baseline'
self.basepoints=self._measure_N_points(N=2, whatset=whatset)
self.basecurrent=self.current.path
-
+
if maxpoint:
print 'Select two points'
points=self._measure_N_points(N=2, whatset=whatset)
points=self._measure_N_points(N=1, whatset=whatset)
#whatplot=points[0].dest
y=points[0].graph_coords[1]
-
+
#fixme: code duplication
boundaries=[self.basepoints[0].index, self.basepoints[1].index]
boundaries.sort()
to_average=plot.vectors[whatset][1][boundaries[0]:boundaries[1]] #y points to average
-
+
avg=np.mean(to_average)
forcebase=abs(y-avg)
- print "%.1f pN"%(forcebase*(10**12))
+ print str(forcebase*(10**12))+' pN'
to_dump='forcebase '+self.current.path+' '+str(forcebase*(10**12))+' pN'
self.outlet.push(to_dump)
-\r
+
def plotmanip_multiplier(self, plot, current):
'''
- Multiplies all the Y values of an SMFS curve by a value stored in the 'force_multiplier'\r
+ Multiplies all the Y values of an SMFS curve by a value stored in the 'force_multiplier'
configuration variable. Useful for calibrations and other stuff.
'''
-
+
#not a smfs curve...
if current.curve.experiment != 'smfs':
return plot
-
+
#only one set is present...
if len(self.plots[0].vectors) != 2:
return plot
-
+
#multiplier is 1...
if (self.config['force_multiplier']==1):
- return plot\r
-\r
+ return plot
+
for i in range(len(plot.vectors[0][1])):
- plot.vectors[0][1][i]=plot.vectors[0][1][i]*self.config['force_multiplier'] \r
-\r
- for i in range(len(plot.vectors[1][1])):
- plot.vectors[1][1][i]=plot.vectors[1][1][i]*self.config['force_multiplier']\r
-\r
- return plot \r
+ plot.vectors[0][1][i]=plot.vectors[0][1][i]*self.config['force_multiplier']
+ for i in range(len(plot.vectors[1][1])):
+ plot.vectors[1][1][i]=plot.vectors[1][1][i]*self.config['force_multiplier']
+ return plot
+
+
def plotmanip_flatten(self, plot, current, customvalue=False):
'''
Subtracts a polynomial fit to the non-contact part of the curve, as to flatten it.
- the best polynomial fit is chosen among polynomials of degree 1 to n, where n is
+ the best polynomial fit is chosen among polynomials of degree 1 to n, where n is
given by the configuration file or by the customvalue.
-
+
customvalue= int (>0) --> starts the function even if config says no (default=False)
'''
-
+
#not a smfs curve...
if current.curve.experiment != 'smfs':
return plot
-
+
#only one set is present...
if len(self.plots[0].vectors) != 2:
return plot
-
+
#config is not flatten, and customvalue flag is false too
if (not self.config['flatten']) and (not customvalue):
return plot
-
+
max_exponent=12
delta_contact=0
-
+
if customvalue:
max_cycles=customvalue
else:
max_cycles=self.config['flatten'] #Using > 1 usually doesn't help and can give artefacts. However, it could be useful too.
-
+
contact_index=self.find_contact_point()
-
+
valn=[[] for item in range(max_exponent)]
yrn=[0.0 for item in range(max_exponent)]
errn=[0.0 for item in range(max_exponent)]
-
+
+ #Check if we have a proper numerical value
+ try:
+ zzz=int(max_cycles)
+ except:
+ #Loudly and annoyingly complain if it's not a number, then fallback to zero
+ print '''Warning: flatten value is not a number!
+ Use "set flatten" or edit hooke.conf to set it properly
+ Using zero.'''
+ max_cycles=0
+
for i in range(int(max_cycles)):
-
+
x_ext=plot.vectors[0][0][contact_index+delta_contact:]
y_ext=plot.vectors[0][1][contact_index+delta_contact:]
x_ret=plot.vectors[1][0][contact_index+delta_contact:]
return plot
best_exponent=errn.index(min(errn))
-
+
#extension
ycorr_ext=y_ext-yrn[best_exponent]+y_ext[0] #noncontact part
- yjoin_ext=np.array(plot.vectors[0][1][0:contact_index+delta_contact]) #contact part
+ yjoin_ext=np.array(plot.vectors[0][1][0:contact_index+delta_contact]) #contact part
#retraction
ycorr_ret=y_ret-yrn[best_exponent]+y_ext[0] #noncontact part
yjoin_ret=np.array(plot.vectors[1][1][0:contact_index+delta_contact]) #contact part
-
+
ycorr_ext=np.concatenate((yjoin_ext, ycorr_ext))
ycorr_ret=np.concatenate((yjoin_ret, ycorr_ret))
-
+
plot.vectors[0][1]=list(ycorr_ext)
plot.vectors[1][1]=list(ycorr_ret)
-
+
return plot
-
+
#---SLOPE---
def do_slope(self,args):
'''
if fitspan == 0:
# Gets the Xs of two clicked points as indexes on the current curve vector
print 'Click twice to delimit chunk'
- clickedpoints=[]
points=self._measure_N_points(N=2,whatset=1)
- clickedpoints=[points[0].index,points[1].index]
- clickedpoints.sort()
else:
print 'Click once on the leftmost point of the chunk (i.e.usually the peak)'
- clickedpoints=[]
points=self._measure_N_points(N=1,whatset=1)
- clickedpoints=[points[0].index-fitspan,points[0].index]
+
+ slope=self._slope(points,fitspan)
- # Calls the function linefit_between
+ # Outputs the relevant slope parameter
+ print 'Slope:'
+ print str(slope)
+ to_dump='slope '+self.current.path+' '+str(slope)
+ self.outlet.push(to_dump)
+
+ def _slope(self,points,fitspan):
+ # Calls the function linefit_between
parameters=[0,0,[],[]]
+ try:
+ clickedpoints=[points[0].index,points[1].index]
+ clickedpoints.sort()
+ except:
+ clickedpoints=[points[0].index-fitspan,points[0].index]
+
try:
parameters=self.linefit_between(clickedpoints[0],clickedpoints[1])
except:
print 'Cannot fit. Did you click twice the same point?'
return
-
- # Outputs the relevant slope parameter
- print 'Slope:'
- print str(parameters[0])
- to_dump='slope '+self.current.path+' '+str(parameters[0])
- self.outlet.push(to_dump)
-
+
# Makes a vector with the fitted parameters and sends it to the GUI
xtoplot=parameters[2]
ytoplot=[]
x=0
for x in xtoplot:
ytoplot.append((x*parameters[0])+parameters[1])
-
+
clickvector_x, clickvector_y=[], []
for item in points:
clickvector_x.append(item.graph_coords[0])
clickvector_y.append(item.graph_coords[1])
lineplot=self._get_displayed_plot(0) #get topmost displayed plot
-
+
lineplot.add_set(xtoplot,ytoplot)
lineplot.add_set(clickvector_x, clickvector_y)
-
-
+
+
if lineplot.styles==[]:
lineplot.styles=[None,None,None,'scatter']
else:
lineplot.styles+=[None,'scatter']
if lineplot.colors==[]:
- lineplot.styles=[None,None,None,None]
+ lineplot.colors=[None,None,'black',None]
else:
- lineplot.colors+=[None,None]
+ lineplot.colors+=['black',None]
+
+
+ self._send_plot([lineplot])
+ return parameters[0]
- self._send_plot([lineplot])
def linefit_between(self,index1,index2,whatset=1):
'''
# Translates the indexes into two vectors containing the x,y data to fit
xtofit=self.plots[0].vectors[whatset][0][index1:index2]
ytofit=self.plots[0].vectors[whatset][1][index1:index2]
-
+
# Does the actual linear fitting (simple least squares with numpy.polyfit)
linefit=[]
linefit=np.polyfit(xtofit,ytofit,1)
return (linefit[0],linefit[1],xtofit,ytofit)
-
-
-
--- /dev/null
- from libhooke import WX_GOOD, ClickedPoint
+# -*- coding: utf-8 -*-
- print 'Saving...'
++from ..libhooke import WX_GOOD, ClickedPoint
+import wxversion
+wxversion.select(WX_GOOD)
+from wx import PostEvent
+import numpy as np
+import scipy as sp
+import copy
+import os.path
+import time
+import sys
+import warnings
+warnings.simplefilter('ignore',np.RankWarning)
+
+
+class jumpstatCommands():
+
+ def do_jumpstat(self,args):
+ '''
+ JUMPSTAT
+ jumpstat.py
+ Based on the convolution recognition automatically give:
+ - the delta distance between the peaks,
+ - the delta-force from the top of the peaks and subsequent relaxation,
+ - the delta-force from the top of the peaks and the baseline
+ The command also allows removing unwanted peaks that may be due to interference.
+ When you first issue the command, it will ask for the filename. If you are giving the filename
+ of an existing file, autopeak will resume it and append measurements to it. If you are giving
+ a new filename, it will create the file and append to it until you close Hooke.
+ You can also define a minimum deviation of the peaks.
+
+ Syntax:
+ jumpstat [deviation]
+ deviation = number of times the convolution signal is above the noise absolute deviation.
+ '''
+
+
+ #finding the max and the minimum positions for all the peaks
+ noflatten=False
+ #we use if else only to avoid a "bad input" message from find_current_peaks
+ if (len(args)==0):
+ max_peaks_location, peak_size=self.find_current_peaks(noflatten)
+ min_peaks_location, pks2=self.find_current_peaks(noflatten, True, False)
+ else:
+ max_peaks_location, peak_size=self.find_current_peaks(noflatten, args)
+ min_peaks_location, pks2=self.find_current_peaks(noflatten, args, False)
+
+
+ #print "max_peaks_location: "+str(len(max_peaks_location))
+ #print "min_peaks_location: "+str(len(min_peaks_location))
+
+ #if no peaks, we have nothing to plot. exit.
+ if len(max_peaks_location)==0:
+ print "No peaks on this curve."
+ return
+
+ if len(max_peaks_location)!=len(min_peaks_location):
+ print "Something went wrong in peaks recognition, number of minima is different from number of maxima. Exiting."
+ return
+
+ #otherwise, we plot the peak locations.
+ xplotted_ret=self.plots[0].vectors[1][0]
+ yplotted_ret=self.plots[0].vectors[1][1]
+ xgood=[xplotted_ret[index] for index in max_peaks_location]
+ ygood=[yplotted_ret[index] for index in max_peaks_location]
+
+ xafter=[xplotted_ret[index] for index in min_peaks_location]
+ yafter=[yplotted_ret[index] for index in min_peaks_location]
+
+ recplot=self._get_displayed_plot()
+ recplot2=self._get_displayed_plot()
+ recplot.vectors.append([xgood,ygood])
+ recplot2.vectors.append([xafter,yafter])
+
+ if recplot.styles==[]:
+ recplot.styles=[None,None,'scatter']
+ recplot.colors=[None,None,None]
+ else:
+ recplot.styles+=['scatter']
+ recplot.colors+=[None]
+
+ if recplot2.styles==[]:
+ recplot2.styles=[None,None,None]
+ recplot2.colors=[None,'1.0',None]
+ else:
+ recplot2.styles+=['scatter']
+ recplot2.colors+=['0.5']
+
+ self._send_plot([recplot])
+ self._send_plot([recplot2])
+
+
+ #finding the baseline
+ self.basepoints=self.baseline_points(max_peaks_location, recplot)
+ boundaries=[self.basepoints[0].index, self.basepoints[1].index]
+ boundaries.sort()
+ to_average=recplot.vectors[1][1][boundaries[0]:boundaries[1]] #y points to average
+ avg=np.mean(to_average)
+
+
+ dist=[]
+ jumpforce=[]
+ force=[]
+
+ #we calculate the distance vector
+ for g in range(len(max_peaks_location)-1):
+ dist.append((10**9)*(xplotted_ret[max_peaks_location[g]]-xplotted_ret[max_peaks_location[g+1]]))
+ print "Distance values for the peaks in nm:"
+ print dist
+
+ #the jump-force vector
+ for g in range(len(max_peaks_location)):
+ jumpforce.append((10**12) *(yplotted_ret[min_peaks_location[g]] -yplotted_ret[max_peaks_location[g]]) )
+ print "Force values for the jumps of the peaks in pN:"
+ print jumpforce
+
+ #the force from baseline vector
+ for g in range(len(max_peaks_location)):
+ force.append((10**12)*(avg-yplotted_ret[max_peaks_location[g]]))
+ print "Force values for the peaks in pN:"
+ print force
+
+
+
+ #Now ask for the peaks that we don't want
+ print 'Peaks to ignore (0,1...n from contact point,return to take all)'
+ print 'N to discard measurement'
+ exclude_raw=raw_input('Input:')
+ if exclude_raw=='N':
+ print 'Discarded.'
+ return
+
+ if not exclude_raw=='':
+ exclude=exclude_raw.split(',')
+ #we convert in numbers the input
+ try:
+ exclude=[int(item) for item in exclude]
+ except:
+ print 'Bad input, taking nothing.'
+ return
+
+# we remove the peaks that we don't want from the list; we need a counter because if we remove
+# a peak, the other peaks in the list are shifted by one at each step
+ count=0
+ for a in exclude:
+ if (a==0):
+ max_peaks_location=max_peaks_location[1:]
+ min_peaks_location=min_peaks_location[1:]
+ else:
+ new_a=a-count
+ max_peaks_location= max_peaks_location[0:new_a]+max_peaks_location[new_a+1:]
+ min_peaks_location= min_peaks_location[0:new_a]+min_peaks_location[new_a+1:]
+ peak_size= peak_size[0:new_a]+peak_size[new_a+1:]
+ count+=1
+
+
+ #print "max_peaks_location: "+str(len(max_peaks_location))
+ #print "min_peaks_location: "+str(len(min_peaks_location))
+
+
+ dist=[]
+ jumpforce=[]
+ force=[]
+ #we recalculate the distances and the forces after the removing of the unwanted peaks
+ for g in range(len(max_peaks_location)-1):
+ dist.append(xplotted_ret[max_peaks_location[g]]-xplotted_ret[max_peaks_location[g+1]])
+ for g in range(len(max_peaks_location)):
+ jumpforce.append( yplotted_ret[min_peaks_location[g]] - yplotted_ret[max_peaks_location[g]] )
+ for g in range(len(max_peaks_location)):
+ force.append(avg - yplotted_ret[max_peaks_location[g]])
+
+
+
+
+
+ #Save file info
+ if self.autofile=='':
+ self.autofile=raw_input('Jumpstat filename? (return to ignore) ')
+ if self.autofile=='':
+ print 'Not saved.'
+ return
+
+ if not os.path.exists(self.autofile):
+ f=open(self.autofile,'w+')
+ f.write('Analysis started '+time.asctime()+'\n')
+ f.write('----------------------------------------\n')
+ f.write('; Delta Distance length (m); Jump Force pN; Standard Force pN\n')
+ f.write(self.current.path+'\n')
+ for k in range(len(dist)):
+ f.write(";")
+ f.write(str(dist[k])+";"+str(jumpforce[k])+";"+str(force[k])+"\n" )
+ f.write("\n")
+ f.close()
+
+ else:
+ f=open(self.autofile,'a+')
+ f.write(self.current.path+'\n')
+ for k in range(len(dist)):
+ f.write(";")
+ f.write(str(dist[k])+";"+str(jumpforce[k])+";"+str(force[k])+"\n" )
+ f.write("\n")
+ f.close()
+
++ print 'Saving...'
# -*- coding: utf-8 -*-
- from libhooke import WX_GOOD, ClickedPoint
+ from hooke.libhooke import WX_GOOD, ClickedPoint
+
import wxversion
wxversion.select(WX_GOOD)
from wx import PostEvent
warnings.simplefilter('ignore',np.RankWarning)
- class multidistanceCommands:
-
+ class multidistanceCommands(object):
+
def do_multidistance(self,args):
'''
MULTIDISTANCE
of an existing file, autopeak will resume it and append measurements to it. If you are giving
a new filename, it will create the file and append to it until you close Hooke.
You can also define a minimun deviation of the peaks.
-
+
Syntax:
multidistance [deviation]
deviation = number of times the convolution signal is above the noise absolute deviation.
'''
- def find_current_peaks(noflatten, a):
- #Find peaks.
- if len(a)==0:
- a=self.convfilt_config['mindeviation']
- try:
- abs_devs=float(a)
- except:
- print "Bad input, using default."
- abs_devs=self.convfilt_config['mindeviation']
-
- defplot=self.current.curve.default_plots()[0]
- if not noflatten:
- flatten=self._find_plotmanip('flatten') #Extract flatten plotmanip
- defplot=flatten(defplot, self.current, customvalue=1) #Flatten curve before feeding it to has_peaks
- pk_loc,peak_size=self.has_peaks(defplot, abs_devs)
- return pk_loc, peak_size
-
-
+
noflatten=False
- peaks_location, peak_size=find_current_peaks(noflatten, args)
-
+ peaks_location, peak_size=self.find_current_peaks(noflatten)
+
#if no peaks, we have nothing to plot. exit.
if len(peaks_location)==0:
return
-
+
#otherwise, we plot the peak locations.
xplotted_ret=self.plots[0].vectors[1][0]
yplotted_ret=self.plots[0].vectors[1][1]
xgood=[xplotted_ret[index] for index in peaks_location]
ygood=[yplotted_ret[index] for index in peaks_location]
-
+
recplot=self._get_displayed_plot()
recplot.vectors.append([xgood,ygood])
if recplot.styles==[]:
if exclude_raw=='N':
print 'Discarded.'
return
-
+
if not exclude_raw=='':
exclude=exclude_raw.split(',')
#we convert in numbers the input
peaks_location= peaks_location[0:new_a]+peaks_location[new_a+1:]
peak_size= peak_size[0:new_a]+peak_size[new_a+1:]
count+=1
-
+
#we calculate the distance vector
dist=[]
for i in range(len(peaks_location)-1):
dist.append(xplotted_ret[peaks_location[i]]-xplotted_ret[peaks_location[i+1]])
-
-
+
+
if self.autofile=='':
print 'Not saved.'
return
-
+
if not os.path.exists(self.autofile):
f=open(self.autofile,'w+')
f.write('Analysis started '+time.asctime()+'\n')
f.write(str(o))
f.write("\n")
f.close()
-
+
print 'Saving...'
f=open(self.autofile,'a+')
-
+
f.write(self.current.path+'\n')
for i in dist:
f.write(";")
f.write(str(i))
- f.write("\n")
+ f.write("\n")
f.close()
--- /dev/null
- from libhooke import WX_GOOD, ClickedPoint
+#!/usr/bin/env python
+
+'''
+multifit.py
+
+Alberto Gomez-Casado, (c) 2010, University of Twente (The Netherlands)
+Licensed under GNU GPL v2
+'''
+
+#FIXME clean this, probably some dependencies are not needed
+
++from ..libhooke import WX_GOOD, ClickedPoint
+import wxversion
+wxversion.select(WX_GOOD)
+from wx import PostEvent
+import numpy as np
+import scipy as sp
+import copy
+import os.path
+import time
+import tempfile
+import warnings
+warnings.simplefilter('ignore',np.RankWarning)
+import Queue
+
+global measure_wlc
+global EVT_MEASURE_WLC
+
+global events_from_fit
+events_from_fit=Queue.Queue() #GUI ---> CLI COMMUNICATION
+
+
+# Command mix-in providing interactive WLC/eFJC fitting for the Hooke CLI.
+# NOTE(review): this class is not standalone — it is mixed into the main Hooke
+# command object and relies on attributes/methods it provides (self.config,
+# self.current, self.current_list, self.plots, self.autofile, self.do_plot,
+# self._measure_N_points, self._get_displayed_plot, self._send_plot,
+# self.wlc_fit, self.efjc_fit, self._slope). TODO confirm against hooke.py.
+class multifitCommands:
+
+    def do_multifit(self,args):
+        '''
+        MULTIFIT
+        (multifit.py)
+        Presents curves for manual analysis in a comfortable mouse-only fashion.
+        Obtains contour length, persistence length, rupture force and
+        slope - loading rate.
+        WLC is shown in red, eFJC in blue.
+        -------------
+        Syntax:
+        multifit [pl=value] [kl=value] [t=value] [slopew=value] [basew=value]
+        [slope2p] [justone]
+
+        pl=[value] and kl=[value]: Use a fixed persistent length (WLC) or Kuhn
+        length (eFJC) for the fit. If pl is not given, the fit will be
+        a 2-variable fit.
+        DO NOT put spaces between 'pl', '=' and the value.
+        eFJC fit works better with a fixed kl
+        The value must be in nanometers.
+
+        t=[value] : Use a user-defined temperature. The value must be in
+        kelvins; by default it is 293 K.
+        DO NOT put spaces between 't', '=' and the value.
+
+        slopew and basew : width in points for slope fitting (points to the
+        right of clicked rupture) and base level fitting (points to
+        the left of clicked top of rupture).
+        If slopew is not provided, the routine will ask for a 5th
+        point to fit the slope.
+        DO NOT put spaces between 'slopew' or 'basew', '=' value.
+
+        slope2p : calculates the slope from the two clicked points,
+        instead of fitting data between them
+
+        justone : performs the fits over current curve instead of iterating
+
+        See fit command help for more information on the options and fit
+        procedures.
+        NOTE: centerzero plot modifier should be activated (set centerzero 1).
+        '''
+
+        #NOTE duplicates a lot of code from do_fit in fit.py, a call to it could be used directly
+        #but it is easier to control the program flow bypassing it
+
+        # Defaults; each may be overridden by the key=value arguments parsed below.
+        pl_value=None
+        kl_value=None
+        T=self.config['temperature']
+        slopew=None
+        basew=15
+        justone=False
+        twops=False
+
+        #FIXME spaces are not allowed between pl or t and value
+        # Ad-hoc key=value argument parsing; no validation of malformed values
+        # (a bad float simply raises to the caller).
+        for arg in args.split():
+            #look for a persistent length argument.
+            if 'pl=' in arg:
+                pl_expression=arg.split('=')
+                pl_value=float(pl_expression[1]) #actual value
+            if 'kl=' in arg:
+                kl_expression=arg.split('=')
+                kl_value=float(kl_expression[1]) #actual value
+            #look for a T argument.
+            if ('t=' in arg) or ('T=' in arg):
+                t_expression=arg.split('=')
+                T=float(t_expression[1])
+            #look for a basew argument.
+            if ('basew=' in arg):
+                basew_expression=arg.split('=')
+                basew=int(basew_expression[1])
+            #look for a slopew argument.
+            if ('slopew=' in arg):
+                slopew_expression=arg.split('=')
+                slopew=int(slopew_expression[1])
+            if('justone' in arg):
+                justone=True
+            if('slope2p' in arg):
+                twops=True
+
+        counter=0        # curves shown so far (used for the periodic "continue?" prompt)
+        savecounter=0    # curves whose results were saved
+        curveindex=0     # index into self.current_list
+
+        # Optionally skip the first N-1 curves of the playlist.
+        if not justone:
+            print 'What curve no. you would like to start? (enter for ignoring)'
+            skip=raw_input()
+
+            if skip.isdigit()==False:
+                skip=0
+            else:
+                skip=int(skip)-1
+                print 'Skipping '+str(skip)+ ' curves'
+        else:
+            skip=0
+        #begin presenting curves for analysis
+        while curveindex <len(self.current_list):
+            if not justone:
+                counter+=1
+                curve=self.current_list[curveindex]
+                if skip>curveindex:
+                    curveindex+=1
+                    continue
+
+                #give periodically the opportunity to stop the analysis
+                if counter%20==0 and counter>0:
+                    print '\nYou checked '+str(counter)+' curves. Do you want to continue?'
+                    self.current=curve
+                    self.do_plot(0)
+                    if self.YNclick():
+                        print 'Going on...'
+                    else:
+                        break
+                else:
+                    self.current=curve
+                    self.do_plot(0)
+            else:
+                curve=self.current
+                self.do_plot(0)
+            if not justone:
+                print '\nCurve '+str(curveindex+1)+' of '+str(len(self.current_list))
+            print 'Click contact point or left end (red area)of the curve to skip'
+            #highlights an area dedicated to reject current curve
+            # (last sixth of the retraction trace; clicking there means "skip").
+            sizeret=len(self.plots[0].vectors[1][0])
+            noarea=self.plots[0].vectors[1][0][-sizeret/6:-1]
+            noareaplot=copy.deepcopy(self._get_displayed_plot())
+            #noise dependent slight shift to improve visibility
+            noareaplot.add_set(noarea,np.ones_like(noarea)*5.0*np.std(self.plots[0].vectors[1][1][-sizeret/6:-1]))
+            noareaplot.styles+=['scatter']
+            noareaplot.colors+=["red"]
+            self._send_plot([noareaplot])
+            contact_point=self._measure_N_points(N=1, whatset=1)[0]
+            contact_point_index=contact_point.index
+            noareaplot.remove_set(-1)
+            #remove set leaves some styles disturbing the next graph, fixed by doing do_plot
+            self.do_plot(0)
+            # Click landed in the red "reject" area -> skip this curve.
+            if contact_point_index>5.0/6.0*sizeret:
+                if justone:
+                    break
+                curveindex+=1
+                continue
+
+            # Remember the chosen contact point for other fit-related commands.
+            self.wlccontact_point=contact_point
+            self.wlccontact_index=contact_point.index
+            self.wlccurrent=self.current.path
+
+            print 'Now click two points for the fitting area (one should be the rupture point)'
+            wlcpoints=self._measure_N_points(N=2,whatset=1)
+            print 'And one point of the top of the jump'
+            toppoint=self._measure_N_points(N=1,whatset=1)
+            slopepoint=ClickedPoint()
+            # A 5th click is only needed when no slopew window was given.
+            if slopew==None:
+                print 'Click a point to calculate slope'
+                slopepoint=self._measure_N_points(N=1,whatset=1)
+
+            fitpoints=[contact_point]+wlcpoints
+            #use the currently displayed plot for the fit
+            displayed_plot=self._get_displayed_plot()
+
+            #use both fit functions
+            # NOTE(review): bare except hides the real failure cause; also
+            # wlcerror/fjcerror are set but never read, and on failure the
+            # *params/*fit_errors names stay unbound, so the result section
+            # below can raise NameError — TODO confirm and fix upstream.
+            try:
+                wlcparams, wlcyfit, wlcxfit, wlcfit_errors,wlc_qstd = self.wlc_fit(fitpoints, displayed_plot.vectors[1][0], displayed_plot.vectors[1][1],pl_value,T, return_errors=True )
+                wlcerror=False
+            except:
+                print 'WLC fit not possible'
+                wlcerror=True
+
+            try:
+                fjcparams, fjcyfit, fjcxfit, fjcfit_errors,fjc_qstd = self.efjc_fit(fitpoints, displayed_plot.vectors[1][0], displayed_plot.vectors[1][1],kl_value,T, return_errors=True )
+                fjcerror=False
+            except:
+                print 'eFJC fit not possible'
+                fjcerror=True
+
+            #Measure rupture force
+            # The rupture point is the lower (in Y) of the two fit-area clicks.
+            ruptpoint=ClickedPoint()
+            if wlcpoints[0].graph_coords[1]<wlcpoints[1].graph_coords[1]:
+                ruptpoint=wlcpoints[0]
+            else:
+                ruptpoint=wlcpoints[1]
+            # Base level = average of basew points right of the clicked jump top.
+            tpindex=toppoint[0].index
+            toplevel=np.average(displayed_plot.vectors[1][1][tpindex:tpindex+basew])
+            force=toplevel-ruptpoint.graph_coords[1]
+
+            #Measure the slope - loading rate
+
+            if slopew==None:
+                if twops:
+                    # Two-point slope: straight line through rupture and slope clicks.
+                    xint=ruptpoint.graph_coords[0]-slopepoint[0].graph_coords[0]
+                    yint=ruptpoint.graph_coords[1]-slopepoint[0].graph_coords[1]
+                    slope=yint/xint
+                    slopeplot=self._get_displayed_plot(0) #get topmost displayed plot
+                    slopeplot.add_set([ruptpoint.graph_coords[0],slopepoint[0].graph_coords[0]],[ruptpoint.graph_coords[1],slopepoint[0].graph_coords[1]])
+                    if slopeplot.styles==[]:
+                        slopeplot.styles=[None,None,'scatter']
+                        slopeplot.colors=[None,None,'red']
+                    else:
+                        slopeplot.styles+=['scatter']
+                        slopeplot.colors+=['red']
+                else:
+                    # Fit the data between the rupture click and the slope click.
+                    slope=self._slope([ruptpoint]+slopepoint,slopew)
+            else:
+                # Fixed window: fit slopew points to the right of the rupture.
+                slope=self._slope([ruptpoint],slopew)
+
+            #plot results (_slope already did)
+
+            #now we have the fit, we can plot it
+            #add the clicked points in the final PlotObject
+            clickvector_x, clickvector_y=[], []
+            for item in wlcpoints:
+                clickvector_x.append(item.graph_coords[0])
+                clickvector_y.append(item.graph_coords[1])
+
+            #create a custom PlotObject to gracefully plot the fit along the curves
+
+            #first fix those irritating zoom-destroying fits
+            # (clamp fit samples that dive far below the data to the base level)
+            lowestpoint=min(displayed_plot.vectors[1][1])
+            for i in np.arange(0,len(wlcyfit)):
+                if wlcyfit[i] < 1.2*lowestpoint:
+                    wlcyfit[i]=toplevel
+            for i in np.arange(0,len(fjcyfit)):
+                if fjcyfit[i] < 1.2*lowestpoint:
+                    fjcyfit[i]=toplevel
+
+            fitplot=copy.deepcopy(displayed_plot)
+            fitplot.add_set(wlcxfit,wlcyfit)
+            fitplot.add_set(fjcxfit,fjcyfit)
+            fitplot.add_set(clickvector_x,clickvector_y)
+
+            # WLC in red, eFJC in blue, clicked points as black scatter.
+            fitplot.styles+=[None,None,'scatter']
+            fitplot.colors+=["red","blue",None]
+
+            self._send_plot([fitplot])
+
+
+            #present results of measurement
+            # Fixed-pl/kl fits return a single parameter; append the fixed
+            # value (converted nm -> m) so indexing below is uniform.
+            if len(wlcparams)==1:
+                wlcparams.append(pl_value*1e-9)
+            if len(fjcparams)==1:
+                fjcparams.append(kl_value*1e-9)
+
+            # Convert fit errors from meters to nanometers for display.
+            if fjcfit_errors:
+                fjcfit_nm=[i*(10**9) for i in fjcfit_errors]
+                if len(fjcfit_nm)==1:
+                    fjcfit_nm.append(0)
+            else:
+                fjcfit_errors=[0,0]
+            if wlcfit_errors:
+                wlcfit_nm=[i*(10**9) for i in wlcfit_errors]
+                if len(wlcfit_nm)==1:
+                    wlcfit_nm.append(0)
+            else:
+                wlcfit_errors=[0,0]
+
+            # Fit quality: residual std normalized by the noise of the last
+            # ~20 samples of the retraction trace.
+            wlc_fitq=wlc_qstd/np.std(displayed_plot.vectors[1][1][-20:-1])
+            fjc_fitq=fjc_qstd/np.std(displayed_plot.vectors[1][1][-20:-1])
+
+            print '\nRESULTS'
+            print 'WLC contour : '+str(1e9*wlcparams[0])+u' \u00b1 '+str(wlcfit_nm[0])+' nm'
+            print 'Per. length : '+str(1e9*wlcparams[1])+u' \u00b1 '+str(wlcfit_nm[1])+' nm'
+            print 'Quality :'+str(wlc_fitq)
+            print '---'
+            print 'eFJC contour : '+str(1e9*fjcparams[0])+u' \u00b1 '+str(fjcfit_nm[0])+' nm'
+            print 'Kuhn length (e): '+str(1e9*fjcparams[1])+u' \u00b1 '+str(fjcfit_nm[1])+' nm'
+            print 'Quality :'+str(fjc_fitq)
+            print '---'
+            print 'Force : '+str(1e12*force)+' pN'
+            print 'Slope : '+str(slope)+' N/m'
+
+            try:
+                #FIXME all drivers should provide retract velocity, in SI or homogeneous units
+                lrate=slope*self.current.curve.retract_velocity
+                print 'Loading rate (UNTESTED):'+str(lrate)
+            except:
+                print 'Loading rate : n/a'
+                lrate='n/a'
+
+            if justone:
+                return
+
+            #save accept/discard/repeat
+            print '\n\nDo you want to save these?'
+            if self.YNclick():
+
+                #Save file info
+                # First save ever: pick (or generate) the results file and
+                # write its header.
+                if self.autofile=='':
+                    self.autofile=raw_input('Results filename? (press enter for a random generated name)')
+                    if self.autofile=='':
+                        [osf,name]=tempfile.mkstemp(prefix='analysis-',suffix='.txt',text=True,dir=self.config['hookedir'])
+                        print 'saving in '+name
+                        self.autofile=name
+                        os.close(osf)
+                        f=open(self.autofile,'a+')
+                        f.write('Analysis started '+time.asctime()+'\n')
+                        f.write('----------------------------------------\n')
+                        f.write(' File ; WLC Cont. length (nm) ; Sigma WLC cl; Per. Length (nm) ; Sigma pl ; WLC-Q ; eFJC Cont. length (nm) ; Sigma eFJC cl ; Kuhn length (nm); Sigma kl ; FJC-Q ;Force (pN) ; Slope (N/m) ; (untested) Loading rate (pN/s)\n')
+                        f.close()
+
+                # User-supplied name that does not exist yet: create with header.
+                if not os.path.exists(self.autofile):
+                    f=open(self.autofile,'a+')
+                    f.write('Analysis started '+time.asctime()+'\n')
+                    f.write('----------------------------------------\n')
+                    f.write(' File ; WLC Cont. length (nm) ; Sigma WLC cl; Per. Length (nm) ; Sigma pl; WLC-Q ;eFJC Cont. length (nm) ; Sigma eFJC cl ; Kuhn length (nm); Sigma kl ; FJC-Q ;Force (pN) ; Slope (N/m) ; (untested) Loading rate (pN/s)\n')
+                    f.close()
+
+                print 'Saving...'
+                savecounter+=1
+                f=open(self.autofile,'a+')
+                f.write(self.current.path)
+                f.write(' ; '+str(1e9*wlcparams[0])+' ; '+str(wlcfit_nm[0])+' ; '+str(1e9*wlcparams[1])+' ; '+str(wlcfit_nm[1])+' ; '+ str(wlc_fitq)+' ; '+str(1e9*fjcparams[0])+' ; '+str(fjcfit_nm[0])+' ; '+str(1e9*fjcparams[1])+' ; '+str(fjcfit_nm[1])+' ; '+str(fjc_fitq)+' ; '+str(1e12*force)+' ; '+ str(slope)+' ; '+str(lrate)+'\n')
+                f.close()
+            else:
+                print '\nWould you like to try again on this same curve?'
+                if self.YNclick():
+                    continue
+            curveindex+=1
+
+        if not justone:
+            print 'You saved '+str(savecounter)+' out of '+str(len(self.current_list))+' total curves.'
+
+
+
+    def YNclick(self):
+        '''
+        Ask the user a yes/no question by clicking on the displayed plot.
+
+        Draws a rough marker around the curve's zero level, then returns
+        True if the clicked point lies above y=0 (absolute coords),
+        False otherwise. The marker set is removed before returning.
+        '''
+        #shows something in the graph to indicate we expect YNclick
+        #FIXME is there an easy way to write in the graph?
+        askplot=self._get_displayed_plot()
+        # Marker geometry scaled from local noise (vertical) and point
+        # spacing (horizontal) so it stays visible at any zoom.
+        center=np.min(askplot.vectors[1][0])+15e-9
+        scale=np.std(askplot.vectors[1][1][-50:-1])
+        hscale=np.mean(np.diff(askplot.vectors[1][0][-10:-1]))
+        xhline=[center-20*hscale,center-10*hscale,center+10*hscale,center+20*hscale]
+        ytopline=[15*scale,20*scale,20*scale,15*scale]
+        ybotline=[-15*scale,-20*scale,-20*scale,-15*scale]
+        yvline=np.arange(-25*scale,26*scale,scale/2)
+        xvline=np.ones_like(yvline)*center
+        xshow=np.concatenate((xvline,xhline,xhline))
+        yshow=np.concatenate((yvline,ytopline,ybotline))
+        askplot.add_set(xshow,yshow)
+        askplot.styles+=['scatter']
+        askplot.colors+=["black"]
+        self._send_plot([askplot])
+        print 'Click any point over y=0 for Yes, under it for No'
+        point=self._measure_N_points(N=1,whatset=1)[0]
+        value=point.absolute_coords[1]
+        # Clean the marker off the plot before answering.
+        askplot.remove_set(-1)
+        self._send_plot([askplot])
+        if value<0:
+            return False
+        else:
+            return True
+
+
+
+
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+ from ..libhooke import WX_GOOD, ClickedPoint, config_file_path
from mdp import pca
- from libhooke import WX_GOOD, ClickedPoint
import wxversion
wxversion.select(WX_GOOD)
from wx import PostEvent
import copy
import os.path
import time
- import libhookecurve as lhc
import pylab as pyl
+ from .. import libhookecurve as lhc
+
+
import warnings
warnings.simplefilter('ignore',np.RankWarning)
- class pclusterCommands:
-
+ class pclusterCommands(object):
+
def _plug_init(self):
self.clustplot1=None
self.clustplot2=None
Automatically measures peaks and extracts informations for further clustering
(c)Paolo Pancaldi, Massimo Sandal 2009
'''
- if self.config['hookedir'][0]=='/':
- slash='/' #a Unix or Unix-like system
- else:
- slash='\\'
blindw = str(self.convfilt_config['blindwindow'])
pclus_dir = "pCluster_blind"+blindw+"_"+time.strftime("%Y%m%d_%H%M")
- self.my_work_dir = os.getcwd()+slash+pclus_dir+slash
+ self.my_work_dir = os.path.join(os.getcwd(), pclus_dir)
self.my_curr_dir = os.path.basename(os.getcwd())
os.mkdir(self.my_work_dir)
-
+
#--Custom persistent length
pl_value=None
for arg in args.split():
pl_value=float(pl_expression[1]) #actual value
else:
pl_value=None
-
+
#configuration variables
min_npks = self.convfilt_config['minpeaks']
min_deviation = self.convfilt_config['mindeviation']
-
+
pclust_filename = "automeasure_"+self.my_curr_dir+"_blind"+blindw+".txt" #raw_input('Automeasure filename? ')
realclust_filename = "coordinate_"+self.my_curr_dir+"_blind"+blindw+".txt" #raw_input('Coordinates filename? ')
peackforce_filename = "peakforce_"+self.my_curr_dir+"_blind"+blindw+".txt" #raw_input('Peacks and Forces filename? ')
-
+
f=open(self.my_work_dir+pclust_filename,'w+')
f.write('Analysis started '+time.asctime()+'\n')
f.write('----------------------------------------\n')
f.write('; Contour length (nm) ; Persistence length (nm) ; Max.Force (pN) ; Slope (N/m) ; Sigma contour (nm) ; Sigma persistence (nm)\n')
f.close()
-
+
f=open(self.my_work_dir+realclust_filename,'w+')
f.write('Analysis started '+time.asctime()+'\n')
f.write('----------------------------------------\n')
f.write('; Peak number ; Mean delta (nm) ; Median delta (nm) ; Mean force (pN) ; Median force (pN) ; First peak length (nm) ; Last peak length (nm) ; Max force (pN) ; Min force (pN) ; Max delta (nm) ; Min delta (nm) ; Peaks Diff\n')
f.close()
-
+
f=open(self.my_work_dir+peackforce_filename,'w+')
f.write('Analysis started '+time.asctime()+'\n')
f.write('----------------------------------------\n')
f.write('; Peak number ; 1 peak Length (nm) ; 1 peak Force (pN) ; 2 peak Length (nm) ; 2 peak Force (pN) ; 3 peak Length (nm) ; 3 peak Force (pN) ; 4 peak Length (nm) ; 4 peak Force (pN) ; 5 peak Length (nm) ; 5 peak Force (pN) ; 6 peak Length (nm) ; 6 peak Force (pN) ; 7 peak Length (nm) ; 7 peak Force (pN) ; 8 peak Length (nm) ; 8 peak Force (pN)\n')
f.close()
-
- # ------ FUNCTION ------
- def fit_interval_nm(start_index,plot,nm,backwards):
- '''
- Calculates the number of points to fit, given a fit interval in nm
- start_index: index of point
- plot: plot to use
- backwards: if true, finds a point backwards.
- '''
- whatset=1 #FIXME: should be decidable
- x_vect=plot.vectors[1][0]
-
- c=0
- i=start_index
- start=x_vect[start_index]
- maxlen=len(x_vect)
- while abs(x_vect[i]-x_vect[start_index])*(10**9) < nm:
- if i==0 or i==maxlen-1: #we reached boundaries of vector!
- return c
- if backwards:
- i-=1
- else:
- i+=1
- c+=1
- return c
+
def plot_informations(itplot,pl_value):
'''
fit_errors [6.5817195369767644e-010, 2.4415923138871498e-011]
'''
fit_points=int(self.config['auto_fit_points']) # number of points to fit before the peak maximum <50>
-
+
T=self.config['temperature'] #temperature of the system in kelvins. By default it is 293 K. <301.0>
cindex=self.find_contact_point(itplot[0]) #Automatically find contact point <158, libhooke.ClickedPoint>
contact_point=self._clickize(itplot[0].vectors[1][0], itplot[0].vectors[1][1], cindex)
self.basepoints=[]
- base_index_0=peak_location[-1]+fit_interval_nm(peak_location[-1], itplot[0], self.config['auto_right_baseline'],False)
+ base_index_0=peak_location[-1]+self.fit_interval_nm(peak_location[-1], itplot[0], self.config['auto_right_baseline'],False)
self.basepoints.append(self._clickize(itplot[0].vectors[1][0],itplot[0].vectors[1][1],base_index_0))
- base_index_1=self.basepoints[0].index+fit_interval_nm(self.basepoints[0].index, itplot[0], self.config['auto_left_baseline'],False)
+ base_index_1=self.basepoints[0].index+self.fit_interval_nm(self.basepoints[0].index, itplot[0], self.config['auto_left_baseline'],False)
self.basepoints.append(self._clickize(itplot[0].vectors[1][0],itplot[0].vectors[1][1],base_index_1))
self.basecurrent=self.current.path
boundaries=[self.basepoints[0].index, self.basepoints[1].index]
def features_peaks(itplot, peak, fit_points, contact_point, pl_value, T, cindex, avg):
'''
- calculate informations for each peak and add they in
+        calculate information for each peak and append it to
c_lengths, p_lengths, sigma_c_lengths, sigma_p_lengths, forces, slopes
'''
c_leng=None
sigma_p_leng=None
force=None
slope=None
-
+
delta_force=10
slope_span=int(self.config['auto_slope_span'])
-
+
peak_point=self._clickize(itplot[0].vectors[1][0],itplot[0].vectors[1][1],peak)
other_fit_point=self._clickize(itplot[0].vectors[1][0],itplot[0].vectors[1][1],peak-fit_points)
-
+
points=[contact_point, peak_point, other_fit_point]
-
+
params, yfit, xfit, fit_errors = self.wlc_fit(points, itplot[0].vectors[1][0], itplot[0].vectors[1][1], pl_value, T, return_errors=True)
-
+
#Measure forces
delta_to_measure=itplot[0].vectors[1][1][peak-delta_force:peak+delta_force]
y=min(delta_to_measure)
#check if persistent length makes sense. otherwise, discard peak.
if p_leng>self.config['auto_min_p'] and p_leng<self.config['auto_max_p']:
'''
- p_lengths.append(p_leng)
+ p_lengths.append(p_leng)
c_lengths.append(params[0]*(1.0e+9))
sigma_c_lengths.append(fit_errors[0]*(1.0e+9))
sigma_p_lengths.append(fit_errors[1]*(1.0e+9))
forces.append(abs(y-avg)*(1.0e+12))
- slopes.append(slope)
+ slopes.append(slope)
'''
c_leng=params[0]*(1.0e+9)
sigma_c_leng=fit_errors[0]*(1.0e+9)
itplot[0]=flatten(itplot[0], item, customvalue=1)
try:
peak_location,peak_size=self.exec_has_peaks(item,min_deviation)
- except:
+ except:
#We have troubles with exec_has_peaks (bad curve, whatever).
#Print info and go to next cycle.
print 'Cannot process ',item.path
- continue
+ continue
if len(peak_location)==0:
print 'No peaks!'
continue
fit_points, contact_point, pl_value, T, cindex, avg = plot_informations(itplot,pl_value)
-
+
print '\n\nCurve',item.path, 'is',c,'of',len(self.current_list),': found '+str(len(peak_location))+' peaks.'
#initialize output data vectors
if len(vect)==0:
for i in range(len(c_lengths)):
vect.append(0)
-
+
print 'Measurements for all peaks detected:'
print 'contour (nm)', c_lengths
print 'sigma contour (nm)',sigma_c_lengths
print 'sigma p (nm)',sigma_p_lengths
print 'forces (pN)',forces
print 'slopes (N/m)',slopes
-
+
'''
write automeasure text file
'''
for i in range(len(c_lengths)):
f.write(' ; '+str(c_lengths[i])+' ; '+str(p_lengths[i])+' ; '+str(forces[i])+' ; '+str(slopes[i])+' ; '+str(sigma_c_lengths[i])+' ; '+str(sigma_p_lengths[i])+'\n')
f.close()
-
+
peak_number=len(c_lengths)
-
+
'''
write peackforce text file
'''
peackforce_info = peackforce_info + ' ; ' + str(c_lengths[i]) + ' ; ' + str(forces[i])
f.write(' ; '+str(peak_number)+peackforce_info+'\n')
f.close()
-
+
'''
calculate clustering coordinates
'''
deltas=[]
for i in range(len(c_lengths)-1):
deltas.append(c_lengths[i+1]-c_lengths[i])
-
+
delta_mean=np.mean(deltas)
delta_median=np.median(deltas)
-
+
force_mean=np.mean(forces)
force_median=np.median(forces)
-
+
first_peak_cl=c_lengths[0]
last_peak_cl=c_lengths[-1]
-
+
max_force=max(forces[:-1])
min_force=min(forces)
-
+
max_delta=max(deltas)
min_delta=min(deltas)
-
+
delta_stdev=np.std(deltas)
forces_stdev=np.std(forces[:-1])
-
+
peaks_diff=(last_peak_cl-first_peak_cl)/peak_number
-
+
print 'Coordinates'
print 'Peaks',peak_number
print 'Mean delta',delta_mean
print 'Delta stdev',delta_stdev
print 'Forces stdev',forces_stdev
print 'Peaks difference',peaks_diff
-
+
'''
write clustering coordinates
'''
' ; '+str(peaks_diff)+ # 12
'\n')
f.close()
-
+
# start PCA
self.do_pca(pclus_dir+"/"+realclust_filename)
-
-
+
+
def do_pca(self,args):
'''
PCA -> "pca gaeta_coor_blind50.txt 1,3,6"
Automatically measures pca from coordinates filename and shows two interactives plots
- With the second argument (arbitrary) you can select the columns and the multiplier factor
+ With the second argument (arbitrary) you can select the columns and the multiplier factor
to use for the pca (for es "1,3*50,6,8x10,9"). Dont use spaces. "*" or "x" are the same thing.
Without second argument it reads pca_config.txt file
(c)Paolo Pancaldi, Massimo Sandal 2009
'''
-
+
# reads the columns of pca
- if self.config['hookedir'][0]=='/':
- slash='/' #a Unix or Unix-like system
- else:
- slash='\\'
- self.my_hooke_dir = self.config['hookedir']+slash
- #self.my_work_dir = os.getcwd()+slash+"pCluster_"+time.strftime("%Y%m%d_%H%M")+slash
- #self.my_curr_dir = os.path.basename(os.getcwd())
- conf=open(self.my_hooke_dir+"pca_config.txt")
+ conf=open(config_file_path("pca_config.txt"), 'r')
config = conf.readlines()
conf.close()
-
+
self.plot_myCoord = [] # tiene le coordinate prese direttamente dal file creato con pCluster
self.plot_origCoord = [] # tiene le coordinate solo delle colonne scelte e moltiplicate per i valori scelti
self.plot_pcaCoord = [] # tiene le due colonne della pca
self.plot_NewPcaCoord = [] # tiene le due colonne della pca filtrate per densita
self.plot_NewPcaCoordTr=[] # tiene le due colonne della pca trasposta filtrate per densita
plot_path_temp = ""
-
- # prende in inpunt un arg (nome del file)
+
+        # takes as input one arg (the coordinates file name)
# e il secondo le colonne su cui lavorare (e' arbitrario, riceve x es "1,2,3")
arg = args.split(" ")
if arg[0]==args:
else:
file_name=arg[0]
config[0] = arg[1]
-
+
# creo l'array "plot_myCoord" con tutte le coordinate dei plots
# e l'array plot_paths con tutti i percorsi dei plots
nPlotTot = -3 #tolgo le prime 3 righe iniziali del file
if row[0]==" " and row.find('nan')==-1 and row.find("-1.#IND")==-1:
row = row[row.index(";",2)+2:].split(" ; ") # non considero la prima colonna col #picchi
row = [float(i) for i in row]
-
+
#0:Mean delta, 1:Median delta, 2:Mean force, 3:Median force, 4:First peak length, 5:Last peak length
#6:Max delta 7:Min delta 8:Max force 9:Min force 10:Std delta 11:Std force
if (row[0]<500 and row[1]<500 and row[2]<500 and row[3]<500 and row[4]<500 and row[5]<500 and row[6]<500 and row[7]<500 and row[8]<500 and row[9]<500 and row[10]<500 and row[11]<500):
self.plot_myCoord.append(row)
self.plot_paths.append(plot_path_temp)
f.close()
-
- # creo l'array con alcune colonne e pure moltiplicate
+
+ # creo l'array con alcune colonne e pure moltiplicate
for row in self.plot_myCoord:
res=[]
for cols in config[0].split(","):
molt = 1
res.append(row[col]*molt)
self.plot_origCoord.append(res)
-
+
# array convert, calculate PCA, transpose
self.plot_origCoord = np.array(self.plot_origCoord,dtype='float')
#print self.plot_origCoord.shape
self.plot_pcaCoordTr = np.transpose(self.plot_pcaCoord)
pca_X=np.array(self.plot_pcaCoordTr[0],dtype='float')
pca_Y=np.array(self.plot_pcaCoordTr[1],dtype='float')
-
+
'''
# Start section of testing with good plots # 4 TESTING!
Xsyn_1=[]
- Ysyn_1=[]
+ Ysyn_1=[]
Xgb1_1=[]
Ygb1_1=[]
Xbad_1=[]
goodnames=goodnamefile.readlines()
nPlotGood = len(goodnames)-2 #tolgo prima e ultima riga
goodnames=[i.split()[0] for i in goodnames[1:]]
-
+
for index in range(len(self.plot_paths)):
if self.plot_paths[index][:-1] in goodnames:
Xsyn_1.append(pca_X[index])
Ybad_1.append(pca_Y[index])
# Stop section of testing with good plots # 4 TESTING!
'''
-
+
# print first plot
clustplot1=lhc.PlotObject()
clustplot1.add_set(pca_X,pca_Y)
clustplot1.destination=0
self._send_plot([clustplot1])
self.clustplot1=clustplot1
-
+
# density and filer estimation
kernel = sp.stats.kde.gaussian_kde(sp.c_[pca_X,pca_Y].T)
tallest = 0
axis_X = np.linspace(xmin,xmax,num=100)
axis_Y = np.linspace(ymin,ymax,num=100)
'''
-
+
# density filtering:
# tramite "kernel.evaluate" trovo lo score (altezza) di ogni coordinata e decido se mantenerla o no
filtered_pca_X = []
filtered_PcaCoordTr.append(filtered_pca_X)
filtered_PcaCoordTr.append(filtered_pca_Y)
filtered_PcaCoord = np.transpose(filtered_PcaCoordTr)
-
+
# creo i due array "plot_FiltOrigCoord" e "plot_FiltPaths" contenenti solo i dati filtrati con alta densita
for index in range(len(self.plot_pcaCoord)):
if self.plot_pcaCoord[index] in filtered_PcaCoord:
self.plot_FiltOrigCoord.append(self.plot_myCoord[index])
self.plot_FiltPaths.append(self.plot_paths[index])
-
+
'''
# START PCA#2: USELESS!!!
-
+
# creo l array con alcune colonne e pure moltiplicate
temp_coord = []
for row in self.plot_FiltOrigCoord:
res.append(row[col]*molt)
temp_coord.append(res)
self.plot_FiltOrigCoord = temp_coord
-
+
# ricalcolo la PCA: array convert, calculate PCA, transpose
self.plot_FiltOrigCoord = np.array(self.plot_FiltOrigCoord,dtype='float')
#print self.plot_FiltOrigCoord.shape
self.plot_NewPcaCoordTr = np.transpose(self.plot_NewPcaCoord)
pca_X2=np.array(self.plot_NewPcaCoordTr[0],dtype='float')
pca_Y2=np.array(self.plot_NewPcaCoordTr[1],dtype='float')
-
+
# Start section of testing with good plots # 4 TESTING!
Xsyn_2=[]
Ysyn_2=[]
else:
Xbad_2.append(pca_X2[index])
Ybad_2.append(pca_Y2[index])
-
+
# print second plot
clustplot2=lhc.PlotObject()
#clustplot2.add_set(pca_X2,pca_Y2)
self._send_plot([clustplot2])
self.clustplot2=clustplot2
'''
-
+
# PRINT density plot
clustplot2=lhc.PlotObject()
clustplot2.add_set(filtered_pca_X,filtered_pca_Y)
clustplot2.destination=1
self._send_plot([clustplot2])
self.clustplot2=clustplot2
-
+
# printing results
config_pca1 = config[0].replace("*", "x").rstrip("\n")
config_pca2 = config[2].replace("*", "x").rstrip("\n")
print "Piu alta: ", tallest
#print "- FILTRO 3: 2'PCA:"+config_pca2
print ""
-
+
# -- exporting coordinates and plot of PCA in debug mode! --
if config[3].find("true")!=-1:
#1' PCA: save plot and build coordinate s file
f.write (" ; " + str(cel))
f.write ("\n")
f.close()
-
+
# pCLUSTER SAVING!!!
import shutil
pcl_name = file_name.replace("coordinate_", "goodplots_").replace('.txt','_'+config_pca1+'_'+str(my_filter).replace(".",","))
f.write (myfile+"\n")
shutil.copy2(myfile, pcl_name)
f.close()
-
-
+
+
def do_multipca(self,args):
'''
MULTIPCA -> "multipca gaeta_coor_blind50.txt 3"
- Automatically multiply the column suggest in second argument for value between 1-100 (step of 2),
+ Automatically multiply the column suggest in second argument for value between 1-100 (step of 2),
measures pca from coordinates filename and save the png plots.
(c)Paolo Pancaldi, Massimo Sandal 2009
'''
for j in range(1, 13):
if i!=j:
self.do_pca(file_name + " " + str(i) + "," + str(j))
-
+
def do_triplepca(self,args):
'''
TRIPLEPCA -> "triplepca gaeta_coor_blind50.txt"
'''
It returns id, coordinates and file name of a clicked dot on a PCA graphic
'''
-
+
self._send_plot([self.clustplot1]) #quick workaround for BAD problems in the GUI
print 'Click point'
point = self._measure_N_points(N=1, whatset=0)
print "coord: " + str(dot_coord)
self.do_genlist(str(plot_file))
#self.do_jump(str(plot_file))
-
+
# indea iniziata e messa da parte...
def do_peakforce(self, args):
'''
Automatically measures peack and force plots
(c)Paolo Pancaldi, Massimo Sandal 2009
'''
-
+
# prende in inpunt un arg (nome del file)
file_name=args
f=open(file_name)
-
+
# scrivo un file temp
g = open('_prove.txt','w')
-
+
plot_file = '';
rows = f.readlines()
for row in rows:
g.write(writeme)
g.close()
f.close()
-
-
- #!/usr/bin/env python
-
'''
PROCPLOTS
Processed plots plugin for force curves.
Licensed under the GNU GPL version 2
'''
- from libhooke import WX_GOOD
+ from ..libhooke import WX_GOOD
import wxversion
wxversion.select(WX_GOOD)
import wx
- import libhookecurve as lhc
import numpy as np
import scipy as sp
import scipy.signal
import copy
- class procplotsCommands:
-
+ from .. import libhookecurve as lhc
+
+
+ class procplotsCommands(object):
+
def _plug_init(self):
pass
-
+
def do_derivplot(self,args):
'''
DERIVPLOT
Plots the derivate (actually, the discrete differentiation) of the current force curve
---------
Syntax: derivplot
- '''
- dplot=self.derivplot_curves()
- plot_graph=self.list_of_events['plot_graph']
+ '''
+ dplot=self.derivplot_curves()
+ plot_graph=self.list_of_events['plot_graph']
wx.PostEvent(self.frame,plot_graph(plots=[dplot]))
-
+
def derivplot_curves(self):
'''
do_derivplot helper function
-
+
create derivate plot curves for force curves.
- '''
+ '''
dplot=lhc.PlotObject()
- dplot.vectors=[]
-
+ dplot.vectors=[]
+
for vector in self.plots[0].vectors:
dplot.vectors.append([])
dplot.vectors[-1].append(vector[0][:-1])
dplot.vectors[-1].append(np.diff(vector[1]))
-
+
dplot.destination=1
dplot.units=self.plots[0].units
-
- return dplot
-
+
+ return dplot
+
def do_subtplot(self,args):
'''
SUBTPLOT
Syntax: subtplot
'''
#FIXME: sub_filter and sub_order must be args
-
+
if len(self.plots[0].vectors) != 2:
print 'This command only works on a curve with two different plots.'
pass
-
+
outplot=self.subtract_curves(sub_order=1)
-
- plot_graph=self.list_of_events['plot_graph']
+
+ plot_graph=self.list_of_events['plot_graph']
wx.PostEvent(self.frame,plot_graph(plots=[outplot]))
-
+
def subtract_curves(self, sub_order):
'''
subtracts the extension from the retraction
- '''
+ '''
xext=self.plots[0].vectors[0][0]
yext=self.plots[0].vectors[0][1]
xret=self.plots[0].vectors[1][0]
yret=self.plots[0].vectors[1][1]
-
+
#we want the same number of points
maxpoints_tot=min(len(xext),len(xret))
xext=xext[0:maxpoints_tot]
yext=yext[0:maxpoints_tot]
xret=xret[0:maxpoints_tot]
yret=yret[0:maxpoints_tot]
-
+
if sub_order:
ydiff=[yretval-yextval for yretval,yextval in zip(yret,yext)]
else: #reverse subtraction (not sure it's useful, but...)
- ydiff=[yextval-yretval for yextval,yretval in zip(yext,yret)]
-
+ ydiff=[yextval-yretval for yextval,yretval in zip(yext,yret)]
+
outplot=copy.deepcopy(self.plots[0])
outplot.vectors[0][0], outplot.vectors[1][0] = xext,xret #FIXME: if I use xret, it is not correct!
outplot.vectors[1][1]=ydiff
outplot.vectors[0][1]=[0 for item in outplot.vectors[1][0]]
-
+
return outplot
median_filter=customvalue
else:
median_filter=self.config['medfilt']
-
+
if median_filter==0:
return plot
-
+
if float(median_filter)/2 == int(median_filter)/2:
median_filter+=1
-
+
nplots=len(plot.vectors)
c=0
while c<nplots:
plot.vectors[c][1]=scipy.signal.medfilt(plot.vectors[c][1],median_filter)
c+=1
-
+
return plot
-
+
    def plotmanip_correct(self, plot, current, customvalue=None):
        '''
        Corrects the X (Z-piezo) vectors by subtracting the cantilever
        deflection point by point, for both extension and retraction.
        Assumes that:
        - the deflection() vector is as long as the X of extension + the X of retraction
        - plot.vectors[0][0] is the X of extension curve
        - plot.vectors[1][0] is the X of retraction curve
-
+
        FIXME: both this method and the picoforce driver have to be updated, deflection() must return
        a more senseful data structure!
        '''
        #use only for force spectroscopy experiments!
        if current.curve.experiment != 'smfs':
            return plot
-
+
        #customvalue overrides the 'correct' config flag when given
        if customvalue != None:
            execute_me=customvalue
        else:
            execute_me=self.config['correct']
        if not execute_me:
            return plot
-
+
        defl_ext,defl_ret=current.curve.deflection()
        #halflen=len(deflall)/2
-
+
        #X := Z - deflection, separately for extension and retraction
        plot.vectors[0][0]=[(zpoint-deflpoint) for zpoint,deflpoint in zip(plot.vectors[0][0],defl_ext)]
        plot.vectors[1][0]=[(zpoint-deflpoint) for zpoint,deflpoint in zip(plot.vectors[1][0],defl_ret)]
        return plot
+
+    def plotmanip_centerzero(self, plot, current, customvalue=None):
+        '''
+        Centers the force curve so the median (the free level) corresponds to 0 N
+        Assumes that:
+        - plot.vectors[0][1] is the Y of extension curve
+        - plot.vectors[1][1] is the Y of retraction curve
+
+
+        '''
+        #use only for force spectroscopy experiments!
+        if current.curve.experiment != 'smfs':
+            return plot
+
+        #customvalue overrides the 'centerzero' config flag when given
+        if customvalue != None:
+            execute_me=customvalue
+        else:
+            execute_me=self.config['centerzero']
+        if not execute_me:
+            return plot
+
+
+
+        #levelapp=float(np.median(plot.vectors[0][1]))
+        #baseline = median of (up to) the last ~300 retraction points,
+        #taken as the free-cantilever level far from contact.
+        #NOTE(review): the -1 endpoint drops the very last point;
+        #[-300:] may be what was intended -- TODO confirm. The magic 300
+        #could also come from config instead of being hard-coded.
+        levelret=float(np.median(plot.vectors[1][1][-300:-1]))
+
+        level=levelret
+
+        #shift both data sets so the baseline sits at 0
+        approach=[i-level for i in plot.vectors[0][1]]
+        retract=[i-level for i in plot.vectors[1][1]]
+
+        plot.vectors[0][1]=approach
+        plot.vectors[1][1]=retract
+        return plot
+
'''
def plotmanip_detriggerize(self, plot, current, customvalue=None):
#DEPRECATED
if self.config['detrigger']==0:
return plot
-
+
cutindex=2
startvalue=plot.vectors[0][0][0]
-
- for index in range(len(plot.vectors[0][0])-1,2,-2):
+
+ for index in range(len(plot.vectors[0][0])-1,2,-2):
if plot.vectors[0][0][index]>startvalue:
cutindex=index
else:
plot.vectors[0][0]=plot.vectors[0][0][:cutindex]
plot.vectors[0][1]=plot.vectors[0][1][:cutindex]
-
+
return plot
- '''
-
-
-
+ '''
+
+
+
#FFT---------------------------
    def fft_plot(self, vector):
        '''
        Returns a PlotObject holding the magnitude of the FFT of vector
        (bins 1..len(vector)/2, i.e. DC excluded, positive frequencies only),
        destined for the bottom panel (destination=1).
        '''
        fftplot=lhc.PlotObject()
        fftplot.vectors=[[]]
-
+
        fftlen=len(vector)/2 #need just 1/2 of length
-        fftplot.vectors[-1].append(np.arange(1,fftlen).tolist())
-
+        fftplot.vectors[-1].append(np.arange(1,fftlen).tolist())
+
        #np.fft was callable in very old NumPy; on newer NumPy it is a
        #module, so calling it raises TypeError and we fall back to np.fft.fft
        try:
            fftplot.vectors[-1].append(abs(np.fft(vector)[1:fftlen]).tolist())
        except TypeError: #we take care of newer NumPy (1.0.x)
            fftplot.vectors[-1].append(abs(np.fft.fft(vector)[1:fftlen]).tolist())
-
-
+
+
        fftplot.destination=1
-
-
+
+
        return fftplot
-
-
+
+
def do_fft(self,args):
'''
FFT
Plots the fast Fourier transform of the selected plot
---
Syntax: fft [top,bottom] [select] [0,1...]
-
+
By default, fft performs the Fourier transform on all the 0-th data set on the
top plot.
-
+
[top,bottom]: which plot is the data set to fft (default: top)
[select]: you pick up two points on the plot and fft only the segment between
[0,1,...]: which data set on the selected plot you want to fft (default: 0)
'''
-
+
#args parsing
#whatplot = plot to fft
#whatset = set to fft in the plot
whatset=int(arg)
except ValueError:
pass
-
+
if select:
points=self._measure_N_points(N=2, whatset=whatset)
boundaries=[points[0].index, points[1].index]
y_to_fft=self.plots[whatplot].vectors[whatset][1][boundaries[0]:boundaries[1]] #y
else:
y_to_fft=self.plots[whatplot].vectors[whatset][1] #y
-
- fftplot=self.fft_plot(y_to_fft)
+
+ fftplot=self.fft_plot(y_to_fft)
fftplot.units=['frequency', 'power']
- plot_graph=self.list_of_events['plot_graph']
+ plot_graph=self.list_of_events['plot_graph']
wx.PostEvent(self.frame,plot_graph(plots=[fftplot]))
-
--- /dev/null
- from libhooke import WX_GOOD
+#!/usr/bin/env python
+
+'''review.py
+Alberto Gomez-Casado (c) 2010 University of Twente
+'''
+
++from ..libhooke import WX_GOOD
+import wxversion
+wxversion.select(WX_GOOD)
+from wx import PostEvent
+import numpy as np
+import shutil
+import libinput as linp
+import copy
+import os.path
+
+import warnings
+warnings.simplefilter('ignore',np.RankWarning)
+
+
+class reviewCommands:
+    #Mixin providing the interactive 'review' command; expects the host
+    #class to supply self.current_list, self.drivers, self.do_plot,
+    #self._get_displayed_plot, self._send_plot, self.plotmanip_correct.
+
+    def do_review(self,args):
+        '''
+        REVIEW
+        (review.py)
+        Presents curves (in current playlist) in groups of ten. User can indicate which curves will be selected to be saved in a separate directory for further analysis.
+        By default curves are presented separated -30 nm in x and -100 pN in y.
+        Curve number one of each set is the one showing the approach.
+        ------------
+        Syntax:
+        review [x spacing (nm)] [y spacing (pN)]
+
+        '''
+
+        args=args.split()
+
+        if len(args)==2:
+            try:
+                xgap=int(args[0])*1e-9 #scale to SI units
+                ygap=int(args[1])*1e-12
+            #NOTE(review): bare 'except' hides unrelated errors;
+            #'except ValueError' would be enough here.
+            except:
+                print 'Spacings must be numeric! Using defaults'
+                xgap=-30*1e-9
+                ygap=-100*1e-12
+        else:
+            xgap=-30*1e-9
+            ygap=-100*1e-12
+
+        print 'Processing playlist...'
+        print '(Please wait)'
+        keep_list=[]
+
+        c=0
+        print 'You can stop the review at any moment by entering \'q\' you can go back ten curves entering \'b\''
+        print 'What curve no. you would like to start? (Enter for starting from the first)'
+        skip=raw_input()
+
+        #non-numeric input (including plain Enter) starts from the first curve
+        if skip.isdigit()==False:
+            skip=0
+        else:
+            skip=int(skip)
+            print 'Skipping '+str(skip)+ ' curves'
+            c=skip
+
+        while c < len(self.current_list):
+
+
+            #take a group of ten curves and plot them with some spacing
+
+            curveset=self.current_list[c:c+10]
+
+            #first curve of the set is plotted normally (shows the approach)
+            base=curveset[0]
+            self.current=base
+            self.do_plot(0)
+            multiplot=copy.deepcopy(self._get_displayed_plot(0))
+            self.current.curve.close_all()
+
+            for i in range(1,10):
+                if i >= len(curveset):
+                    print 'End of the list'
+                    print 'WARNING: maybe you want to finish!'
+                    break
+                nextitem=curveset[i]
+                if not nextitem.identify(self.drivers):
+                    continue
+                nextplot=self.plotmanip_correct(nextitem.curve.default_plots()[0],nextitem)
+                nextvect=nextplot.vectors
+                nextitem.curve.close_all()
+
+                #only the retraction data set [1] of each extra curve is shown
+                nextx=nextvect[1][0]
+                nexty=nextvect[1][1]
+                #center y around 0
+                ymedian=np.median(nexty)
+                #NOTE(review): 'pos' is assigned but never used
+                pos=0
+                #offset the i-th curve by i*gap so the set fans out on screen
+                for j in range(0,len(nextx)):
+                    nextx[j]=nextx[j]+i*xgap
+                    nexty[j]=nexty[j]+i*ygap-ymedian
+                multiplot.add_set(nextx,nexty)
+                multiplot.styles.append('lines')
+                multiplot.colors.append(None)
+
+            self._send_plot([multiplot])
+
+
+            print 'Which ones you want to keep?'
+            keep=raw_input()
+            #NOTE(review): isalpha() gating means any alphabetic input other
+            #than 'b'/'q' (or mixed input like '1 b') silently falls through
+            #and advances to the next group of ten.
+            if keep.isalpha():
+                if keep=='b':
+                    print 'Going back ten curves'
+                    c-=10
+                    if c<0:
+                        print 'We are already at the start'
+                        c=0
+                    continue
+                if keep=='q':
+                    break
+            else:
+                for i in keep.split():
+                    if i.isdigit() and int(i)>0 and int(i)<11: #if it is not digit the int() call is never made, so no exception should be happening
+                        keep_item=curveset[int(i)-1].path
+                        if keep_item in keep_list:
+                            print 'This curve ('+keep_item+') was already selected, skipping'
+                        else:
+                            keep_list.append(keep_item)
+                    else:
+                        print 'You entered an invalid value: '+i
+
+            c+=10
+
+        #FIXME I don't know why the print below gives errors sometimes
+        #NOTE(review): 'i' leaks from the loops above and is unbound if the
+        #while body never ran (empty playlist / skip past the end), raising
+        #NameError here -- presumably the error mentioned above; TODO confirm.
+        try:
+            print 'Kept '+str(len(keep_list))+' curves from '+str(min(c+i+1,len(self.current_list)))
+        except:
+            print 'Display error, never mind, we continue. Below the amount of kept and total curves:'
+            print str(len(keep_list))
+            print str(len(self.current_list))
+
+        allok=0 #flag to keep from losing all the work in a slight mistake
+        while allok==0:
+            if len(keep_list) == 0:
+                return
+            save=linp.safeinput('Do you want to save the selected curves?',['y','n'])
+            if save=='y':
+                savedir=linp.safeinput('Destination directory?')
+                savedir=os.path.abspath(savedir)
+                if not os.path.isdir(savedir):
+                    print 'Destination is not a directory. Try again'
+                    continue
+            if save=='n':
+                allok=1
+                return
+
+            #copy every selected curve file; abort and re-ask on failure
+            for item in keep_list:
+                try:
+                    shutil.copy(item, savedir)
+                    allok=1
+                except (OSError, IOError):
+                    print 'Cannot copy file. '+item+' Perhaps you gave me a wrong directory?'
+                    allok=0
+                    break
+
+        return