Updated hooke.driver.picoforce to new Driver class.
author W. Trevor King <wking@drexel.edu>
Mon, 17 May 2010 12:00:36 +0000 (08:00 -0400)
committer W. Trevor King <wking@drexel.edu>
Mon, 17 May 2010 12:00:36 +0000 (08:00 -0400)
This is a fairly major rewrite of the previous PicoForce driver.  I've
cleaned up header parsing and tried to break trace -> block conversion
up into compact functions.  The meaning of many PicoForce file
constants is still unclear to me though, so I may have gotten some
things wrong.

Also:
  * Added references for each hooke.experiment.Experiment subclass.
  * TutorialDriver sets curve.info['experiment'] to an Experiment subclass.
  * An adjusted test.curve_info now passes.

While we're translating the old experiment strings to the new
Experiment classes, the following conversion table may be useful:

  'clamp'  -> ForceClamp
  'smfs'   -> VelocityClamp
  'smfluo' -> TwoColorCoincidenceDetection

doc/index.txt
hooke/driver/__init__.py
hooke/driver/picoforce.py
hooke/driver/tutorial.py
hooke/experiment.py
test/curve_info.py

index 99919a57269fc88f6f38a8f4df34e4f72babbb5e..b370dc6290e7884380f12dba113fae02104f9786 100644 (file)
@@ -44,11 +44,12 @@ Indices and tables
 Publications
 ============
 
-Hooke has been published [1]. Please cite Hooke if you use it.  Let us
-know, and we'll link to your paper!
+Hooke has been published [#sandal2009]_. Please cite Hooke if you use
+it.  Let us know, and we'll link to your paper!
 
 
-[1] Sandal M, Benedetti F, Brucale M, Gomez-Casado A, Samorì B.,
+.. [#sandal2009] M. Sandal, F. Benedetti, M. Brucale, A. Gomez-Casado,
+  B. Samorì.
   "Hooke: an open software platform for force spectroscopy."
   Bioinformatics, 2009.
   doi: `10.1093/bioinformatics/btp180 <http://dx.doi.org/10.1093/bioinformatics/btp180>`_
index b6324cb3370f4d92f197b42e1345d10d7a1c4796..ddaa0e1cfc77fb00e5f855c7f0214248c95175b9 100644 (file)
@@ -36,7 +36,7 @@ DRIVER_MODULES = [
 #    ('mcs', True),
 #    ('mfp1dexport', True),
 #    ('mfp3d', True),
-#    ('picoforce', True),
+    ('picoforce', True),
 #    ('picoforcealt', True),
     ('tutorial', True),
 ]
index 9bf2311bc74bbec383c7792fc96eee9845fcc90e..c1c914336ce9c0bc3935c8a56f6ccbeece7d0766 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (C) 2006-2010 Alberto Gomez-Casado
+# Copyright (C) 2006-2010 Alberto Gomez-Casado
 #                         Massimo Sandal <devicerandom@gmail.com>
 #                         W. Trevor King <wking@drexel.edu>
 #
 # License along with Hooke.  If not, see
 # <http://www.gnu.org/licenses/>.
 
-"""Library for interpreting Picoforce force spectroscopy files.
+"""Library for interpreting Veeco PicoForce force spectroscopy files.
 """
 
+import pprint
 import re
-import struct
-from scipy import arange
-
-#from .. import libhooke as lh
-from .. import curve as lhc
-
-
-__version__='0.0.0.20090923'
-
-
-class DataChunk(list):
-    #Dummy class to provide ext and ret methods to the data list.
-
-    def ext(self):
-        halflen=(len(self)/2)
-        return self[0:halflen]
-
-    def ret(self):
-        halflen=(len(self)/2)
-        return self[halflen:]
-
-class picoforceDriver(lhc.Driver):
-
-    #Construction and other special methods
-
-    def __init__(self,filename):
-        '''
-        constructor method
-        '''
-        filename = lh.get_file_path(filename)
-        self.textfile=file(filename)
-        self.binfile=file(filename,'rb')
-
-        #The 0,1,2 data chunks are:
-        #0: D (vs T)
-        #1: Z (vs T)
-        #2: D (vs Z)
-
-
-        self.filepath=filename
-        self.debug=False
-
-        self.filetype='picoforce'
-        self.experiment='smfs'
-
-
-    #Hidden methods. These are meant to be used only by API functions. If needed, however,
-    #they can be called just like API methods.
-
-    def _get_samples_line(self):
-        '''
-        Gets the samples per line parameters in the file, to understand trigger behaviour.
-        '''
-        self.textfile.seek(0)
-
-        samps_expr=re.compile(".*Samps")
-
-        samps_values=[]
-        for line in self.textfile.readlines():
-            if samps_expr.match(line):
-                try:
-                    samps=int(line.split()[2]) #the third word splitted is the offset (in bytes)
-                    samps_values.append(samps)
-                except:
-                    pass
-
-                #We raise a flag for the fact we meet an offset, otherwise we would take spurious data length arguments.
-
-        return int(samps_values[0])
-
-    def _get_chunk_coordinates(self):
-        '''
-        This method gets the coordinates (offset and length) of a data chunk in our
-        Picoforce file.
-
-        It returns a list containing two tuples:
-        the first element of each tuple is the data_offset, the second is the corresponding
-        data size.
-
-        In near future probably each chunk will get its own data structure, with
-        offset, size, type, etc.
-        '''
-        self.textfile.seek(0)
-
-        offset_expr=re.compile(".*Data offset")
-        length_expr=re.compile(".*Data length")
-
-        data_offsets=[]
-        data_sizes=[]
-        flag_offset=0
-
-        for line in self.textfile.readlines():
-
-            if offset_expr.match(line):
-                offset=int(line.split()[2]) #the third word splitted is the offset (in bytes)
-                data_offsets.append(offset)
-                #We raise a flag for the fact we meet an offset, otherwise we would take spurious data length arguments.
-                flag_offset=1
-
-            #same for the data length
-            if length_expr.match(line) and flag_offset:
-                size=int(line.split()[2])
-                data_sizes.append(size)
-                #Put down the offset flag until the next offset is met.
-                flag_offset=0
-
-        return zip(data_offsets,data_sizes)
-
-    def _get_data_chunk(self,whichchunk):
-        '''
-        reads a data chunk and converts it in 16bit signed int.
-        '''
-        offset,size=self._get_chunk_coordinates()[whichchunk]
-
-
-        self.binfile.seek(offset)
-        raw_chunk=self.binfile.read(size)
-
-        my_chunk=[]
-        for data_position in range(0,len(raw_chunk),2):
-            data_unit_bytes=raw_chunk[data_position:data_position+2]
-            #The unpack function converts 2-bytes in a signed int ('h').
-            #we use output[0] because unpack returns a 1-value tuple, and we want the number only
-            data_unit=struct.unpack('h',data_unit_bytes)[0]
-            my_chunk.append(data_unit)
-
-        return DataChunk(my_chunk)
-
-    def _get_Zscan_info(self,index):
-        '''
-        gets the Z scan informations needed to interpret the data chunk.
-        These info come from the general section, BEFORE individual chunk headers.
-
-        By itself, the function will parse for three parameters.
-        (index) that tells the function what to return when called by
-        exposed API methods.
-        index=0 : returns Zscan_V_LSB
-        index=1 : returns Zscan_V_start
-        index=2 : returns Zscan_V_size
-        '''
-        self.textfile.seek(0)
-
-        ciaoforcelist_expr=re.compile(".*Ciao force")
-        zscanstart_expr=re.compile(".*@Z scan start")
-        zscansize_expr=re.compile(".*@Z scan size")
-
-        ciaoforce_flag=0
-        theline=0
-        for line in self.textfile.readlines():
-            if ciaoforcelist_expr.match(line):
-                ciaoforce_flag=1 #raise a flag: zscanstart and zscansize params to read are later
-
-            if ciaoforce_flag and zscanstart_expr.match(line):
-                raw_Zscanstart_line=line.split()
-
-            if ciaoforce_flag and zscansize_expr.match(line):
-                raw_Zscansize_line=line.split()
-
-        Zscanstart_line=[]
-        Zscansize_line=[]
-        for itemscanstart,itemscansize in zip(raw_Zscanstart_line,raw_Zscansize_line):
-            Zscanstart_line.append(itemscanstart.strip('[]()'))
-            Zscansize_line.append(itemscansize.strip('[]()'))
-
-        Zscan_V_LSB=float(Zscanstart_line[6])
-        Zscan_V_start=float(Zscanstart_line[8])
-        Zscan_V_size=float(Zscansize_line[8])
-
-        return (Zscan_V_LSB,Zscan_V_start,Zscan_V_size)[index]
-
-    def _get_Z_magnify_scale(self,whichchunk):
-        '''
-        gets Z scale and Z magnify
-        Here we get Z scale/magnify from the 'whichchunk' only.
-        whichchunk=1,2,3
-        TODO: make it coherent with data_chunks syntaxis (0,1,2)
-
-        In future, should we divide the *file* itself into chunk descriptions and gain
-        true chunk data structures?
-        '''
-        self.textfile.seek(0)
-
-        z_scale_expr=re.compile(".*@4:Z scale")
-        z_magnify_expr=re.compile(".*@Z magnify")
-
-        ramp_size_expr=re.compile(".*@4:Ramp size")
-        ramp_offset_expr=re.compile(".*@4:Ramp offset")
-
-        occurrences=0
-        found_right=0
-
-
-        for line in self.textfile.readlines():
-            if z_magnify_expr.match(line):
-                occurrences+=1
-                if occurrences==whichchunk:
-                    found_right=1
-                    raw_z_magnify_expression=line.split()
-                else:
-                    found_right=0
-
-            if found_right and z_scale_expr.match(line):
-                raw_z_scale_expression=line.split()
-            if found_right and ramp_size_expr.match(line):
-                raw_ramp_size_expression=line.split()
-            if found_right and ramp_offset_expr.match(line):
-                raw_ramp_offset_expression=line.split()
-
-        return float(raw_z_magnify_expression[5]),float(raw_z_scale_expression[7]), float(raw_ramp_size_expression[7]), float(raw_ramp_offset_expression[7]), float(raw_z_scale_expression[5][1:])
-
-
-    #Exposed APIs.
-    #These are the methods that are meant to be called from external apps.
-
-    def LSB_to_volt(self,chunknum,voltrange=20):
-        '''
-        Converts the LSB data of a given chunk (chunknum=0,1,2) in volts.
-        First step to get the deflection and the force.
-
-        SYNTAXIS:
-        item.LSB_to_volt(chunknum, [voltrange])
-
-        The voltrange is by default set to 20 V.
-        '''
-        return DataChunk([((float(lsb)/65535)*voltrange) for lsb in self.data_chunks[chunknum]])
-
-    def LSB_to_deflection(self,chunknum,deflsensitivity=None,voltrange=20):
-        '''
-        Converts the LSB data in deflection (meters).
-
-        SYNTAXIS:
-        item.LSB_to_deflection(chunknum, [deflection sensitivity], [voltrange])
-
-        chunknum is the chunk you want to parse (0,1,2)
-
-        The deflection sensitivity by default is the one parsed from the file.
-        The voltrange is by default set to 20 V.
-        '''
-        if deflsensitivity is None:
-            deflsensitivity=self.get_deflection_sensitivity()
-
-        lsbvolt=self.LSB_to_volt(chunknum)
-        return DataChunk([volt*deflsensitivity for volt in lsbvolt])
-
-    def deflection(self):
-        '''
-        Get the actual force curve deflection.
-        '''
-        deflchunk= self.LSB_to_deflection(2)
-        return deflchunk.ext(),deflchunk.ret()
-
-    def LSB_to_force(self,chunknum=2,Kspring=None,voltrange=20):
-        '''
-        Converts the LSB data (of deflection) in force (newtons).
-
-        SYNTAXIS:
-        item.LSB_to_force([chunknum], [spring constant], [voltrange])
-
-        chunknum is the chunk you want to parse (0,1,2). The chunk used is by default 2.
-        The spring constant by default is the one parsed from the file.
-        The voltrange is by default set to 20 V.
-        '''
-        if Kspring is None:
-            Kspring=self.get_spring_constant()
-
-        lsbdefl=self.LSB_to_deflection(chunknum)
-        return DataChunk([(meter*Kspring) for meter in lsbdefl])
-
-    def get_Zscan_V_start(self):
-        return self._get_Zscan_info(1)
-
-    def get_Zscan_V_size(self):
-        return self._get_Zscan_info(2)
-
-    def get_Z_scan_sensitivity(self):
-        '''
-        gets Z sensitivity
-        '''
-        self.textfile.seek(0)
-
-        z_sensitivity_expr=re.compile(".*@Sens. Zsens")
-
-        for line in self.textfile.readlines():
-            if z_sensitivity_expr.match(line):
-                z_sensitivity=float(line.split()[3])
-        #return it in SI units (that is: m/V, not nm/V)
-        return z_sensitivity*(10**(-9))
-
-    def get_Z_magnify(self,whichchunk):
-        '''
-        Gets the Z magnify factor. Normally it is 1, unknown exact use as of 2006-01-13
-        '''
-        return self._get_Z_magnify_scale(whichchunk)[0]
-
-    def get_Z_scale(self,whichchunk):
-        '''
-        Gets the Z scale.
-        '''
-        return self._get_Z_magnify_scale(whichchunk)[1]
-
-    def get_ramp_size(self,whichchunk):
-        '''
-        Gets the -user defined- ramp size
-        '''
-        return self._get_Z_magnify_scale(whichchunk)[2]
-
-    def get_ramp_offset(self,whichchunk):
-        '''
-        Gets the ramp offset
-        '''
-        return self._get_Z_magnify_scale(whichchunk)[3]
-
-    def get_Z_scale_LSB(self,whichchunk):
-        '''
-        Gets the LSB-to-volt conversion factor of the Z data.
-        (so called hard-scale in the Nanoscope documentation)
-
-        '''
-        return self._get_Z_magnify_scale(whichchunk)[4]
-
-    def get_deflection_sensitivity(self):
-        '''
-        gets deflection sensitivity
-        '''
-        self.textfile.seek(0)
-
-        def_sensitivity_expr=re.compile(".*@Sens. DeflSens")
-
-        for line in self.textfile.readlines():
-            if def_sensitivity_expr.match(line):
-                def_sensitivity=float(line.split()[3])
-                break
-        #return it in SI units (that is: m/V, not nm/V)
-        return def_sensitivity*(10**(-9))
-
-    def get_spring_constant(self):
-        '''
-        gets spring constant.
-        We actually find *three* spring constant values, one for each data chunk (F/t, Z/t, F/z).
-        They are normally all equal, but we retain all three for future...
-        '''
-        self.textfile.seek(0)
-
-        springconstant_expr=re.compile(".*Spring Constant")
-
-        constants=[]
-
-        for line in self.textfile.readlines():
-            if springconstant_expr.match(line):
-                constants.append(float(line.split()[2]))
-
-        return constants[0]
-
-    def get_Zsensorsens(self):
-        '''
-        gets Zsensorsens for Z data.
-
-        This is the sensitivity needed to convert the LSB data in nanometers for the Z-vs-T data chunk.
-        '''
-        self.textfile.seek(0)
-
-        zsensorsens_expr=re.compile(".*Sens. ZSensorSens")
-
-        for line in self.textfile.readlines():
-            if zsensorsens_expr.match(line):
-                zsensorsens_raw_expression=line.split()
-                #we must take only first occurrence, so we exit from the cycle immediately
-                break
-
-        return (float(zsensorsens_raw_expression[3]))*(10**(-9))
-
-    def Z_data(self):
-        '''
-        returns converted ext and ret Z curves.
-        They're on the second chunk (Z vs t).
-        '''
-        #Zmagnify_zt=self.get_Z_magnify(2)
-        #Zscale_zt=self.get_Z_scale(2)
-        Zlsb_zt=self.get_Z_scale_LSB(2)
-        #rampsize_zt=self.get_ramp_size(2)
-        #rampoffset_zt=self.get_ramp_offset(2)
-        zsensorsens=self.get_Zsensorsens()
-
-        '''
-        The magic formula that converts the Z data is:
-
-        meters = LSB * V_lsb_conversion_factor * ZSensorSens
-        '''
-
-        #z_curves=[item*Zlsb_zt*zsensorsens for item in self.data_chunks[1].pair['ext']],[item*Zlsb_zt*zsensorsens for item in self.data_chunks[1].pair['ret']]
-        z_curves=[item*Zlsb_zt*zsensorsens for item in self.data_chunks[1].ext()],[item*Zlsb_zt*zsensorsens for item in self.data_chunks[1].ret()]
-        z_curves=[DataChunk(item) for item in z_curves]
-        return z_curves
-
-    def Z_extremes(self):
-        '''
-        returns the extremes of the Z values
-        '''
-        zcurves=self.Z_data()
-        z_extremes={}
-        z_extremes['ext']=zcurves[0][0],zcurves[0][-1]
-        z_extremes['ret']=zcurves[1][0],zcurves[1][-1]
-
-        return z_extremes
-
-    def Z_step(self):
-        '''
-        returns the calculated step between the Z values
-        '''
-        zrange={}
-        zpoints={}
-
-        z_extremes=self.Z_extremes()
-
-        zrange['ext']=abs(z_extremes['ext'][0]-z_extremes['ext'][1])
-        zrange['ret']=abs(z_extremes['ret'][0]-z_extremes['ret'][1])
-
-        #We must take 1 from the calculated zpoints, or when I use the arange function gives me a point more
-        #with the step. That is, if I have 1000 points, and I use arange(start,stop,step), I have 1001 points...
-        #For cleanness, solution should really be when using arange, but oh well...
-        zpoints['ext']=len(self.Z_data()[0])-1
-        zpoints['ret']=len(self.Z_data()[1])-1
-        #this syntax must become coherent!!
-        return (zrange['ext']/zpoints['ext']),(zrange['ret']/zpoints['ret'])
-
-    def Z_domains(self):
-        '''
-        returns the Z domains on which to plot the force data.
-
-        The Z domains are returned as a single long DataChunk() extended list. The extension and retraction part
-        can be extracted using ext() and ret() methods.
-        '''
-        x1step=self.Z_step()[0]
-        x2step=self.Z_step()[1]
-
-        try:
-            xext=arange(self.Z_extremes()['ext'][0],self.Z_extremes()['ext'][1],-x1step)
-            xret=arange(self.Z_extremes()['ret'][0],self.Z_extremes()['ret'][1],-x2step)
-        except:
-            xext=arange(0,1)
-            xret=arange(0,1)
-            print 'picoforce.py: Warning. xext, xret domains cannot be extracted.'
-
-        if not (len(xext)==len(xret)):
-            if self.debug:
-                #print warning
-                print "picoforce.py: Warning. Extension and retraction domains have different sizes."
-                print "length extension: ", len(xext)
-                print "length retraction: ", len(xret)
-                print "You cannot trust the resulting curve."
-                print "Until a solution is found, I substitute the ext domain with the ret domain. Sorry."
-            xext=xret
-
-        return DataChunk(xext.tolist()+xret.tolist())
-
-    def Z_scan_size(self):
-        return self.get_Zscan_V_size()*self.get_Z_scan_sensitivity()
-
-    def Z_start(self):
-        return self.get_Zscan_V_start()*self.get_Z_scan_sensitivity()
-
-    def ramp_size(self,whichchunk):
-        '''
-        to be implemented if needed
-        '''
-        raise "Not implemented yet."
-
-
-    def ramp_offset(self,whichchunk):
-        '''
-        to be implemented if needed
-        '''
-        raise "Not implemented yet."
-
-    def detriggerize(self, forcext):
-        '''
-        Cuts away the trigger-induced s**t on the extension curve.
-        DEPRECATED
-        cutindex=2
-        startvalue=forcext[0]
-
-        for index in range(len(forcext)-1,2,-2):
-           if forcext[index]>startvalue:
-                cutindex=index
-           else:
+import time
+
+import numpy
+
+from .. import curve as curve # this module defines data containers.
+from .. import experiment as experiment # this module defines expt. types
+from ..config import Setting # configurable setting class
+from . import Driver as Driver # this is the Driver base class
+
+
+__version__='0.0.0.20100516'
+
+class PicoForceDriver (Driver):
+    """Handle Veeco Picoforce force spectroscopy files.
+    """
+    def __init__(self):
+        super(PicoForceDriver, self).__init__(name='picoforce')
+
+    def is_me(self, path):
+        f = file(path, 'r')
+        header = f.read(30)
+        f.close()
+
+        return header[2:17] == 'Force file list'
+
+    def read(self, path):
+        info = self._read_header_path(path)
+        self._check_version(info)
+        data = self._read_data_path(path, info)
+        info['filetype'] = self.name
+        info['experiment'] = experiment.VelocityClamp
+        return (data, info)
+
+    def _read_header_path(self, path):
+        """Read curve information from the PicoForce file at `path`.
+
+        See :meth:`._read_header_file`.
+        """
+        return self._read_header_file(file(path, 'rb'))
+
+    def _read_header_file(self, file):
+        r"""Read curve information from a PicoForce file.
+
+        Return a dict of dicts representing the information.  If a
+        field is repeated multiple times, its value is replaced by a
+        list of the values for each occurrence.
+
+        Examples
+        --------
+
+        >>> import pprint
+        >>> import StringIO
+        >>> f = StringIO.StringIO('\r\n'.join([
+        ...             '\*Force file list',
+        ...             '\Version: 0x06120002',
+        ...             '\Date: 04:42:34 PM Tue Sep 11 2007',
+        ...             '\Start context: FOL2',
+        ...             '\Data length: 40960',
+        ...             '\Text: ',
+        ...             '\*Equipment list',
+        ...             '\Description: Extended PicoForce',
+        ...             '\Controller: IIIA',
+        ...             '\*Ciao force image list',
+        ...             '\Data offset: 40960',
+        ...             '\Data length: 8192',
+        ...             '\*Ciao force image list',
+        ...             '\Data offset: 49152',
+        ...             '\Data length: 8192',
+        ...             '\*Ciao force image list',
+        ...             '\Data offset: 57344',
+        ...             '\Data length: 8192',
+        ...             ]))
+        >>> p = PicoForceDriver()
+        >>> d = p._read_header_file(f)
+        >>> pprint.pprint(d, width=60)
+        {'Ciao force image list': [{'Data length': '8192',
+                                    'Data offset': '40960'},
+                                   {'Data length': '8192',
+                                    'Data offset': '49152'},
+                                   {'Data length': '8192',
+                                    'Data offset': '57344'}],
+         'Equipment list': {'Controller': 'IIIA',
+                            'Description': 'Extended PicoForce'},
+         'Force file list': {'Data length': '40960',
+                             'Date': '04:42:34 PM Tue Sep 11 2007',
+                             'Start context': 'FOL2',
+                             'Text:': None,
+                             'Version': '0x06120002'}}
+        """
+        info = {}
+        header_field = None
+        for line in file:
+            line = line.strip()
+            if line.startswith('\*File list end'):
                 break
+            if line.startswith(r'\*'):
+                header_field = line[len(r'\*'):]
+                if header_field in info:
+                    if isinstance(info[header_field], list):
+                        info[header_field].append({}) # >=3rd appearance
+                    else: # Second appearance
+                        info[header_field] = [info[header_field], {}]
+                else: # First appearance
+                    info[header_field] = {}
+            else:
+                assert line.startswith('\\'), line
+                fields = line[len('\\'):].split(': ', 1)
+                key = fields[0]
+                if len(fields) == 1: # fields = [key]
+                    value = None
+                else: # fields = [key, value]
+                    value = fields[1]
+                if isinstance(info[header_field], list): # >=2nd header_field
+                    target_dict = info[header_field][-1]
+                else: # first appearance of header_field
+                    target_dict = info[header_field]
+                if key in target_dict and target_dict[key] != value:
+                    raise NotImplementedError(
+                        'Overwriting %s: %s -> %s'
+                        % (key, target_dict[key], value))
+                target_dict[key] = value
+        return (info)
+
+    def _check_version(self, info):
+        """Ensure the input file is a version we understand.
+
+        Otherwise, raise `ValueError`.
+        """
+        version = info['Force file list'].get('Version', None)
+        if version not in ['0x06120002']:
+            raise ValueError(
+                '%s file version %s not supported (yet!)\n%s'
+                % (self.name, version,
+                   pprint.pformat(info['Force file list'])))
+
+    def _read_data_path(self, path, info):
+        """Read curve data from the PicoForce file at `path`.
+
+        See :meth:`._read_data_file`.
+        """
+        f = file(path, 'rb')
+        data = self._read_data_file(f, info)
+        f.close()
+        return data
+
+    def _read_data_file(self, file, info):
+        file.seek(0)
+        traces = self._extract_traces(buffer(file.read()), info)
+        deflection,z_piezo,deflection_B = traces
+        self._validate_traces(z_piezo, deflection, deflection_B)
+        L = len(deflection)
+        approach = self._extract_block(
+            info, z_piezo, deflection, 0, L/2, 'approach')
+        retract = self._extract_block(
+            info, z_piezo, deflection, L/2, L, 'retract')
+        data = [approach, retract]
+        return data
+
+    def _extract_traces(self, buffer, info):
+        """Extract each of the three vector blocks in a PicoForce file.
+        
+        The blocks are:
+
+        * Deflection input
+        * Z piezo sensor input
+        * Deflection again?
+
+        And their headers are marked with 'Ciao force image list'.
+        """
+        traces = [] 
+        for image in info['Ciao force image list']:
+            offset = int(image['Data offset'])
+            length = int(image['Data length'])
+            sample_size = int(image['Bytes/pixel'])
+            rows = length / sample_size
+            if sample_size != 2:
+                raise NotImplementedError('Size: %s' % sample_size)
+            d = curve.Data(
+                shape=(rows),
+                dtype=numpy.int16,
+                buffer=buffer,
+                offset=offset,
+                info=image,
+                )
+            traces.append(d)
+        return traces
+
+    def _validate_traces(self, z_piezo, deflection, deflection_B):
+        key = 'Spring Constant'
+        spring_constant = z_piezo.info[key]
+        for trace in [deflection, deflection_B]:
+            if trace.info[key] != spring_constant:
+                raise NotImplementedError(
+                    'spring constant missmatch: %s != %s'
+                    % (spring_constant, trace.info[key]))
+        if max(abs(deflection_B[:-1]-deflection[:-1])) != 0:
+            raise NotImplementedError('trace 0 != trace 2')
+        if len(z_piezo) != len(deflection):
+            raise ValueError('Trace length missmatch: %d != %d'
+                             % (len(z_piezo), len(deflection)))
+
+    def _extract_block(self, info, z_piezo, deflection, start, stop, name):
+        block = curve.Data(
+            shape=(stop-start, 2),
+            dtype=numpy.float)
+        block[:,0] = z_piezo[start:stop]
+        block[:,1] = deflection[start:stop]
+        block.info = self._translate_block_info(
+            info, z_piezo.info, deflection.info, name)
+        block = self._scale_block(block)
+        return block
+
+    def _translate_block_info(self, info, z_piezo_info, deflection_info, name):
+        ret = {
+            'name':name,
+            'raw info':info,
+            'raw z piezo info': z_piezo_info,
+            'raw deflection info': deflection_info,
+            'spring constant (N/m)':float(z_piezo_info['Spring Constant'])
+            }
+
+        t = info['Force file list']['Date'] # 04:42:34 PM Tue Sep 11 2007
+        ret['time'] = time.strptime(t, '%I:%M:%S %p %a %b %d %Y')
+
+        type_re = re.compile('S \[(\w*)\] "([\w\s]*)"')
+        match = type_re.match(z_piezo_info['@4:Image Data'])
+        assert match.group(1).lower() == match.group(2).replace(' ','').lower(), \
+            'Name missmatch: "%s", "%s"' % (match.group(1), match.group(2))
+        ret['columns'] = [match.group(2)]
+        match = type_re.match(deflection_info['@4:Image Data'])
+        assert match.group(1).lower() == match.group(2).replace(' ','').lower(), \
+            'Name missmatch: "%s", "%s"' % (match.group(1), match.group(2))
+        ret['columns'].append(match.group(2))
+        assert ret['columns'] == ['Z sensor', 'Deflection'], \
+            'Unexpected columns: %s' % ret['columns']
+        ret['columns'] = ['z piezo (m)', 'deflection (m)']
+
+        volt_re = re.compile(
+            'V \[Sens. (\w*)\] \(([.0-9]*) V/LSB\) ([.0-9]*) V')
+        match = volt_re.match(z_piezo_info['@4:Z scale'])
+        assert match.group(1) == 'ZSensorSens', z_piezo_info['@4:Z scale']
+        ret['z piezo sensitivity (V/bit)'] = float(match.group(2))
+        ret['z piezo range (V)'] = float(match.group(3))
+        ret['z piezo offset (V)'] = 0.0
+        # offset assumed if raw data is signed...
+
+        match = volt_re.match(deflection_info['@4:Z scale'])
+        assert match.group(1) == 'DeflSens', z_piezo_info['@4:Z scale']
+        ret['deflection sensitivity (V/bit)'] = float(match.group(2))
+        ret['deflection range (V)'] = float(match.group(3))
+        ret['deflection offset (V)'] = 0.0
+        # offset assumed if raw data is signed...
+
+        nm_sens_re = re.compile('V ([.0-9]*) nm/V')
+        match = nm_sens_re.match(info['Scanner list']['@Sens. Zsens'])
+        ret['z piezo sensitivity (m/V)'] = float(match.group(1))*1e-9
+
+        match = nm_sens_re.match(info['Ciao scan list']['@Sens. DeflSens'])
+        ret['deflection sensitivity (m/V)'] = float(match.group(1))*1e-9
+
+        match = volt_re.match(info['Ciao force list']['@Z scan start'])
+        ret['z piezo scan (V/bit)'] = float(match.group(2))
+        ret['z piezo scan start (V)'] = float(match.group(3))
+
+        match = volt_re.match(info['Ciao force list']['@Z scan size'])
+        ret['z piezo scan size (V)'] = float(match.group(3))
+
+        const_re = re.compile('C \[([:\w\s]*)\] ([.0-9]*)')
+        match = const_re.match(z_piezo_info['@Z magnify'])
+        assert match.group(1) == '4:Z scale', match.group(1)
+        ret['z piezo magnification'] = match.group(2)
+
+        match = volt_re.match(z_piezo_info['@4:Z scale'])
+        assert match.group(1) == 'ZSensorSens', match.group(1)
+        ret['z piezo scale (V/bit)'] = float(match.group(2))
+        ret['z piezo scale (V)'] = float(match.group(3))
+
+        match = volt_re.match(z_piezo_info['@4:Ramp size'])
+        assert match.group(1) == 'Zsens', match.group(1)
+        ret['z piezo ramp size (V/bit)'] = float(match.group(2))
+        ret['z piezo ramp size (V)'] = float(match.group(3))
+
+        match = volt_re.match(z_piezo_info['@4:Ramp offset'])
+        assert match.group(1) == 'Zsens', match.group(1)
+        ret['z piezo ramp offset (V/bit)'] = float(match.group(2))
+        ret['z piezo ramp offset (V)'] = float(match.group(3))
+        
+        # Unaccounted for:
+        #   Samps*
+        
+        return ret
+
+    def _scale_block(self, data):
+        """Convert the block from its native format to a `numpy.float`
+        array in SI units.
+        """
+        ret = curve.Data(
+            shape=data.shape,
+            dtype=numpy.float,
+            )
+        info = data.info
+        ret.info = info
+        ret.info['raw-data'] = data # store the raw data
+        data.info = {} # break circular reference info <-> data
+
+        z_col = info['columns'].index('z piezo (m)')
+        d_col = info['columns'].index('deflection (m)')
+
+        # Leading '-' because Veeco's z increases towards the surface
+        # (positive indentation), but it makes more sense to me to
+        # have it increase away from the surface (positive
+        # separation).
+        ret[:,z_col] = -(
+            (data[:,z_col].astype(ret.dtype)
+             * info['z piezo sensitivity (V/bit)']
+             - info['z piezo offset (V)'])
+            * info['z piezo sensitivity (m/V)']
+            )
+
+        ret[:,d_col] = (
+            (data[:,d_col]
+             * info['deflection sensitivity (V/bit)']
+             - info['deflection offset (V)'])
+            * info['deflection sensitivity (m/V)']
+            )
+
+        return ret
 
-        return cutindex
-        '''
-        return 0
-
-    def is_me(self):
-        '''
-        self-identification of file type magic
-        '''
-        curve_file=file(self.filepath)
-        header=curve_file.read(30)
-        curve_file.close()
-
-        if header[2:17] == 'Force file list': #header of a picoforce file
-            self.data_chunks=[self._get_data_chunk(num) for num in [0,1,2]]
-            return True
-        else:
-            return False
-
-    def close_all(self):
-        '''
-        Explicitly closes all files
-        '''
-        self.textfile.close()
-        self.binfile.close()
-
-    def default_plots(self):
-        '''
-        creates the default PlotObject
-        '''
-
-
-        force=self.LSB_to_force()
-        zdomain=self.Z_domains()
-
-        samples=self._get_samples_line()
-        #cutindex=0
-        #cutindex=self.detriggerize(force.ext())
-
-        main_plot=lhc.PlotObject()
-
-        main_plot.vectors = [[zdomain.ext()[0:samples], force.ext()[0:samples]],[zdomain.ret()[0:samples], force.ret()[0:samples]]]
-        main_plot.normalize_vectors()
-        main_plot.units = ['meters','newton']
-        main_plot.destination = 0
-        main_plot.filename = self.filepath
-        main_plot.title = self.filepath
-        main_plot.colors = ['red', 'blue']
-        main_plot.styles = ['plot', 'plot']
-
-        return [main_plot]
index 4553369f6cb546ded76ba249dd218ff3b3c3f758..adcaf2cec1529d7b87d6f3d1cda3338325c19e47 100644 (file)
@@ -132,5 +132,5 @@ class TutorialDriver (Driver):
         f.close() # remember to close the file
 
         data = curve.Data()
-        info = {'filetype':'tutorial', 'experiment':'generic'}
+        info = {'filetype':'tutorial', 'experiment':experiment.Experiment}
         return (data, info)
index 3d29c75266ea1a8369e9b8e9dc1821b8afc7476b..22ed8d2c4c2f1b6942a0e25f24451030d898b283 100644 (file)
@@ -28,10 +28,34 @@ class Experiment (object):
     pass
 
 class ForceClamp (Experiment):
-    """Constant force experiments.
+    """Constant force force spectroscopy [#fernandez2004]_.
+
+    .. [#fernandez2004] J.M. Fernandez, H. Li.
+      "Force-Clamp Spectroscopy Monitors the Folding Trajectory of a
+      Single Protein."
+      Science, 2004.
+      doi: `10.1126/science.1092497 <http://dx.doi.org/10.1126/science.1092497>`_
     """
     pass
 
 class VelocityClamp (Experiment):
-    """Constant piezo velocity experiments.
+    """Constant piezo velocity force spectroscopy [#rief1997]_.
+
+    .. [#rief1997] M. Rief, M. Gautel, F. Oesterhelt, J.M. Fernandez,
+      H.E. Gaub.
+      "Reversible Unfolding of Individual Titin Immunoglobulin Domains by AFM."
+      Science, 1997.
+      doi: `10.1126/science.276.5315.1109 <http://dx.doi.org/10.1126/science.276.5315.1109>`_
+    """
+    pass
+
+class TwoColorCoincidenceDetection (Experiment):
+    """Two-color fluorescence coincidence spectroscopy [#clarke2007]_.
+
+    .. [#clarke2007] R.W. Clarke, A. Orte, D. Klenerman.
+      "Optimized Threshold Selection for Single-Molecule Two-Color
+      Fluorescence Coincidence Spectroscopy."
+      Anal. Chem., 2007.
+      doi: `10.1021/ac062188w <http://dx.doi.org/10.1021/ac062188w>`_
     """
+    pass
index 23bb4a26b559459f675d27a018792d8616568ddc..62b2f4c648e2f40ff15814324a5cda743810d7a4 100644 (file)
 <FilePlaylist test.hkp>
 Success
 <BLANKLINE>
->>> h = r.run_lines(h, ['curve_info'])
+>>> h = r.run_lines(h, ['curve_info']) # doctest: +ELLIPSIS
 name: picoforce.000
 path: test/data/picoforce.000
-experiment: None
-driver: picoforce
-filetype: None
+experiment: <class 'hooke.experiment.VelocityClamp'>
+driver: <hooke.driver.picoforce.PicoForceDriver object at 0x...>
+filetype: picoforce
 note: 
-blocks: 0
-block sizes: []
+blocks: 2
+block sizes: [(2048, 2), (2048, 2)]
 Success
 <BLANKLINE>
 """