From 77b97054f4fb880b6641cf73d7a7062f1901bc81 Mon Sep 17 00:00:00 2001
From: "W. Trevor King"
Date: Tue, 1 Jun 2010 19:19:03 -0400
Subject: [PATCH] Added hooke.util.fit for easy Levenberg-Marquardt fitting.

---
 hooke/util/fit.py | 151 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 151 insertions(+)
 create mode 100644 hooke/util/fit.py

diff --git a/hooke/util/fit.py b/hooke/util/fit.py
new file mode 100644
index 0000000..e3f23f4
--- /dev/null
+++ b/hooke/util/fit.py
@@ -0,0 +1,151 @@
+# Copyright (C) 2010 W. Trevor King
+#
+# This file is part of Hooke.
+#
+# Hooke is free software: you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation, either
+# version 3 of the License, or (at your option) any later version.
+#
+# Hooke is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with Hooke.  If not, see
+# <http://www.gnu.org/licenses/>.
+
+"""Provide :class:`ModelFitter` to make arbitrary model fitting easy.
+"""
+
+from numpy import arange, ndarray
+from scipy.optimize import leastsq
+
+
+class PoorFit (ValueError):
+    pass
+
+class ModelFitter (object):
+    """A convenient wrapper around :func:`scipy.optimize.leastsq`.
+
+    Parameters
+    ----------
+    data : array_like
+        The data to fit.
+    info :
+        Store any extra information useful inside your overridden
+        methods.
+
+    Examples
+    --------
+
+    >>> import numpy
+
+    You'll want to subclass `ModelFitter`, overriding at least
+    `.model` and potentially the parameter and scale guessing
+    methods.
+
+    >>> class LinearModel (ModelFitter):
+    ...     '''Simple linear model.
+    ...
+    ...     Levenberg-Marquardt is not how you want to solve this problem
+    ...     for real systems, but it's a simple test case.
+    ...     '''
+    ...     def model(self, params):
+    ...         '''A linear model.
+    ...
+    ...         Notes
+    ...         -----
+    ...         .. math:: y = p_0 x + p_1
+    ...         '''
+    ...         p = params  # convenient alias
+    ...         self._model_data[:] = p[0]*arange(len(self._data)) + p[1]
+    ...         return self._model_data
+    ...     def guess_initial_params(self, outqueue=None):
+    ...         return [float(self._data[-1] - self._data[0])/len(self._data),
+    ...                 self._data[0]]
+    ...     def guess_scale(self, params, outqueue=None):
+    ...         slope_scale = params[0]/10.
+    ...         if slope_scale == 0:  # data is expected to be flat
+    ...             slope_scale = float(self._data.max()-self._data.min())/len(self._data)
+    ...         if slope_scale == 0:  # data is completely flat
+    ...             slope_scale = 1.
+    ...         offset_scale = self._data.std()/10.0
+    ...         if offset_scale == 0:  # data is completely flat
+    ...             offset_scale = 1.
+    ...         return [slope_scale, offset_scale]
+    >>> data = 20*numpy.sin(arange(1000)) + 7.*arange(1000) - 33.0
+    >>> m = LinearModel(data)
+    >>> slope,offset = m.fit()
+
+    We round the outputs to protect the doctest against differences in
+    machine rounding during computation.  We expect the values to be close
+    to the input settings (slope 7, offset -33).
+
+    >>> print '%.3f' % slope
+    7.000
+    >>> print '%.3f' % offset
+    -32.890
+
+    The offset is a bit off because the range is not a multiple of
+    :math:`2\pi`.
+ """ + def __init__(self, data, info=None): + self.set_data(data) + self.info = info + + def set_data(self, data): + self._data = data + self._model_data = ndarray(shape=data.shape, dtype=data.dtype) + + def model(self, params): + p = params # convenient alias + self._model_data[:] = arange(len(self._data)) + raise NotImplementedError + + def guess_initial_params(self, outqueue=None): + return [] + + def guess_scale(self, params, outqueue=None): + return [] + + def residual(self, params): + return self._data - self.model(params) + + def fit(self, initial_params=None, scale=None, outqueue=None, **kwargs): + """ + Parameters + ---------- + initial_params : iterable or None + Initial parameter values for residual minimization. If + `None`, they are estimated from the data using + :meth:`guess_initial_params`. + scale : iterable or None + Parameter length scales for residual minimization. If + `None`, they are estimated from the data using + :meth:`guess_scale`. + outqueue : Queue or None + If given, will be used to output the data and fitted model + for user verification. + kwargs : + Any additional arguments are passed through to `leastsq`. + """ + if initial_params == None: + initial_params = self.guess_initial_params(outqueue) + if scale == None: + scale = self.guess_scale(initial_params, outqueue) + params,cov,info,mesg,ier = leastsq( + func=self.residual, x0=initial_params, full_output=True, + diag=scale, **kwargs) + if outqueue != None: + outqeue.put({ + 'initial parameters': initial_params, + 'scale': scale, + 'fitted parameters': params, + 'covariance matrix': cov, + 'info': info, + 'message': mesg, + 'convergence flag': ier, + }) + return params -- 2.26.2