Examples
--------
>>> from pprint import pprint
>>> from Queue import Queue
>>> import numpy
You'll want to subclass `ModelFitter`, overriding at least
... return [slope_scale, offset_scale]
>>> data = 20*numpy.sin(arange(1000)) + 7.*arange(1000) - 33.0
>>> m = LinearModel(data)
>>> outqueue = Queue()
>>> slope,offset = m.fit(outqueue=outqueue)
>>> info = outqueue.get()
>>> pprint(info)  # doctest: +ELLIPSIS, +REPORT_UDIFF
{'convergence flag': 2,
 'covariance matrix': array([[ 1.199...e-08, -5.993...e-06],
       [ -5.993...e-06,  3.994...e-03]]),
 'fitted parameters': array([  6.999..., -32.889...]),
 'info': {'fjac': array([[...]]),
          'fvec': array([...]),
          'ipvt': array([1, 2]),
          'nfev': 7,
          'qtf': array([  2.851...e-07,   1.992...e-06])},
 'initial parameters': [6.992..., -33.0],
 'message': 'The relative error between two consecutive iterates is at most 0.000...',
 'scale': [0.699..., 202.071...]}
We round the outputs to protect the doctest against differences in
machine rounding during computation.  We expect the fitted values to be
close to the true parameters, up to small perturbations from the sine
term (period :math:`2\pi`).
"""
def __init__(self, data, info=None):
    """Initialize the fitter with `data` and optional `info`.

    Delegates to `set_data` so that subclass overrides of
    `set_data` also take effect at construction time.
    """
    self.set_data(data, info)
def set_data(self, data, info=None):
    """Attach `data` to the fitter and allocate a model buffer.

    Parameters
    ----------
    data : ndarray
        The data to be fit.
    info : object, optional
        Auxiliary information stored verbatim on the instance for
        later use (e.g. by subclass `model` implementations).
    """
    self._data = data
    # Scratch array with the same shape/dtype as the data, reused
    # when evaluating the model.
    self._model_data = ndarray(shape=data.shape, dtype=data.dtype)
    self.info = info
def model(self, params):
    """Compute the model values for the given parameters.

    Base-class stub: returns an empty list.  Subclasses should
    override this to evaluate their model (typically filling and
    returning `self._model_data`).
    """
    return []
def guess_scale(self, params, outqueue=None):
    """Return per-parameter scale factors for `leastsq`'s `diag`.

    Base-class stub: returns None, which tells the fitting routine
    to fall back on its own automatic scaling.  Subclasses may
    override to supply a list with one positive scale per parameter.
    """
    return None
def residual(self, params):
    """Difference between the stored data and the model at `params`."""
    model_values = self.model(params)
    return self._data - model_values
initial_params = self.guess_initial_params(outqueue)
if scale == None:
scale = self.guess_scale(initial_params, outqueue)
+ assert min(scale) > 0, scale
params,cov,info,mesg,ier = leastsq(
func=self.residual, x0=initial_params, full_output=True,
diag=scale, **kwargs)
if outqueue != None:
- outqeue.put({
+ outqueue.put({
'initial parameters': initial_params,
'scale': scale,
'fitted parameters': params,