info :
Store any extra information useful inside your overridden
methods.
+ rescale : boolean
+ Rescale parameters so the scale of each is 1.0. Also rescale
+ the data so data.std() == 1.0.
Examples
--------
>>> slope,offset = m.fit(outqueue=outqueue)
>>> info = outqueue.get()
>>> pprint(info) # doctest: +ELLIPSIS, +REPORT_UDIFF
- {'convergence flag': 2,
+ {'active fitted parameters': array([ 6.999..., -32.889...]),
+ 'active parameters': array([ 6.999..., -32.889...]),
+ 'active scale': [0.699..., 202.071...],
+ 'convergence flag': 2,
'covariance matrix': array([[ 1.199...e-08, -5.993...e-06],
[ -5.993...e-06, 3.994...e-03]]),
+ 'data scale factor': 1.0,
'fitted parameters': array([ 6.999..., -32.889...]),
'info': {'fjac': array([[...]]),
'fvec': array([...]),
'qtf': array([ 2.851...e-07, 1.992...e-06])},
'initial parameters': [6.992..., -33.0],
'message': 'The relative error between two consecutive iterates is at most 0.000...',
+ 'rescaled': False,
'scale': [0.699..., 202.071...]}
We round the outputs to protect the doctest against differences in
The offset is a bit off because, the range is not a multiple of
:math:`2\pi`.
+
+ We could also use rescaled parameters:
+
+ >>> m = LinearModel(data, rescale=True)
+ >>> outqueue = Queue()
+ >>> slope,offset = m.fit(outqueue=outqueue)
+ >>> print '%.3f' % slope
+ 7.000
+ >>> print '%.3f' % offset
+ -32.890
"""
- def __init__(self, data, info=None):
- self.set_data(data, info)
+ def __init__(self, *args, **kwargs):
+ self.set_data(*args, **kwargs)
- def set_data(self, data, info=None):
+ def set_data(self, data, info=None, rescale=False):
self._data = data
self._model_data = ndarray(shape=data.shape, dtype=data.dtype)
self.info = info
+ self._rescale = rescale
+ if rescale:
+ self._data_scale_factor = data.std()
+ else:
+ self._data_scale_factor = 1.0
def model(self, params):
p = params # convenient alias
return None
def residual(self, params):
- return self._data - self.model(params)
+ if self._rescale:
+ params = [p*s for p,s in zip(params, self._param_scale_factors)]
+ residual = self._data - self.model(params)
+ if self._rescale:
+ residual /= self._data_scale_factor
+ return residual
def fit(self, initial_params=None, scale=None, outqueue=None, **kwargs):
"""
if scale == None:
scale = self.guess_scale(initial_params, outqueue)
assert min(scale) > 0, scale
+ if self._rescale:
+ self._param_scale_factors = scale
+ active_scale = [1.0 for s in scale]
+ active_params = [p/s for p,s in zip(initial_params,
+ self._param_scale_factors)]
+ else:
+ active_scale = scale
+ active_params = initial_params
params,cov,info,mesg,ier = leastsq(
- func=self.residual, x0=initial_params, full_output=True,
- diag=scale, **kwargs)
+ func=self.residual, x0=active_params, full_output=True,
+ diag=active_scale, **kwargs)
+ if self._rescale:
+ active_params = params
+ params = [p*s for p,s in zip(params, self._param_scale_factors)]
+ else:
+ active_params = params
if outqueue != None:
outqueue.put({
+ 'rescaled': self._rescale,
'initial parameters': initial_params,
+ 'active parameters': active_params,
'scale': scale,
+ 'active scale': active_scale,
+ 'data scale factor': self._data_scale_factor,
+ 'active fitted parameters': active_params,
'fitted parameters': params,
'covariance matrix': cov,
'info': info,