1 """Definition of useful ndimensional linear operators"""
2 import numpy as np
3 from copy import copy
4 import warnings
5 from interface import LinearOperator
8 """Subclass of LinearOperator that handle multidimensional inputs and outputs"""
9 - def __init__(self, shapein, shapeout, matvec, rmatvec=None, matmat=None, rmatmat=None,
10 dtypein=None, dtypeout=None, dtype=np.float64):
11
12 sizein = np.prod(shapein)
13 sizeout = np.prod(shapeout)
14 shape = (sizeout, sizein)
15
16 ndmatvec = lambda x: matvec(x.reshape(shapein)).ravel()
17
18 if rmatvec is not None:
19 ndrmatvec = lambda x: rmatvec(x.reshape(shapeout)).ravel()
20 else:
21 ndrmatvec = None
22
23 LinearOperator.__init__(self, shape, ndmatvec, ndrmatvec, dtype=dtype,
24 dtypein=dtypein, dtypeout=dtypeout)
25
26
27 self.ndmatvec = matvec
28 self.ndrmatvec = rmatvec
29 self.shapein = shapein
30 self.shapeout = shapeout
31
33 """
34 Calling an NDOperator is a short-cut for calling ndmatvec operation,
35 i.e. applying the linear operation on a ndarray or subclass.
36 """
37 return self.ndmatvec(x)
38
39 @property
41 kwargs = {}
42 if self.rmatvec is not None:
43 matvec, kwargs['rmatvec'] = self.ndrmatvec, self.ndmatvec
44 else:
45 raise NotImplementedError('rmatvec is not defined')
46 if self._matmat is not None and self._rmatmat is not None:
47 kwargs['matmat'], kwargs['rmatmat'] = self._rmatmat, self._matmat
48 else:
49 matmat, rmatmat = None, None
50 kwargs['dtype'] = getattr(self, 'dtype', None)
51 kwargs['dtypein'] = getattr(self, 'dtypeout', None)
52 kwargs['dtypeout'] = getattr(self, 'dtypein', None)
53 shapein = self.shapeout
54 shapeout = self.shapein
55 return NDOperator(shapein, shapeout, matvec, **kwargs)
56
58 - def __init__(self, shapein=None, shapeout=None, classin=None,
59 classout=None, dictin=None, dictout=None, xin=None, xout=None,
60 matvec=None, rmatvec=None, dtype=np.float64, dtypein=None,
61 dtypeout=None):
62 "Wrap linear operation working on ndarray subclasses in InfoArray style"
63 if xin is not None:
64 shapein = xin.shape
65 classin = xin.__class__
66 dictin = xin.__dict__
67 dtype = xin.dtype
68
69 if xout is not None:
70 shapeout = xout.shape
71 classout = xout.__class__
72 dictout = xout.__dict__
73
74 sizein = np.prod(shapein)
75 sizeout = np.prod(shapeout)
76 shape = (sizeout, sizein)
77
78 self.ndsmatvec = matvec
79 self.ndsrmatvec = rmatvec
80 self.classin = classin
81 self.classout = classout
82 self.dictin = dictin
83 self.dictout = dictout
84 self.shapein = shapein
85 self.shapeout = shapeout
86
87 if matvec is not None:
88 def smatvec(x):
89 xi = classin(data=x)
90 xi.__dict__ = dictin
91 return matvec(xi)
92 else:
93 raise ValueError('Requires a matvec function')
94
95 if rmatvec is not None:
96 def srmatvec(x):
97 xo = classout(data=x)
98 xo.__dict__ = dictout
99 return rmatvec(xo)
100 else:
101 rmatvec = None
102
103 NDOperator.__init__(self, shapein, shapeout, smatvec, rmatvec=srmatvec,
104 dtypein=dtypein, dtypeout=dtypeout, dtype=dtype)
105
109 "Transform n-dimensional linear operators into LinearOperators"
110 return NDOperator(*kargs, **kwargs)
111
def masubclass(xin=None, xout=None, shapein=None, shapeout=None, classin=None,
               classout=None, dictin=None, dictout=None,
               matvec=None, rmatvec=None, dtype=np.float64, dtypein=None, dtypeout=None):
    """Wrap a linear operation working on ndarray subclasses (MaskedArray style).

    Exemplar arrays ``xin``/``xout``, when given, take precedence over the
    explicit shape/class/dict arguments.  Returns a flat LinearOperator.
    """
    if xin is not None:
        shapein, classin, dictin, dtype = (xin.shape, xin.__class__,
                                           xin.__dict__, xin.dtype)
    if xout is not None:
        shapeout, classout, dictout = xout.shape, xout.__class__, xout.__dict__

    if matvec is None:
        raise ValueError('Requires a matvec function')

    sizein, sizeout = np.prod(shapein), np.prod(shapeout)

    def wrapped_matvec(x):
        # Rebuild the subclass instance around the reshaped flat vector.
        xi = classin(x.reshape(shapein))
        xi.__dict__ = dictin
        return matvec(xi).reshape(sizeout)

    if rmatvec is None:
        wrapped_rmatvec = None
    else:
        def wrapped_rmatvec(x):
            xo = classout(x.reshape(shapeout))
            xo.__dict__ = dictout
            return rmatvec(xo).reshape(sizein)

    return LinearOperator((sizeout, sizein), matvec=wrapped_matvec,
                          rmatvec=wrapped_rmatvec, dtype=dtype,
                          dtypein=dtypein, dtypeout=dtypeout)
144
146 "Wrap linear operation working on ndarray subclasses in InfoArray style"
147 return NDSOperator(**kwargs)
148
151 - def __init__(self, shapein, matvec, **kwargs):
153
155 - def __init__(self, shapein, matvec, **kwargs):
159
160 @property
163
180
182 - def __init__(self, shapein, coef, **kwargs):
189
199
204
207 self.mask = mask.astype(np.bool)
208 self.mask_inv = self.mask == False
209 shapein = mask.shape
210 shapeout = np.sum(self.mask_inv)
211 self._in = np.zeros(shapein)
212 self._out = np.zeros(shapeout)
213 def matvec(x):
214 self._out[:] = x[self.mask_inv]
215 return self._out
216 def rmatvec(x):
217 self._in[self.mask_inv] = x
218 return self._in
219 NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
220
class Fft(NDOperator):
    """One-dimensional discrete Fourier transform operator (unitary norm).

    Parameters
    ----------
    shapein : tuple
        Shape of the input array.
    n : int, optional
        Length of the transformed axis; only n <= shapein[axis] is supported.
    axis : int
        Axis along which the FFT is taken.
    """
    def __init__(self, shapein, n=None, axis=-1, **kwargs):
        # BUG FIX: ``raise NotImplemented`` raised a TypeError because
        # NotImplemented is not an exception class; also guard the
        # comparison against n being None (TypeError on Python 3).
        if n is not None and n > shapein[axis]:
            raise NotImplementedError("The case n larger than shapein is not implemented.")
        self.n = n
        self.axis = axis
        if n is None:
            shapeout = shapein
        else:
            shapeout = np.asarray(copy(shapein))
            shapeout[axis] = n
            shapeout = tuple(shapeout)

        # Normalization making forward/backward transforms symmetric.
        if n is None:
            self.norm = np.sqrt(shapein[axis])
        else:
            self.norm = np.sqrt(n)

        matvec = lambda x: np.fft.fft(x, n=n, axis=axis) / self.norm

        if n is not None:
            def rmatvec(x):
                # Adjoint of the truncated transform: inverse FFT, then
                # zero-pad back to the input shape.
                out = np.zeros(shapein, x.dtype)
                t = np.fft.ifft(x, axis=axis) * self.norm
                s = [slice(None)] * out.ndim
                s[axis] = slice(n)
                # The imaginary part is discarded when out is real-valued;
                # suppress only ComplexWarning, and only locally (the
                # original resetwarnings() clobbered global filters).
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", np.ComplexWarning)
                    out[tuple(s)] = t
                return out
        else:
            rmatvec = lambda x: np.fft.ifft(x, n=n, axis=axis) * self.norm
        NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
256
class Fft2(NDOperator):
    """Two-dimensional discrete Fourier transform operator (unitary norm).

    Parameters
    ----------
    shapein : tuple of length 2
        Shape of the input array.
    s : sequence of int, optional
        Output sizes along ``axes``; only s <= shapein is supported.
    axes : tuple
        The two axes over which the FFT is taken.
    """
    def __init__(self, shapein, s=None, axes=(-2, -1), **kwargs):
        if len(shapein) != 2:
            raise ValueError('Error expected 2 dimensional shape')
        if s is not None:
            for a in axes:
                if s[a] > shapein[a]:
                    # BUG FIX: was ``raise NotImplemented`` (TypeError).
                    raise NotImplementedError("The case s larger than shapein is not implemented.")
        self.s = s
        self.axes = axes

        if s is None:
            shapeout = shapein
        else:
            shapeout = list(shapein)
            for a, si in zip(axes, s):
                shapeout[a] = si

        # Normalization making forward/backward transforms symmetric.
        if s is None:
            self.norm = np.sqrt(np.prod([shapein[a] for a in axes]))
        else:
            self.norm = np.sqrt(np.prod(s))

        matvec = lambda x: np.fft.fft2(x, s=s, axes=axes) / self.norm

        if s is not None:
            def rmatvec(x):
                # Adjoint of the truncated transform: inverse FFT, then
                # zero-pad back to the input shape.
                out = np.zeros(shapein, x.dtype)
                t = np.fft.ifft2(x, axes=axes) * self.norm
                sl = [slice(None)] * out.ndim
                for si, a in zip(s, axes):
                    sl[a] = slice(si)
                # Suppress only ComplexWarning, locally (resetwarnings()
                # used to clear unrelated global filters).
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", np.ComplexWarning)
                    out[tuple(sl)] = t
                return out
        else:
            rmatvec = lambda x: np.fft.ifft2(x, s=s, axes=axes) * self.norm
        NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
296
class Fftn(NDOperator):
    """N-dimensional discrete Fourier transform operator (unitary norm).

    Parameters
    ----------
    shapein : tuple
        Shape of the input array.
    s : sequence of int, optional
        Output sizes along ``axes``; only s <= shapein is supported.
    axes : sequence of int, optional
        Axes over which the FFT is taken (default: all axes).
    """
    def __init__(self, shapein, s=None, axes=None, **kwargs):
        self.s = s
        if axes is None:
            # Default: transform every axis.
            self.axes = list(range(-len(shapein), 0))
        else:
            self.axes = axes
        if s is not None:
            for a in self.axes:
                if s[a] > shapein[a]:
                    # BUG FIX: was ``raise NotImplemented`` (TypeError,
                    # since NotImplemented is not an exception class).
                    raise NotImplementedError("The case s larger than shapein is not implemented.")

        if s is None:
            shapeout = shapein
        else:
            shapeout = list(shapein)
            for a, si in zip(self.axes, s):
                shapeout[a] = si

        # Normalization making forward/backward transforms symmetric.
        if s is None:
            self.norm = np.sqrt(np.prod([shapein[a] for a in self.axes]))
        else:
            self.norm = np.sqrt(np.prod(s))

        matvec = lambda x: np.fft.fftn(x, s=s, axes=self.axes) / self.norm

        if s is not None:
            def rmatvec(x):
                # Adjoint of the truncated transform: inverse FFT, then
                # zero-pad back to the input shape.
                out = np.zeros(shapein, x.dtype)
                t = np.fft.ifftn(x, axes=self.axes) * self.norm
                sl = [slice(None)] * out.ndim
                for si, a in zip(s, self.axes):
                    sl[a] = slice(si)
                # Suppress only ComplexWarning, locally.
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", np.ComplexWarning)
                    out[tuple(sl)] = t
                return out
        else:
            rmatvec = lambda x: np.fft.ifftn(x, s=s, axes=self.axes) * self.norm
        NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
337
339 - def __init__(self, shapein, kernel, mode="full", fft=False, **kwargs):
340 if fft:
341 from scipy.signal import fftconvolve as convolve
342
343 if np.any(np.asarray(kernel.shape) % 2 != 1):
344 raise ValueError("Kernels with non-even shapes are not handled for now.")
345 else:
346 from scipy.signal import convolve
347
348 self.kernel = kernel
349 self.mode = mode
350
351
352 s = (slice(None, None, -1), ) * kernel.ndim
353 self.rkernel = kernel[s]
354
355 if mode == 'full':
356 self.rmode = 'valid'
357 elif mode == 'valid':
358 self.rmode = 'full'
359 elif mode == 'same':
360 self.rmode = 'same'
361
362 if mode == 'full':
363 shapeout = [s + ks - 1 for s, ks in zip(shapein, kernel.shape)]
364 if mode == 'valid':
365 shapeout = [s - ks + 1 for s, ks in zip(shapein, kernel.shape)]
366 if mode == 'same':
367 shapeout = shapein
368
369 matvec = lambda x: convolve(x, self.kernel, mode=self.mode)
370 rmatvec = lambda x: convolve(x, self.rkernel, mode=self.rmode)
371 NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
372
374 - def __init__(self, shapein, kernel, mode='reflect', cval=0.0,
375 origin=0, **kwargs):
376 """
377 Generate a convolution operator wrapping
378 scipy.ndimage.convolve function.
379 The kernel is reversed for the transposition.
380 Note that kernel with even shapes are not handled.
381 """
382 if kernel.ndim == 1:
383 from scipy.ndimage import convolve1d as convolve
384 else:
385 from scipy.ndimage import convolve
386
387 self.kernel = kernel
388 self.mode = mode
389 self.cval = cval
390 self.origin = origin
391
392
393 if np.any(np.asarray(self.kernel.shape) % 2 != 1):
394 ValueError("kernel should have even shape.")
395
396
397 s = (slice(None, None, -1), ) * kernel.ndim
398 self.rkernel = kernel[s]
399
400 shapeout = shapein
401
402 matvec = lambda x: convolve(x, self.kernel, mode=mode, cval=cval,
403 origin=origin)
404 rmatvec = lambda x: convolve(x, self.rkernel, mode=mode, cval=cval,
405 origin=origin)
406 NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
407
410 import fftw3
411 from multiprocessing import cpu_count
412 self.n_threads = cpu_count()
413 shapeout = shapein
414
415 dtype = kwargs.get('dtype', np.float64)
416 dtypein = kwargs.get('dtypein', dtype)
417 dtypeout = kwargs.get('dtypeout', dtype)
418
419
420 self.norm = 1.
421 if dtypein == np.float64 and dtypeout == np.complex128:
422 self.norm = np.sqrt(np.prod(shapein))
423
424 self.inarray = np.zeros(shapein, dtype=dtypein)
425 self.outarray = np.zeros(shapeout, dtype=dtypeout)
426 self.rinarray = np.zeros(shapein, dtype=dtypeout)
427 self.routarray = np.zeros(shapeout, dtype=dtypein)
428 if dtype == np.complex128 or dtypein == np.complex128 or dtypeout == np.complex128:
429 self.plan = fftw3.Plan(inarray=self.inarray,
430 outarray=self.outarray,
431 direction='forward',
432 nthreads=self.n_threads)
433 self.rplan = fftw3.Plan(inarray=self.rinarray,
434 outarray=self.routarray,
435 direction='backward',
436 nthreads=self.n_threads)
437 else:
438 realtypes = ('halfcomplex r2c','halfcomplex r2c')
439 rrealtypes = ('halfcomplex c2r','halfcomplex c2r')
440 self.plan = fftw3.Plan(inarray=self.inarray,
441 outarray=self.outarray,
442 direction='forward',
443 realtypes=realtypes,
444 nthreads=self.n_threads)
445 self.rplan = fftw3.Plan(inarray=self.rinarray,
446 outarray=self.routarray,
447 realtypes=rrealtypes,
448 direction='backward',
449 nthreads=self.n_threads)
450 def matvec(x):
451 self.inarray[:] = x[:]
452 self.plan()
453 return self.outarray / self.norm
454 def rmatvec(x):
455 self.rinarray[:] = x[:]
456 self.rplan()
457 return self.routarray / self.norm
458 NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
459
461 - def __init__(self, shapein, kernel, **kwargs):
462 import fftw3
463 from multiprocessing import cpu_count
464 self.n_threads = cpu_count()
465 self.kernel = kernel
466
467 s = (slice(None, None, -1), ) * kernel.ndim
468 self.rkernel = kernel[s]
469
470 shapeout = shapein
471 fullshape = np.array(shapein) + np.array(kernel.shape) - 1
472
473 self.norm = np.prod(fullshape)
474
475 self.inarray = np.zeros(fullshape, dtype=kernel.dtype)
476 self.outarray = np.zeros(fullshape, dtype=np.complex128)
477 self.rinarray = np.zeros(fullshape, dtype=np.complex128)
478 self.routarray = np.zeros(fullshape, dtype=kernel.dtype)
479 self.plan = fftw3.Plan(inarray=self.inarray,
480 outarray=self.outarray,
481 direction='forward',
482 nthreads=self.n_threads)
483 self.rplan = fftw3.Plan(inarray=self.rinarray,
484 outarray=self.routarray,
485 direction='backward',
486 nthreads=self.n_threads)
487
488 sk = [slice(None, shapei, None) for shapei in kernel.shape]
489 self.si = [slice(None, shapei, None) for shapei in shapein]
490 self.so = [slice(None, shapei, None) for shapei in shapeout]
491
492 self.padded_kernel = np.zeros(shapein, dtype=kernel.dtype)
493 self.padded_kernel[sk] = kernel
494 self.fft_kernel = copy(self.fft(self.padded_kernel))
495
496 self.rpadded_kernel = np.zeros(shapein, dtype=self.rkernel.dtype)
497 self.rpadded_kernel[sk] = self.rkernel
498 self.rfft_kernel = copy(self.fft(self.padded_kernel))
499
500 def matvec(x):
501 return self._centered(self.convolve(x, self.fft_kernel), shapeout) / self.norm
502
503 def rmatvec(x):
504 return self._centered(self.convolve(x, self.rfft_kernel), shapeout) / self.norm
505 NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
506
    def fft(self, arr):
        # Forward transform: copy arr into the padded plan input buffer and
        # execute the cached fftw plan.  Returns the shared output buffer —
        # callers copy() it before the next call (see __init__).
        # NOTE(review): assumes the plan writes inarray -> outarray in
        # place — confirm against the fftw3 bindings.
        self.inarray[self.si] = arr
        self.plan()
        return self.outarray
511
    def ifft(self, arr):
        # Backward transform into the shared output buffer.
        # NOTE(review): unlike fft(), only the self.si region of arr is
        # copied in — presumably arr is a full-shape spectrum here; verify
        # against convolve(), the only visible caller.
        self.rinarray[self.si] = arr[self.si]
        self.rplan()
        return self.routarray
516
518 return self.ifft(self.fft(arr) * fft_kernel)
519
521
522 newsize = np.asarray(newsize)
523 currsize = np.array(arr.shape)
524 startind = (currsize - newsize) / 2
525 endind = startind + newsize
526 myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
527 return arr[tuple(myslice)]
528
class Diff(NDOperator):
    """First-order finite-difference operator along one axis.

    matvec is ``np.diff``; the adjoint zero-pads its argument by one sample
    at each end of ``axis`` and returns the negated difference.
    """
    def __init__(self, shapein, axis=-1, **kwargs):
        self.axis = axis
        shapeout = np.asarray(shapein)
        shapeout[axis] -= 1
        # Scratch buffer shared by every rmatvec call: rmatvec is therefore
        # not thread-safe and its result is overwritten by the next call.
        shapetmp = list(shapeout)
        shapetmp[axis] += 2
        tmp = np.zeros(shapetmp)
        s = [slice(None)] * len(shapein)
        s[axis] = slice(1, -1)
        # FIX: index with a tuple — list indexing is deprecated/removed in
        # modern numpy.
        s = tuple(s)

        def matvec(x):
            return np.diff(x, axis=axis)

        def rmatvec(x):
            tmp[s] = x
            return -np.diff(tmp, axis=axis)
        NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
545
547 - def __init__(self, shapein, factor, axis=-1, **kwargs):
548 self.factor = factor
549 self.axis = axis
550
551 shapeout = np.asarray(copy(shapein))
552 shapeout[axis] = np.ceil(shapeout[axis] / float(factor))
553
554 matvec = lambda x: self.bin(x, factor, axis=axis)
555 rmatvec = lambda x: self.replicate(x, factor, axis=axis)
556 NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
557
558 - def bin(self, arr, factor, axis=-1):
559 outarr = np.zeros(self.shapeout)
560 s0 = [slice(None),] * arr.ndim
561 s1 = [slice(None),] * arr.ndim
562 for i in xrange(arr.shape[axis]):
563 s0[axis] = i
564 s1[axis] = np.floor(i / factor)
565 outarr[s1] += arr[s0]
566 return outarr
567
569 outarr = np.zeros(self.shapein)
570 s0 = [slice(None),] * arr.ndim
571 s1 = [slice(None),] * arr.ndim
572 for i in xrange(self.shapein[axis]):
573 s0[axis] = i
574 s1[axis] = np.floor(i / factor)
575 outarr[s0] = arr[s1]
576 return outarr
577
579 - def __init__(self, shapein, slices, **kwargs):
580
581 shapeout = []
582 for a, s in enumerate(slices):
583 if s.stop is None:
584 stop = shapein[a] -1
585 else:
586 stop = s.stop
587 start = np.max((s.start, 0))
588 step = np.max((1, s.step))
589 shapeout.append(int(np.ceil((stop - start + 1) / float(step))))
590 shapeout = tuple(shapeout)
591
592 def matvec(x):
593 return x[slices]
594 def rmatvec(x):
595 out = np.zeros(shapein)
596 out[slices] = x
597 return out
598 NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec=rmatvec, **kwargs)
599
603
606
609
612
615
def fft(shapein, n=None, axis=-1, **kwargs):
    """Return a one-dimensional FFT operator over arrays of shape ``shapein``."""
    return Fft(shapein, n=n, axis=axis, **kwargs)
618
def fft2(shapein, s=None, axes=(-2, -1), **kwargs):
    """Return a two-dimensional FFT operator over arrays of shape ``shapein``."""
    return Fft2(shapein, s=s, axes=axes, **kwargs)
621
def fftn(shapein, s=None, axes=None, **kwargs):
    """Return an n-dimensional FFT operator over arrays of shape ``shapein``."""
    return Fftn(shapein, s=s, axes=axes, **kwargs)
624
def convolve(shapein, kernel, mode="full", fft=False, **kwargs):
    """Return a Convolve operator.

    BUG FIX: the ``fft`` flag was accepted but never forwarded, so the
    fftconvolve path could not be selected through this factory.
    """
    return Convolve(shapein, kernel, mode=mode, fft=fft, **kwargs)
627
def convolve_ndimage(shapein, kernel, mode="reflect", cval=0.0, origin=0,
                     **kwargs):
    """Return a ConvolveNDImage operator (scipy.ndimage-backed convolution)."""
    return ConvolveNDImage(shapein, kernel, mode=mode, cval=cval,
                           origin=origin, **kwargs)
634
def diff(shapein, axis=-1, **kwargs):
    """Return a first-order difference operator along ``axis``."""
    return Diff(shapein, axis=axis, **kwargs)
637
def binning(shapein, factor, axis=-1, **kwargs):
    """Return a binning (downsampling-by-summation) operator."""
    return Binning(shapein, factor, axis=axis, **kwargs)
640
def ndslice(shapein, slices, **kwargs):
    """Return a slicing operator applying ``slices`` to its input."""
    return NDSlice(shapein, slices, **kwargs)
643