#ifndef VIGRA_RESAMPLING_CONVOLUTION_HXX
#define VIGRA_RESAMPLING_CONVOLUTION_HXX

#include <cmath>
#include <algorithm>

#include "stdimage.hxx"
#include "array_vector.hxx"
#include "rational.hxx"
#include "functortraits.hxx"
#include "functorexpression.hxx"
#include "transformimage.hxx"
#include "combineimages.hxx"          // combineTwoImages()
#include "separableconvolution.hxx"   // Kernel1D
#include "imagecontainer.hxx"

namespace vigra {
namespace resampling_detail
{

struct MapTargetToSourceCoordinate
{
    MapTargetToSourceCoordinate(Rational<int> const & samplingRatio,
                                Rational<int> const & offset)
    : a(samplingRatio.denominator()*offset.denominator()),
      b(samplingRatio.numerator()*offset.numerator()),
      c(samplingRatio.numerator()*offset.denominator())
    {}

        // the mapping realized in integer arithmetic is:
        //     source = target / samplingRatio + offset  =  (target * a + b) / c
    int operator()(int i) const
    {
        return (i * a + b) / c;
    }

    double toDouble(int i) const
    {
        return double(i * a + b) / c;
    }

    Rational<int> toRational(int i) const
    {
        return Rational<int>(i * a + b, c);
    }

    bool isExpand2() const
    {
        return a == 1 && b == 0 && c == 2;
    }

    bool isReduce2() const
    {
        return a == 2 && b == 0 && c == 1;
    }

    int a, b, c;
};

} // namespace resampling_detail
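/* Worked example (illustration only, not part of the original header):
   for samplingRatio = 1/2 (two-fold reduction) and offset = 0 the functor
   stores a = 2, b = 0, c = 1, so target index 3 maps to source index
   (3*2 + 0)/1 = 6.

    \code
    #include <cassert>

    vigra::Rational<int> ratio(1, 2), offset(0);
    vigra::resampling_detail::MapTargetToSourceCoordinate map(ratio, offset);
    assert(map(3) == 6);            // integer source index
    assert(map.toDouble(3) == 6.0); // exact source position
    assert(map.isReduce2());        // two-fold reduction fast path applies
    \endcode
*/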
template <>
class FunctorTraits<resampling_detail::MapTargetToSourceCoordinate>
: public FunctorTraitsBase<resampling_detail::MapTargetToSourceCoordinate>
{
  public:
    typedef VigraTrueType isUnaryFunctor;
};
template <class SrcIter, class SrcAcc,
          class DestIter, class DestAcc,
          class KernelArray>
void
resamplingExpandLine2(SrcIter s, SrcIter send, SrcAcc src,
                      DestIter d, DestIter dend, DestAcc dest,
                      KernelArray const & kernels)
{
    typedef typename KernelArray::value_type Kernel;
    typedef typename KernelArray::const_reference KernelRef;
    typedef typename Kernel::const_iterator KernelIter;

    typedef typename
        PromoteTraits<typename SrcAcc::value_type, typename Kernel::value_type>::Promote
        TmpType;

    int wo = send - s;      // old (source) width
    int wn = dend - d;      // new (target) width
    int wo2 = 2*wo - 2;     // mirror index for reflection at the right border

    int ileft = std::max(kernels[0].right(), kernels[1].right());
    int iright = wo + std::min(kernels[0].left(), kernels[1].left()) - 1;
    for(int i = 0; i < wn; ++i, ++d)
    {
        int is = i / 2;                     // source index of target pixel i
        KernelRef kernel = kernels[i & 1];  // even/odd targets use different kernels
        KernelIter k = kernel.center() + kernel.right();
        TmpType sum = NumericTraits<TmpType>::zero();
        if(is < ileft)          // kernel overlaps the left border => reflect
        {
            for(int m=is-kernel.right(); m <= is-kernel.left(); ++m, --k)
            {
                int mm = (m < 0) ? -m : m;
                sum += *k * src(s, mm);
            }
        }
        else if(is > iright)    // kernel overlaps the right border => reflect
        {
            for(int m=is-kernel.right(); m <= is-kernel.left(); ++m, --k)
            {
                int mm = (m >= wo) ? wo2 - m : m;
                sum += *k * src(s, mm);
            }
        }
        else                    // kernel fully inside => direct access
        {
            SrcIter ss = s + is - kernel.right();
            for(int m = 0; m < kernel.size(); ++m, --k, ++ss)
            {
                sum += *k * src(ss);
            }
        }
        dest.set(sum, d);
    }
}
template <class SrcIter, class SrcAcc,
          class DestIter, class DestAcc,
          class KernelArray>
void
resamplingReduceLine2(SrcIter s, SrcIter send, SrcAcc src,
                      DestIter d, DestIter dend, DestAcc dest,
                      KernelArray const & kernels)
{
    typedef typename KernelArray::value_type Kernel;
    typedef typename KernelArray::const_reference KernelRef;
    typedef typename Kernel::const_iterator KernelIter;

    KernelRef kernel = kernels[0];
    KernelIter kbegin = kernel.center() + kernel.right();

    typedef typename
        PromoteTraits<typename SrcAcc::value_type, typename Kernel::value_type>::Promote
        TmpType;

    int wo = send - s;      // old (source) width
    int wn = dend - d;      // new (target) width
    int wo2 = 2*wo - 2;     // mirror index for reflection at the right border

    int ileft = kernel.right();
    int iright = wo + kernel.left() - 1;
    for(int i = 0; i < wn; ++i, ++d)
    {
        int is = 2 * i;                 // source index of target pixel i
        KernelIter k = kbegin;
        TmpType sum = NumericTraits<TmpType>::zero();
        if(is < ileft)          // kernel overlaps the left border => reflect
        {
            for(int m=is-kernel.right(); m <= is-kernel.left(); ++m, --k)
            {
                int mm = (m < 0) ? -m : m;
                sum += *k * src(s, mm);
            }
        }
        else if(is > iright)    // kernel overlaps the right border => reflect
        {
            for(int m=is-kernel.right(); m <= is-kernel.left(); ++m, --k)
            {
                int mm = (m >= wo) ? wo2 - m : m;
                sum += *k * src(s, mm);
            }
        }
        else                    // kernel fully inside => direct access
        {
            SrcIter ss = s + is - kernel.right();
            for(int m = 0; m < kernel.size(); ++m, --k, ++ss)
            {
                sum += *k * src(ss);
            }
        }
        dest.set(sum, d);
    }
}
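/* Note (illustration only, not part of the original header): the two helpers
   above are fast paths for two-fold expansion and reduction. User code normally
   never calls them directly; resamplingConvolveLine() below dispatches to them
   automatically, equivalent to

    \code
    if(map.isExpand2())       // samplingRatio == 2,   offset == 0
        resamplingExpandLine2(s, send, src, d, dend, dest, kernels);
    else if(map.isReduce2())  // samplingRatio == 1/2, offset == 0
        resamplingReduceLine2(s, send, src, d, dend, dest, kernels);
    \endcode
*/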
template <class SrcIter, class SrcAcc,
          class DestIter, class DestAcc,
          class KernelArray,
          class Functor>
void
resamplingConvolveLine(SrcIter s, SrcIter send, SrcAcc src,
                       DestIter d, DestIter dend, DestAcc dest,
                       KernelArray const & kernels,
                       Functor mapTargetToSourceCoordinate)
{
    // fast paths for two-fold expansion and reduction
    if(mapTargetToSourceCoordinate.isExpand2())
    {
        resamplingExpandLine2(s, send, src, d, dend, dest, kernels);
        return;
    }
    if(mapTargetToSourceCoordinate.isReduce2())
    {
        resamplingReduceLine2(s, send, src, d, dend, dest, kernels);
        return;
    }

    typedef typename
        NumericTraits<typename SrcAcc::value_type>::RealPromote
        TmpType;
    typedef typename KernelArray::value_type Kernel;
    typedef typename Kernel::const_iterator KernelIter;

    int wo = send - s;      // old (source) width
    int wn = dend - d;      // new (target) width
    int wo2 = 2*wo - 2;     // mirror index for reflection at the right border

    int i;
    typename KernelArray::const_iterator kernel = kernels.begin();
    for(i=0; i<wn; ++i, ++d, ++kernel)
    {
        // cycle through the kernels periodically
        if(kernel == kernels.end())
            kernel = kernels.begin();

        // map the current target point into the source
        int is = mapTargetToSourceCoordinate(i);

        TmpType sum = NumericTraits<TmpType>::zero();

        int lbound = is - kernel->right(),
            hbound = is - kernel->left();

        KernelIter k = kernel->center() + kernel->right();
        if(lbound < 0 || hbound >= wo)
        {
            vigra_precondition(-lbound < wo && wo2 - hbound >= 0,
                "resamplingConvolveLine(): kernel or offset larger than image.");
            for(int m=lbound; m <= hbound; ++m, --k)
            {
                // reflect indices that fall outside the source line
                int mm = (m < 0) ? -m : (m >= wo) ? wo2 - m : m;
                sum = TmpType(sum + *k * src(s, mm));
            }
        }
        else
        {
            SrcIter ss = s + lbound;
            SrcIter ssend = s + hbound;
            for(; ss <= ssend; ++ss, --k)
            {
                sum = TmpType(sum + *k * src(ss));
            }
        }
        dest.set(sum, d);
    }
}
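/* Usage sketch (illustration only, not part of the original header):
   resamplingConvolveLine() resamples a single line of data; the kernel array
   must have been prepared with createResamplingKernels() (below) for the same
   coordinate map. The container types, sigma, and sizes here are assumptions.

    \code
    #include <vector>
    #include <vigra/accessor.hxx>
    #include <vigra/gaussians.hxx>
    #include <vigra/resampling_convolution.hxx>

    std::vector<float> in(512), out(256);
    vigra::Rational<int> ratio(1, 2), offset(0);      // two-fold reduction
    vigra::resampling_detail::MapTargetToSourceCoordinate map(ratio, offset);
    vigra::ArrayVector<vigra::Kernel1D<double> > kernels(
        vigra::lcm(ratio.numerator(), ratio.denominator()));
    vigra::createResamplingKernels(vigra::Gaussian<double>(2.0 / 3.0), map, kernels);

    vigra::resamplingConvolveLine(in.begin(), in.end(),
                                  vigra::StandardConstValueAccessor<float>(),
                                  out.begin(), out.end(),
                                  vigra::StandardValueAccessor<float>(),
                                  kernels, map);
    \endcode
*/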
template <class Kernel, class MapCoordinate, class KernelArray>
void
createResamplingKernels(Kernel const & kernel,
                        MapCoordinate const & mapCoordinate, KernelArray & kernels)
{
    for(unsigned int idest = 0; idest < kernels.size(); ++idest)
    {
        int isrc = mapCoordinate(idest);
        double idsrc = mapCoordinate.toDouble(idest);
        double offset = idsrc - isrc;      // fractional part of the source position
        double radius = kernel.radius();
        int left = int(std::ceil(-radius - offset));
        int right = int(std::floor(radius - offset));
        kernels[idest].initExplicitly(left, right);

        // sample the continuous kernel at the subpixel offset of this phase
        double x = left + offset;
        for(int i = left; i <= right; ++i, ++x)
            kernels[idest][i] = kernel(x);
        kernels[idest].normalize(1.0, kernel.derivativeOrder(), offset);
    }
}
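/* Usage sketch (illustration only, not part of the original header):
   resamplingConvolveX()/Y() below allocate lcm(p, q) kernels for a sampling
   ratio p/q and cycle through them periodically, so every target phase gets a
   kernel sampled at its own fractional source offset. For example, for ratio
   3/2 (enlarge by 1.5):

    \code
    vigra::Rational<int> ratio(3, 2), offset(0);
    vigra::resampling_detail::MapTargetToSourceCoordinate map(ratio, offset);
    int period = vigra::lcm(ratio.numerator(), ratio.denominator());   // == 6
    vigra::ArrayVector<vigra::Kernel1D<double> > kernels(period);
    vigra::createResamplingKernels(vigra::Gaussian<double>(0.6), map, kernels);
    \endcode
*/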
template <class SrcIter, class SrcAcc,
          class DestIter, class DestAcc,
          class Kernel>
void
resamplingConvolveX(SrcIter sul, SrcIter slr, SrcAcc src,
                    DestIter dul, DestIter dlr, DestAcc dest,
                    Kernel const & kernel,
                    Rational<int> const & samplingRatio, Rational<int> const & offset)
{
    int wold = slr.x - sul.x;
    int wnew = dlr.x - dul.x;

    vigra_precondition(!samplingRatio.is_inf() && samplingRatio > 0,
        "resamplingConvolveX(): sampling ratio must be > 0 and < infinity");
    vigra_precondition(!offset.is_inf(),
        "resamplingConvolveX(): offset must be < infinity");

    int period = lcm(samplingRatio.numerator(), samplingRatio.denominator());
    resampling_detail::MapTargetToSourceCoordinate mapCoordinate(samplingRatio, offset);

    ArrayVector<Kernel1D<double> > kernels(period);

    createResamplingKernels(kernel, mapCoordinate, kernels);

    for(; sul.y < slr.y; ++sul.y, ++dul.y)
    {
        typename SrcIter::row_iterator sr = sul.rowIterator();
        typename DestIter::row_iterator dr = dul.rowIterator();
        resamplingConvolveLine(sr, sr+wold, src, dr, dr+wnew, dest,
                               kernels, mapCoordinate);
    }
}
template <class SrcIter, class SrcAcc,
          class DestIter, class DestAcc,
          class Kernel>
inline void
resamplingConvolveX(triple<SrcIter, SrcIter, SrcAcc> src,
                    triple<DestIter, DestIter, DestAcc> dest,
                    Kernel const & kernel,
                    Rational<int> const & samplingRatio, Rational<int> const & offset)
{
    resamplingConvolveX(src.first, src.second, src.third,
                        dest.first, dest.second, dest.third,
                        kernel, samplingRatio, offset);
}
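/* Usage sketch (illustration only, not part of the original header; the image
   sizes and the Gaussian smoothing kernel are assumptions):

    \code
    #include <vigra/stdimage.hxx>
    #include <vigra/gaussians.hxx>
    #include <vigra/resampling_convolution.hxx>

    int w = 200, h = 150;
    vigra::FImage src(w, h), dest(2*w - 1, h);   // enlarge x by ratio 2, offset 0

    vigra::resamplingConvolveX(srcImageRange(src), destImageRange(dest),
                               vigra::Gaussian<double>(0.7),
                               vigra::Rational<int>(2, 1), vigra::Rational<int>(0));
    \endcode
*/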
template <class SrcIter, class SrcAcc,
          class DestIter, class DestAcc,
          class Kernel>
void
resamplingConvolveY(SrcIter sul, SrcIter slr, SrcAcc src,
                    DestIter dul, DestIter dlr, DestAcc dest,
                    Kernel const & kernel,
                    Rational<int> const & samplingRatio, Rational<int> const & offset)
{
    int hold = slr.y - sul.y;
    int hnew = dlr.y - dul.y;

    vigra_precondition(!samplingRatio.is_inf() && samplingRatio > 0,
        "resamplingConvolveY(): sampling ratio must be > 0 and < infinity");
    vigra_precondition(!offset.is_inf(),
        "resamplingConvolveY(): offset must be < infinity");

    int period = lcm(samplingRatio.numerator(), samplingRatio.denominator());
    resampling_detail::MapTargetToSourceCoordinate mapCoordinate(samplingRatio, offset);

    ArrayVector<Kernel1D<double> > kernels(period);

    createResamplingKernels(kernel, mapCoordinate, kernels);

    for(; sul.x < slr.x; ++sul.x, ++dul.x)
    {
        typename SrcIter::column_iterator sc = sul.columnIterator();
        typename DestIter::column_iterator dc = dul.columnIterator();
        resamplingConvolveLine(sc, sc+hold, src, dc, dc+hnew, dest,
                               kernels, mapCoordinate);
    }
}
template <class SrcIter, class SrcAcc,
          class DestIter, class DestAcc,
          class Kernel>
inline void
resamplingConvolveY(triple<SrcIter, SrcIter, SrcAcc> src,
                    triple<DestIter, DestIter, DestAcc> dest,
                    Kernel const & kernel,
                    Rational<int> const & samplingRatio, Rational<int> const & offset)
{
    resamplingConvolveY(src.first, src.second, src.third,
                        dest.first, dest.second, dest.third,
                        kernel, samplingRatio, offset);
}
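/* Usage sketch (illustration only, not part of the original header): same
   calling convention as resamplingConvolveX() above, but resampling along y;
   w and h are assumed as in the previous sketch:

    \code
    vigra::FImage src(w, h), dest(w, 2*h - 1);   // enlarge y by ratio 2, offset 0

    vigra::resamplingConvolveY(srcImageRange(src), destImageRange(dest),
                               vigra::Gaussian<double>(0.7),
                               vigra::Rational<int>(2, 1), vigra::Rational<int>(0));
    \endcode
*/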
template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class KernelX, class KernelY>
void
resamplingConvolveImage(SrcIterator sul, SrcIterator slr, SrcAccessor src,
                        DestIterator dul, DestIterator dlr, DestAccessor dest,
                        KernelX const & kx,
                        Rational<int> const & samplingRatioX, Rational<int> const & offsetX,
                        KernelY const & ky,
                        Rational<int> const & samplingRatioY, Rational<int> const & offsetY)
{
    typedef typename
        NumericTraits<typename SrcAccessor::value_type>::RealPromote
        TmpType;

    // intermediate image: new width, old height
    BasicImage<TmpType> tmp(dlr.x - dul.x, slr.y - sul.y);

    resamplingConvolveX(srcIterRange(sul, slr, src),
                        destImageRange(tmp),
                        kx, samplingRatioX, offsetX);
    resamplingConvolveY(srcImageRange(tmp),
                        destIterRange(dul, dlr, dest),
                        ky, samplingRatioY, offsetY);
}
template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class KernelX, class KernelY>
inline void
resamplingConvolveImage(triple<SrcIterator, SrcIterator, SrcAccessor> src,
                        triple<DestIterator, DestIterator, DestAccessor> dest,
                        KernelX const & kx,
                        Rational<int> const & samplingRatioX, Rational<int> const & offsetX,
                        KernelY const & ky,
                        Rational<int> const & samplingRatioY, Rational<int> const & offsetY)
{
    resamplingConvolveImage(src.first, src.second, src.third,
                            dest.first, dest.second, dest.third,
                            kx, samplingRatioX, offsetX,
                            ky, samplingRatioY, offsetY);
}
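/* Usage sketch (illustration only, not part of the original header; sizes and
   sigma are assumptions):

    \code
    #include <vigra/stdimage.hxx>
    #include <vigra/gaussians.hxx>
    #include <vigra/resampling_convolution.hxx>

    vigra::Rational<int> ratio(2), offset(0);
    int w = 200, h = 150;
    vigra::FImage src(w, h),
                  dest(vigra::rational_cast<int>(ratio * (w - 1) + 1),
                       vigra::rational_cast<int>(ratio * (h - 1) + 1));

    vigra::Gaussian<double> smooth(0.7);

    // simultaneously enlarge and smooth the source image
    vigra::resamplingConvolveImage(srcImageRange(src), destImageRange(dest),
                                   smooth, ratio, offset,
                                   smooth, ratio, offset);
    \endcode
*/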
template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor>
void
pyramidReduceBurtFilter(SrcIterator sul, SrcIterator slr, SrcAccessor src,
                        DestIterator dul, DestIterator dlr, DestAccessor dest,
                        double centerValue = 0.4)
{
    vigra_precondition(0.25 <= centerValue && centerValue <= 0.5,
        "pyramidReduceBurtFilter(): centerValue must be between 0.25 and 0.5.");

    int wold = slr.x - sul.x;
    int wnew = dlr.x - dul.x;
    int hold = slr.y - sul.y;
    int hnew = dlr.y - dul.y;

    vigra_precondition(wnew == (wold + 1) / 2 && hnew == (hold + 1) / 2,
        "pyramidReduceBurtFilter(): newSize = ceil(oldSize / 2) required.");

    Rational<int> samplingRatio(1, 2), offset(0);
    resampling_detail::MapTargetToSourceCoordinate mapCoordinate(samplingRatio, offset);

    // 5-tap Burt filter: (0.25 - c/2, 0.25, c, 0.25, 0.25 - c/2)
    ArrayVector<Kernel1D<double> > kernels(1);
    kernels[0].initExplicitly(-2, 2) = 0.25 - centerValue / 2.0, 0.25, centerValue,
                                       0.25, 0.25 - centerValue / 2.0;

    typedef typename
        NumericTraits<typename SrcAccessor::value_type>::RealPromote
        TmpType;
    typedef BasicImage<TmpType> TmpImage;
    typedef typename TmpImage::traverser TmpIterator;

    BasicImage<TmpType> tmp(wnew, hold);

    TmpIterator tul = tmp.upperLeft();

    // reduce in x-direction into the intermediate image
    for(; sul.y < slr.y; ++sul.y, ++tul.y)
    {
        typename SrcIterator::row_iterator sr = sul.rowIterator();
        typename TmpIterator::row_iterator tr = tul.rowIterator();
        resamplingConvolveLine(sr, sr+wold, src, tr, tr+wnew, tmp.accessor(),
                               kernels, mapCoordinate);
    }

    tul = tmp.upperLeft();

    // reduce in y-direction into the destination image
    for(; dul.x < dlr.x; ++dul.x, ++tul.x)
    {
        typename DestIterator::column_iterator dc = dul.columnIterator();
        typename TmpIterator::column_iterator tc = tul.columnIterator();
        resamplingConvolveLine(tc, tc+hold, tmp.accessor(), dc, dc+hnew, dest,
                               kernels, mapCoordinate);
    }
}
template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor>
inline void
pyramidReduceBurtFilter(triple<SrcIterator, SrcIterator, SrcAccessor> src,
                        triple<DestIterator, DestIterator, DestAccessor> dest,
                        double centerValue = 0.4)
{
    pyramidReduceBurtFilter(src.first, src.second, src.third,
                            dest.first, dest.second, dest.third, centerValue);
}
template <class Image, class Alloc>
inline void
pyramidReduceBurtFilter(ImagePyramid<Image, Alloc> & pyramid, int fromLevel, int toLevel,
                        double centerValue = 0.4)
{
    vigra_precondition(fromLevel < toLevel,
        "pyramidReduceBurtFilter(): fromLevel must be smaller than toLevel.");
    vigra_precondition(pyramid.lowestLevel() <= fromLevel && toLevel <= pyramid.highestLevel(),
        "pyramidReduceBurtFilter(): fromLevel and toLevel must be between the lowest and highest pyramid levels (inclusive).");

    for(int i=fromLevel+1; i <= toLevel; ++i)
        pyramidReduceBurtFilter(srcImageRange(pyramid[i-1]), destImageRange(pyramid[i]),
                                centerValue);
}
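/* Usage sketch (illustration only, not part of the original header; assumes the
   ImagePyramid constructor taking (lowestLevel, highestLevel, image)):

    \code
    #include <vigra/imagecontainer.hxx>
    #include <vigra/resampling_convolution.hxx>

    vigra::FImage img(256, 256);
    vigra::ImagePyramid<vigra::FImage> pyramid(0, 4, img);   // levels 0..4, img at level 0

    vigra::pyramidReduceBurtFilter(pyramid, 0, 4);           // fill levels 1..4
    \endcode
*/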
template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor>
void
pyramidExpandBurtFilter(SrcIterator sul, SrcIterator slr, SrcAccessor src,
                        DestIterator dul, DestIterator dlr, DestAccessor dest,
                        double centerValue = 0.4)
{
    vigra_precondition(0.25 <= centerValue && centerValue <= 0.5,
        "pyramidExpandBurtFilter(): centerValue must be between 0.25 and 0.5.");

    int wold = slr.x - sul.x;
    int wnew = dlr.x - dul.x;
    int hold = slr.y - sul.y;
    int hnew = dlr.y - dul.y;

    vigra_precondition(wold == (wnew + 1) / 2 && hold == (hnew + 1) / 2,
        "pyramidExpandBurtFilter(): oldSize = ceil(newSize / 2) required.");

    Rational<int> samplingRatio(2), offset(0);
    resampling_detail::MapTargetToSourceCoordinate mapCoordinate(samplingRatio, offset);

    // even targets (centered on a source pixel) and odd targets (between two
    // source pixels) use different phases of the doubled Burt filter
    ArrayVector<Kernel1D<double> > kernels(2);
    kernels[0].initExplicitly(-1, 1) = 0.5 - centerValue, 2.0*centerValue, 0.5 - centerValue;
    kernels[1].initExplicitly(-1, 0) = 0.5, 0.5;

    typedef typename
        NumericTraits<typename SrcAccessor::value_type>::RealPromote
        TmpType;
    typedef BasicImage<TmpType> TmpImage;
    typedef typename TmpImage::traverser TmpIterator;

    BasicImage<TmpType> tmp(wnew, hold);

    TmpIterator tul = tmp.upperLeft();

    // expand in x-direction into the intermediate image
    for(; sul.y < slr.y; ++sul.y, ++tul.y)
    {
        typename SrcIterator::row_iterator sr = sul.rowIterator();
        typename TmpIterator::row_iterator tr = tul.rowIterator();
        resamplingConvolveLine(sr, sr+wold, src, tr, tr+wnew, tmp.accessor(),
                               kernels, mapCoordinate);
    }

    tul = tmp.upperLeft();

    // expand in y-direction into the destination image
    for(; dul.x < dlr.x; ++dul.x, ++tul.x)
    {
        typename DestIterator::column_iterator dc = dul.columnIterator();
        typename TmpIterator::column_iterator tc = tul.columnIterator();
        resamplingConvolveLine(tc, tc+hold, tmp.accessor(), dc, dc+hnew, dest,
                               kernels, mapCoordinate);
    }
}
template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor>
inline void
pyramidExpandBurtFilter(triple<SrcIterator, SrcIterator, SrcAccessor> src,
                        triple<DestIterator, DestIterator, DestAccessor> dest,
                        double centerValue = 0.4)
{
    pyramidExpandBurtFilter(src.first, src.second, src.third,
                            dest.first, dest.second, dest.third, centerValue);
}
template <class Image, class Alloc>
inline void
pyramidExpandBurtFilter(ImagePyramid<Image, Alloc> & pyramid, int fromLevel, int toLevel,
                        double centerValue = 0.4)
{
    vigra_precondition(fromLevel > toLevel,
        "pyramidExpandBurtFilter(): fromLevel must be larger than toLevel.");
    vigra_precondition(pyramid.lowestLevel() <= toLevel && fromLevel <= pyramid.highestLevel(),
        "pyramidExpandBurtFilter(): fromLevel and toLevel must be between the lowest and highest pyramid levels (inclusive).");

    for(int i=fromLevel-1; i >= toLevel; --i)
        pyramidExpandBurtFilter(srcImageRange(pyramid[i+1]), destImageRange(pyramid[i]),
                                centerValue);
}
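/* Usage sketch (illustration only, not part of the original header): after the
   pyramid has been built as in the sketch above, the coarsest level can be
   expanded back down to the finest resolution:

    \code
    vigra::pyramidExpandBurtFilter(pyramid, 4, 0);   // overwrite levels 3..0 with expansions
    \endcode
*/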
template <class Image, class Alloc>
inline void
pyramidReduceBurtLaplacian(ImagePyramid<Image, Alloc> & pyramid, int fromLevel, int toLevel,
                           double centerValue = 0.4)
{
    using namespace functor;

    pyramidReduceBurtFilter(pyramid, fromLevel, toLevel, centerValue);
    for(int i=fromLevel; i < toLevel; ++i)
    {
        typename ImagePyramid<Image, Alloc>::value_type tmpImage(pyramid[i].size());
        pyramidExpandBurtFilter(srcImageRange(pyramid[i+1]), destImageRange(tmpImage), centerValue);
        // level i := level i minus expanded level i+1 (band-pass / Laplacian image)
        combineTwoImages(srcImageRange(tmpImage), srcImage(pyramid[i]), destImage(pyramid[i]),
                         Arg2() - Arg1());
    }
}
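/* Usage sketch (illustration only, not part of the original header):

    \code
    vigra::ImagePyramid<vigra::FImage> pyramid(0, 4, vigra::FImage(256, 256));
    // levels 0..3 become band-pass (Laplacian) images, level 4 remains low-pass
    vigra::pyramidReduceBurtLaplacian(pyramid, 0, 4);
    \endcode
*/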
template <class Image, class Alloc>
inline void
pyramidExpandBurtLaplacian(ImagePyramid<Image, Alloc> & pyramid, int fromLevel, int toLevel,
                           double centerValue = 0.4)
{
    using namespace functor;

    vigra_precondition(fromLevel > toLevel,
        "pyramidExpandBurtLaplacian(): fromLevel must be larger than toLevel.");
    vigra_precondition(pyramid.lowestLevel() <= toLevel && fromLevel <= pyramid.highestLevel(),
        "pyramidExpandBurtLaplacian(): fromLevel and toLevel must be between the lowest and highest pyramid levels (inclusive).");

    for(int i=fromLevel-1; i >= toLevel; --i)
    {
        typename ImagePyramid<Image, Alloc>::value_type tmpImage(pyramid[i].size());
        pyramidExpandBurtFilter(srcImageRange(pyramid[i+1]), destImageRange(tmpImage), centerValue);
        // level i := expanded level i+1 plus stored band-pass image (reconstruction)
        combineTwoImages(srcImageRange(tmpImage), srcImage(pyramid[i]), destImage(pyramid[i]),
                         Arg1() + Arg2());
    }
}
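/* Usage sketch (illustration only, not part of the original header): expanding
   reverses the reduction above and reconstructs the full-resolution image in
   pyramid[0]:

    \code
    vigra::pyramidExpandBurtLaplacian(pyramid, 4, 0);
    \endcode
*/

} // namespace vigra

#endif // VIGRA_RESAMPLING_CONVOLUTION_HXX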