/************************************************************************

    Copyright 1998-2002 by Ullrich Koethe
    Cognitive Systems Group, University of Hamburg, Germany

    This file is part of the VIGRA computer vision library.
    ( Version 1.6.0, Aug 13 2008 )
    The VIGRA Website is
        http://kogs-www.informatik.uni-hamburg.de/~koethe/vigra/
    Please direct questions, bug reports, and contributions to
        ullrich.koethe@iwr.uni-heidelberg.de    or
        vigra@informatik.uni-hamburg.de

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation
    files (the "Software"), to deal in the Software without
    restriction, including without limitation the rights to use,
    copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following
    conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the
    Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
    OTHER DEALINGS IN THE SOFTWARE.

************************************************************************/


#ifndef VIGRA_STDCONVOLUTION_HXX
#define VIGRA_STDCONVOLUTION_HXX

#include <cmath>
#include "stdimage.hxx"
#include "bordertreatment.hxx"
#include "separableconvolution.hxx"
#include "utilities.hxx"
#include "sized_int.hxx"

namespace vigra {

template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class KernelIterator, class KernelAccessor,
          class KSumType>
void internalPixelEvaluationByClip(int x, int y, int w, int h, SrcIterator xs,
                                   SrcAccessor src_acc, DestIterator xd, DestAccessor dest_acc,
                                   KernelIterator ki, Diff2D kul, Diff2D klr, KernelAccessor ak,
                                   KSumType norm)
{
    typedef typename
        NumericTraits<typename SrcAccessor::value_type>::RealPromote SumType;
    typedef
        NumericTraits<typename DestAccessor::value_type> DestTraits;

    // calculate width and height of the kernel
    int kernel_width = klr.x - kul.x + 1;
    int kernel_height = klr.y - kul.y + 1;

    SumType sum = NumericTraits<SumType>::zero();
    int xx, yy;
    int x0, y0, x1, y1;

    y0 = (y < klr.y) ? -y : -klr.y;
    y1 = (h-y-1 < -kul.y) ? h-y-1 : -kul.y;

    x0 = (x < klr.x) ? -x : -klr.x;
    x1 = (w-x-1 < -kul.x) ? w-x-1 : -kul.x;

    SrcIterator yys = xs + Diff2D(x0, y0);
    KernelIterator yk = ki - Diff2D(x0, y0);

    KSumType ksum = NumericTraits<KSumType>::zero();
    kernel_width = x1 - x0 + 1;
    kernel_height = y1 - y0 + 1;

    // the kernel is first clipped and then mirrored!

    for(yy = 0; yy < kernel_height; ++yy, ++yys.y, --yk.y)
    {
        SrcIterator xxs = yys;
        KernelIterator xk = yk;

        for(xx = 0; xx < kernel_width; ++xx, ++xxs.x, --xk.x)
        {
            sum += ak(xk) * src_acc(xxs);
            ksum += ak(xk);
        }
    }

    // store average in destination pixel
    dest_acc.set(DestTraits::fromRealPromote((norm / ksum) * sum), xd);

}


#if 0

template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class KernelIterator, class KernelAccessor>
void internalPixelEvaluationByWrapReflectRepeat(int x, int y, int src_width, int src_height, SrcIterator xs,
                                                SrcAccessor src_acc, DestIterator xd, DestAccessor dest_acc,
                                                KernelIterator ki, Diff2D kul, Diff2D klr, KernelAccessor ak,
                                                BorderTreatmentMode border)
{

    typedef typename
        NumericTraits<typename SrcAccessor::value_type>::RealPromote SumType;
    typedef
        NumericTraits<typename DestAccessor::value_type> DestTraits;

    SumType sum = NumericTraits<SumType>::zero();

    SrcIterator src_ul = xs - Diff2D(x, y);
    SrcIterator src_lr = src_ul + Diff2D(src_width, src_height);

    SrcIterator yys = xs;
    KernelIterator yk = ki;

    // calculate width and height of the kernel
    int kernel_width = klr.x - kul.x + 1;
    int kernel_height = klr.y - kul.y + 1;

    // where the kernel is beyond the borders:
    bool top_to_much = (y < klr.y) ? true : false;
    bool down_to_much = (src_height-y-1 < -kul.y) ? true : false;
    bool left_to_much = (x < klr.x) ? true : false;
    bool right_to_much = (src_width-x-1 < -kul.x) ? true : false;

    // direction of iteration,
    // e.g. (-1, +1) for ll->ur or (-1, -1) for lr->ul
    Diff2D way_increment;

    /* Iteration is always done from valid to invalid range.
       The following tuple is composed as such:
       - If an invalid range is reached while iterating in X,
         a jump of border_increment.first is performed and
         border_increment.third is used for further iterating.
       - If an invalid range is reached while iterating in Y,
         a jump of border_increment.second is performed and
         border_increment.fourth is used for further iterating.
    */
    tuple4<int, int, int, int> border_increment;
    if (border == BORDER_TREATMENT_REPEAT){
        border_increment = tuple4<int, int, int, int>(1, 1, 0, 0);
    }else if (border == BORDER_TREATMENT_REFLECT){
        border_increment = tuple4<int, int, int, int>(2, 2, -1, -1);
    }else{ // BORDER_TREATMENT_WRAP
        border_increment = tuple4<int, int, int, int>(src_width, src_height, 1, 1);
    }

    pair<int, int> valid_step_count;

    if(left_to_much && !top_to_much && !down_to_much)
    {
        yys += klr;
        yk += kul;
        way_increment = Diff2D(-1, -1);
        border_increment.third = -border_increment.third;
        border_increment.fourth = -border_increment.fourth;
        valid_step_count = std::make_pair((yys - src_ul).x + 1, kernel_height);
    }
    else if(top_to_much && !left_to_much && !right_to_much)
    {
        yys += klr;
        yk += kul;
        way_increment = Diff2D(-1, -1);
        border_increment.third = -border_increment.third;
        border_increment.fourth = -border_increment.fourth;
        valid_step_count = std::make_pair(kernel_width, (yys - src_ul).y + 1);
    }
    else if(right_to_much && !top_to_much && !down_to_much)
    {
        yys += kul;
        yk += klr;
        way_increment = Diff2D(1, 1);
        border_increment.first = -border_increment.first;
        border_increment.second = -border_increment.second;
        valid_step_count = std::make_pair((src_lr - yys).x, kernel_height);
    }
    else if(down_to_much && !left_to_much && !right_to_much)
    {
        yys += kul;
        yk += klr;
        way_increment = Diff2D(1, 1);
        border_increment.first = -border_increment.first;
        border_increment.second = -border_increment.second;
        valid_step_count = std::make_pair(kernel_width, (src_lr - yys).y);
    }
    else if(down_to_much && left_to_much)
    {
        yys += kul + Diff2D(kernel_width - 1, 0);
        yk += kul + Diff2D(0, kernel_height - 1);
        way_increment = Diff2D(-1, 1);
        border_increment.second = -border_increment.second;
        border_increment.third = -border_increment.third;
        valid_step_count = std::make_pair((yys - src_ul).x + 1, (src_lr - yys).y);
    }
    else if(down_to_much && right_to_much)
    {
        yys += kul;
        yk += klr;
        way_increment = Diff2D(1, 1);
        border_increment.first = -border_increment.first;
        border_increment.second = -border_increment.second;
        valid_step_count = std::make_pair((src_lr - yys).x, (src_lr - yys).y);
    }
    else if(top_to_much && left_to_much)
    {
        yys += klr;
        yk += kul;
        way_increment = Diff2D(-1, -1);
        border_increment.third = -border_increment.third;
        border_increment.fourth = -border_increment.fourth;
        valid_step_count = std::make_pair((yys - src_ul).x + 1, (yys - src_ul).y + 1);
    }
    else
    { //top_to_much && right_to_much
        yys += kul + Diff2D(0, kernel_height - 1);
        yk += kul + Diff2D(kernel_width - 1, 0);
        way_increment = Diff2D(1, -1);
        border_increment.first = -border_increment.first;
        border_increment.fourth = -border_increment.fourth;
        valid_step_count = std::make_pair((src_lr - yys).x, (yys - src_ul).y + 1);
    }

    int yy = 0, xx;

    // walk through the valid range in y direction
    for(; yy < valid_step_count.second; ++yy, yys.y += way_increment.y, yk.y -= way_increment.y )
    {
        SrcIterator xxs = yys;
        KernelIterator xk = yk;

        // walk through the valid range in x direction
        for(xx = 0; xx < valid_step_count.first; ++xx, xxs.x += way_increment.x, xk.x -= way_increment.x)
        {
            sum += ak(xk) * src_acc(xxs);
        }

        // the next ++xxs.x would move into the invalid range
        // => jump back into the valid range
        xxs.x += border_increment.first;

        for( ; xx < kernel_width; ++xx, xxs.x += border_increment.third, xk.x -= way_increment.x )
        {
            sum += ak(xk) * src_acc(xxs);
        }
    }

    // the next ++yys.y would move into the invalid range
    // => jump back into the valid range
    yys.y += border_increment.second;

    for( ; yy < kernel_height; ++yy, yys.y += border_increment.third, yk.y -= way_increment.y)
    {
        SrcIterator xxs = yys;
        KernelIterator xk = yk;

        for(xx = 0; xx < valid_step_count.first; ++xx, xxs.x += way_increment.x, xk.x -= way_increment.x)
        {
            sum += ak(xk) * src_acc(xxs);
        }

        // jump back into the valid range
        xxs.x += border_increment.first;

        for( ; xx < kernel_width; ++xx, xxs.x += border_increment.third, xk.x -= way_increment.x )
        {
            sum += ak(xk) * src_acc(xxs);
        }
    }

    // store average in destination pixel
    dest_acc.set(DestTraits::fromRealPromote(sum), xd);

}// end of internalPixelEvaluationByWrapReflectRepeat
#endif /* #if 0 */


template <class SrcIterator, class SrcAccessor,
          class KernelIterator, class KernelAccessor,
          class SumType>
void
internalPixelEvaluationByWrapReflectRepeat(SrcIterator xs, SrcAccessor src_acc,
                                           KernelIterator xk, KernelAccessor ak,
                                           int left, int right, int kleft, int kright,
                                           int borderskipx, int borderinc, SumType & sum)
{
    SrcIterator xxs = xs + left;
    KernelIterator xxk = xk - left;

    for(int xx = left; xx <= right; ++xx, ++xxs, --xxk)
    {
        sum += ak(xxk) * src_acc(xxs);
    }

    xxs = xs + left - borderskipx;
    xxk = xk - left + 1;
    for(int xx = left - 1; xx >= -kright; --xx, xxs -= borderinc, ++xxk)
    {
        sum += ak(xxk) * src_acc(xxs);
    }

    xxs = xs + right + borderskipx;
    xxk = xk - right - 1;
    for(int xx = right + 1; xx <= -kleft; ++xx, xxs += borderinc, --xxk)
    {
        sum += ak(xxk) * src_acc(xxs);
    }
}


/** \addtogroup StandardConvolution Two-dimensional convolution functions

    Perform 2D non-separable convolution, with and without ROI mask.

    These generic convolution functions implement
    the standard 2D convolution operation for images that fit
    into the required interface. Arbitrary ROIs are supported
    by the mask version of the algorithm.
    The functions need a suitable 2D kernel to operate.
*/
//@{

/** \brief Performs a 2 dimensional convolution of the source image using the given
    kernel.

    The KernelIterator must point to the center of the kernel, and
    the kernel's size is given by its upper left (x and y of distance <= 0) and
    lower right (distance >= 0) corners. The image must always be larger than the
    kernel. At those positions where the kernel does not completely fit
    into the image, the specified \ref BorderTreatmentMode is
    applied.
    You can choose between the following BorderTreatmentModes:
    <ul>
    <li>BORDER_TREATMENT_CLIP</li>
    <li>BORDER_TREATMENT_AVOID</li>
    <li>BORDER_TREATMENT_WRAP</li>
    <li>BORDER_TREATMENT_REFLECT</li>
    <li>BORDER_TREATMENT_REPEAT</li>
    </ul><br>
    The image's pixel type (SrcAccessor::value_type) must be a
    linear space over the kernel's value_type (KernelAccessor::value_type),
    i.e. addition of source values, multiplication with kernel values,
    and NumericTraits must be defined.
    The kernel's value_type must be an algebraic field,
    i.e. the arithmetic operations (+, -, *, /) and NumericTraits must
    be defined.

    <b> Declarations:</b>

    pass arguments explicitly:
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void convolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                           DestIterator dest_ul, DestAccessor dest_acc,
                           KernelIterator ki, KernelAccessor ak,
                           Diff2D kul, Diff2D klr, BorderTreatmentMode border);
    }
    \endcode


    use argument objects in conjunction with \ref ArgumentObjectFactories :
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void convolveImage(triple<SrcIterator, SrcIterator, SrcAccessor> src,
                           pair<DestIterator, DestAccessor> dest,
                           tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
                                  BorderTreatmentMode> kernel);
    }
    \endcode

    <b> Usage:</b>

    <b>\#include</b> <<a href="stdconvolution_8hxx-source.html">vigra/stdconvolution.hxx</a>><br>
    Namespace: vigra


    \code
    vigra::FImage src(w,h), dest(w,h);
    ...

    // define horizontal Sobel filter
    vigra::Kernel2D<float> sobel;

    sobel.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) =  // upper left and lower right
        0.125, 0.0, -0.125,
        0.25,  0.0, -0.25,
        0.125, 0.0, -0.125;
    sobel.setBorderTreatment(vigra::BORDER_TREATMENT_REFLECT);

    vigra::convolveImage(srcImageRange(src), destImage(dest), kernel2d(sobel));
    \endcode

    <b> Required Interface:</b>

    \code
    ImageIterator src_ul, src_lr;
    ImageIterator dest_ul;
    ImageIterator ik;

    SrcAccessor src_accessor;
    DestAccessor dest_accessor;
    KernelAccessor kernel_accessor;

    NumericTraits<SrcAccessor::value_type>::RealPromote s = src_accessor(src_ul);

    s = s + s;
    s = kernel_accessor(ik) * s;
    s -= s;

    dest_accessor.set(
        NumericTraits<DestAccessor::value_type>::fromRealPromote(s), dest_ul);

    NumericTraits<KernelAccessor::value_type>::RealPromote k = kernel_accessor(ik);

    k += k;
    k -= k;
    k = k / k;
    \endcode

    <b> Preconditions:</b>

    \code
    kul.x <= 0
    kul.y <= 0
    klr.x >= 0
    klr.y >= 0
    src_lr.x - src_ul.x >= klr.x - kul.x + 1
    src_lr.y - src_ul.y >= klr.y - kul.y + 1
    \endcode

    If border == BORDER_TREATMENT_CLIP: Sum of kernel elements must be != 0.
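
    The factory call shown above can also be written with all arguments passed
    explicitly (a sketch equivalent to the usage example, assuming the same
    <tt>src</tt>, <tt>dest</tt> and <tt>sobel</tt> objects):

    \code
    vigra::convolveImage(src.upperLeft(), src.lowerRight(), src.accessor(),
                         dest.upperLeft(), dest.accessor(),
                         sobel.center(), sobel.accessor(),
                         sobel.upperLeft(), sobel.lowerRight(),
                         sobel.borderTreatment());
    \endcode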
*/
template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class KernelIterator, class KernelAccessor>
void convolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                   DestIterator dest_ul, DestAccessor dest_acc,
                   KernelIterator ki, KernelAccessor ak,
                   Diff2D kul, Diff2D klr, BorderTreatmentMode border)
{
    vigra_precondition((border == BORDER_TREATMENT_CLIP    ||
                        border == BORDER_TREATMENT_AVOID   ||
                        border == BORDER_TREATMENT_REFLECT ||
                        border == BORDER_TREATMENT_REPEAT  ||
                        border == BORDER_TREATMENT_WRAP),
                       "convolveImage():\n"
                       "  Border treatment must be one of the following:\n"
                       "  - BORDER_TREATMENT_CLIP\n"
                       "  - BORDER_TREATMENT_AVOID\n"
                       "  - BORDER_TREATMENT_REFLECT\n"
                       "  - BORDER_TREATMENT_REPEAT\n"
                       "  - BORDER_TREATMENT_WRAP\n");

    vigra_precondition(kul.x <= 0 && kul.y <= 0,
                       "convolveImage(): coordinates of "
                       "kernel's upper left must be <= 0.");
    vigra_precondition(klr.x >= 0 && klr.y >= 0,
                       "convolveImage(): coordinates of "
                       "kernel's lower right must be >= 0.");

    // use traits to determine SumType as to prevent possible overflow
    typedef typename
        PromoteTraits<typename SrcAccessor::value_type,
                      typename KernelAccessor::value_type>::Promote SumType;
    typedef typename
        NumericTraits<typename KernelAccessor::value_type>::RealPromote KernelSumType;
    typedef
        NumericTraits<typename DestAccessor::value_type> DestTraits;

    // calculate width and height of the image
    int w = src_lr.x - src_ul.x;
    int h = src_lr.y - src_ul.y;

    // calculate width and height of the kernel
    int kernel_width = klr.x - kul.x + 1;
    int kernel_height = klr.y - kul.y + 1;

    vigra_precondition(w >= kernel_width && h >= kernel_height,
                       "convolveImage(): kernel larger than image.");

    KernelSumType norm = NumericTraits<KernelSumType>::zero();
    if(border == BORDER_TREATMENT_CLIP)
    {
        // calculate the sum of the kernel elements for renormalization
        KernelIterator yk = ki + klr;

        // determine sum within kernel (= norm)
        for(int y = 0; y < kernel_height; ++y, --yk.y)
        {
            KernelIterator xk = yk;
            for(int x = 0; x < kernel_width; ++x, --xk.x)
            {
                norm += ak(xk);
            }
        }
        vigra_precondition(norm != NumericTraits<KernelSumType>::zero(),
                           "convolveImage(): Cannot use BORDER_TREATMENT_CLIP with a DC-free kernel");
    }

    // create iterators for the interior part of the image (where the kernel always fits into the image)
    DestIterator yd = dest_ul + Diff2D(klr.x, klr.y);
    SrcIterator ys = src_ul + Diff2D(klr.x, klr.y);
    SrcIterator send = src_lr + Diff2D(kul.x, kul.y);

    // iterate over the interior part
    for(; ys.y < send.y; ++ys.y, ++yd.y)
    {
        // create x iterators
        DestIterator xd(yd);
        SrcIterator xs(ys);

        for(; xs.x < send.x; ++xs.x, ++xd.x)
        {
            // init the sum
            SumType sum = NumericTraits<SumType>::zero();

            SrcIterator yys = xs - klr;
            SrcIterator yyend = xs - kul;
            KernelIterator yk = ki + klr;

            for(; yys.y <= yyend.y; ++yys.y, --yk.y)
            {
                typename SrcIterator::row_iterator xxs = yys.rowIterator();
                typename SrcIterator::row_iterator xxe = xxs + kernel_width;
                typename KernelIterator::row_iterator xk = yk.rowIterator();

                for(; xxs < xxe; ++xxs, --xk)
                {
                    sum += ak(xk) * src_acc(xxs);
                }
            }

            // store convolution result in destination pixel
            dest_acc.set(DestTraits::fromRealPromote(sum), xd);
        }
    }

    if(border == BORDER_TREATMENT_AVOID)
        return; // skip processing near the border

    int interiorskip = w + kul.x - klr.x - 1;
    int borderskipx;
    int borderskipy;
    int borderinc;
    if(border == BORDER_TREATMENT_REPEAT)
    {
        borderskipx = 0;
        borderskipy = 0;
        borderinc = 0;
    }
    else if(border == BORDER_TREATMENT_REFLECT)
    {
        borderskipx = -1;
        borderskipy = -1;
        borderinc = -1;
    }
    else if(border == BORDER_TREATMENT_WRAP)
    {
        borderskipx = -w+1;
        borderskipy = -h+1;
        borderinc = 1;
    }

    // create iterators for the entire image
    yd = dest_ul;
    ys = src_ul;

    // work on entire image (but skip the already computed points in the loop)
    for(int y = 0; y < h; ++y, ++ys.y, ++yd.y)
    {
        int top = std::max(static_cast<IntBiggest>(-klr.y),
                           static_cast<IntBiggest>(src_ul.y - ys.y));
        int bottom = std::min(static_cast<IntBiggest>(-kul.y),
                              static_cast<IntBiggest>(src_lr.y - ys.y - 1));

        // create x iterators
        DestIterator xd(yd);
        SrcIterator xs(ys);

        for(int x = 0; x < w; ++x, ++xs.x, ++xd.x)
        {
            // check if we are away from the border
            if(y >= klr.y && y < h+kul.y && x == klr.x)
            {
                // yes => skip the already computed points
                x += interiorskip;
                xs.x += interiorskip;
                xd.x += interiorskip;
                continue;
            }
            if (border == BORDER_TREATMENT_CLIP)
            {
                internalPixelEvaluationByClip(x, y, w, h, xs, src_acc, xd, dest_acc, ki, kul, klr, ak, norm);
            }
            else
            {
                int left = std::max(-klr.x, src_ul.x - xs.x);
                int right = std::min(-kul.x, src_lr.x - xs.x - 1);

                // init the sum
                SumType sum = NumericTraits<SumType>::zero();

                // create iterators for the part of the kernel that fits into the image
                SrcIterator yys = xs + Size2D(0, top);
                KernelIterator yk = ki - Size2D(0, top);

                int yy;
                for(yy = top; yy <= bottom; ++yy, ++yys.y, --yk.y)
                {
                    internalPixelEvaluationByWrapReflectRepeat(yys.rowIterator(), src_acc, yk.rowIterator(), ak,
                                                               left, right, kul.x, klr.x, borderskipx, borderinc, sum);
                }
                yys = xs + Size2D(0, top - borderskipy);
                yk = ki - Size2D(0, top - 1);
                for(yy = top - 1; yy >= -klr.y; --yy, yys.y -= borderinc, ++yk.y)
                {
                    internalPixelEvaluationByWrapReflectRepeat(yys.rowIterator(), src_acc, yk.rowIterator(), ak,
                                                               left, right, kul.x, klr.x, borderskipx, borderinc, sum);
                }
                yys = xs + Size2D(0, bottom + borderskipy);
                yk = ki - Size2D(0, bottom + 1);
                for(yy = bottom + 1; yy <= -kul.y; ++yy, yys.y += borderinc, --yk.y)
                {
                    internalPixelEvaluationByWrapReflectRepeat(yys.rowIterator(), src_acc, yk.rowIterator(), ak,
                                                               left, right, kul.x, klr.x, borderskipx, borderinc, sum);
                }

                // store convolution result in destination pixel
                dest_acc.set(DestTraits::fromRealPromote(sum), xd);

                // internalPixelEvaluationByWrapReflectRepeat(x, y, w, h, xs, src_acc, xd, dest_acc, ki, kul, klr, ak, border);
            }
        }
    }
}

template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class KernelIterator, class KernelAccessor>
inline
void convolveImage(
    triple<SrcIterator, SrcIterator, SrcAccessor> src,
    pair<DestIterator, DestAccessor> dest,
    tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
           BorderTreatmentMode> kernel)
{
    convolveImage(src.first, src.second, src.third,
                  dest.first, dest.second,
                  kernel.first, kernel.second, kernel.third,
                  kernel.fourth, kernel.fifth);
}


/** \brief Performs a 2-dimensional normalized convolution, i.e. convolution with a mask image.

    This function computes
    <a href="http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/PIRODDI1/NormConv/NormConv.html">normalized
    convolution</a> as defined in
    Knutsson, H. and Westin, C-F.: <i>Normalized and differential convolution:
    Methods for Interpolation and Filtering of incomplete and uncertain data</i>.
    Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition, 1993, 515-523.

    The mask image must be binary and encodes which pixels of the original image
    are valid. It is used as follows:
    Only pixels under the mask are used in the calculations. Whenever a part of the
    kernel lies outside the mask, it is ignored, and the kernel is renormalized to its
    original norm (analogous to the CLIP \ref BorderTreatmentMode). Thus, a useful convolution
    result is computed whenever <i>at least one valid pixel is within the current window</i>.
    In particular, destination pixels not under the mask still receive a value if they are <i>near</i>
    the mask. Therefore, this algorithm is useful as an interpolator of sparse input data.
    If you are only interested in the destination values under the mask, you can perform
    a subsequent \ref copyImageIf().

    The KernelIterator must point to the center of the kernel, and
    the kernel's size is given by its upper left (x and y of distance <= 0) and
    lower right (distance >= 0) corners. The image must always be larger than the
    kernel. At those positions where the kernel does not completely fit
    into the image, the specified \ref BorderTreatmentMode is
    applied. Only BORDER_TREATMENT_CLIP and BORDER_TREATMENT_AVOID are currently
    supported.

    The image's pixel type (SrcAccessor::value_type) must be a
    linear space over the kernel's value_type (KernelAccessor::value_type),
    i.e. addition of source values, multiplication with kernel values,
    and NumericTraits must be defined.
    The kernel's value_type must be an algebraic field,
    i.e. the arithmetic operations (+, -, *, /) and NumericTraits must
    be defined.

    <b> Declarations:</b>

    pass arguments explicitly:
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class MaskIterator, class MaskAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void
        normalizedConvolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                                MaskIterator mul, MaskAccessor am,
                                DestIterator dest_ul, DestAccessor dest_acc,
                                KernelIterator ki, KernelAccessor ak,
                                Diff2D kul, Diff2D klr, BorderTreatmentMode border);
    }
    \endcode


    use argument objects in conjunction with \ref ArgumentObjectFactories :
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class MaskIterator, class MaskAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void normalizedConvolveImage(triple<SrcIterator, SrcIterator, SrcAccessor> src,
                                     pair<MaskIterator, MaskAccessor> mask,
                                     pair<DestIterator, DestAccessor> dest,
                                     tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
                                            BorderTreatmentMode> kernel);
    }
    \endcode

    <b> Usage:</b>

    <b>\#include</b> <<a href="stdconvolution_8hxx-source.html">vigra/stdconvolution.hxx</a>><br>
    Namespace: vigra


    \code
    vigra::FImage src(w,h), dest(w,h);
    vigra::CImage mask(w,h);
    ...

    // define 3x3 binomial filter
    vigra::Kernel2D<float> binom;

    binom.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) =   // upper left and lower right
        0.0625, 0.125, 0.0625,
        0.125,  0.25,  0.125,
        0.0625, 0.125, 0.0625;

    vigra::normalizedConvolveImage(srcImageRange(src), maskImage(mask), destImage(dest), kernel2d(binom));
    \endcode

    <b> Required Interface:</b>

    \code
    ImageIterator src_ul, src_lr;
    ImageIterator mul;
    ImageIterator dest_ul;
    ImageIterator ik;

    SrcAccessor src_accessor;
    MaskAccessor mask_accessor;
    DestAccessor dest_accessor;
    KernelAccessor kernel_accessor;

    NumericTraits<SrcAccessor::value_type>::RealPromote s = src_accessor(src_ul);

    s = s + s;
    s = kernel_accessor(ik) * s;
    s -= s;

    if(mask_accessor(mul)) ...;

    dest_accessor.set(
        NumericTraits<DestAccessor::value_type>::fromRealPromote(s), dest_ul);

    NumericTraits<KernelAccessor::value_type>::RealPromote k = kernel_accessor(ik);

    k += k;
    k -= k;
    k = k / k;
    \endcode

    <b> Preconditions:</b>

    \code
    kul.x <= 0
    kul.y <= 0
    klr.x >= 0
    klr.y >= 0
    src_lr.x - src_ul.x >= klr.x - kul.x + 1
    src_lr.y - src_ul.y >= klr.y - kul.y + 1
    border == BORDER_TREATMENT_CLIP || border == BORDER_TREATMENT_AVOID
    \endcode

    Sum of kernel elements must be != 0.
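
    If only the values under the mask are of interest, the interpolated values
    outside the mask can be discarded afterwards with \ref copyImageIf() (a sketch,
    assuming the <tt>src</tt>, <tt>mask</tt>, <tt>dest</tt> and <tt>binom</tt> objects
    from the usage example and a hypothetical second destination image <tt>masked_dest</tt>):

    \code
    vigra::FImage masked_dest(w, h);

    vigra::normalizedConvolveImage(srcImageRange(src), maskImage(mask),
                                   destImage(dest), kernel2d(binom));

    // keep the interpolated values only where the mask is non-zero
    vigra::copyImageIf(srcImageRange(dest), maskImage(mask), destImage(masked_dest));
    \endcode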
*/
doxygen_overloaded_function(template <...> void normalizedConvolveImage)

template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class MaskIterator, class MaskAccessor,
          class KernelIterator, class KernelAccessor>
void
normalizedConvolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                        MaskIterator mul, MaskAccessor am,
                        DestIterator dest_ul, DestAccessor dest_acc,
                        KernelIterator ki, KernelAccessor ak,
                        Diff2D kul, Diff2D klr, BorderTreatmentMode border)
{
    vigra_precondition((border == BORDER_TREATMENT_CLIP ||
                        border == BORDER_TREATMENT_AVOID),
                       "normalizedConvolveImage(): "
                       "Border treatment must be BORDER_TREATMENT_CLIP or BORDER_TREATMENT_AVOID.");

    vigra_precondition(kul.x <= 0 && kul.y <= 0,
                       "normalizedConvolveImage(): left borders must be <= 0.");
    vigra_precondition(klr.x >= 0 && klr.y >= 0,
                       "normalizedConvolveImage(): right borders must be >= 0.");

    // use traits to determine SumType as to prevent possible overflow
    typedef typename
        NumericTraits<typename SrcAccessor::value_type>::RealPromote SumType;
    typedef typename
        NumericTraits<typename KernelAccessor::value_type>::RealPromote KSumType;
    typedef
        NumericTraits<typename DestAccessor::value_type> DestTraits;

    // calculate width and height of the image
    int w = src_lr.x - src_ul.x;
    int h = src_lr.y - src_ul.y;
    int kernel_width = klr.x - kul.x + 1;
    int kernel_height = klr.y - kul.y + 1;

    int x,y;
    int ystart = (border == BORDER_TREATMENT_AVOID) ? klr.y : 0;
    int yend   = (border == BORDER_TREATMENT_AVOID) ? h+kul.y : h;
    int xstart = (border == BORDER_TREATMENT_AVOID) ? klr.x : 0;
    int xend   = (border == BORDER_TREATMENT_AVOID) ? w+kul.x : w;

    // create y iterators
    DestIterator yd = dest_ul + Diff2D(xstart, ystart);
    SrcIterator ys = src_ul + Diff2D(xstart, ystart);
    MaskIterator ym = mul + Diff2D(xstart, ystart);

    KSumType norm = ak(ki);
    int xx, yy;
    KernelIterator yk = ki + klr;
    for(yy = 0; yy < kernel_height; ++yy, --yk.y)
    {
        KernelIterator xk = yk;

        for(xx = 0; xx < kernel_width; ++xx, --xk.x)
        {
            norm += ak(xk);
        }
    }
    norm -= ak(ki);


    for(y = ystart; y < yend; ++y, ++ys.y, ++yd.y, ++ym.y)
    {
        // create x iterators
        DestIterator xd(yd);
        SrcIterator xs(ys);
        MaskIterator xm(ym);

        for(x = xstart; x < xend; ++x, ++xs.x, ++xd.x, ++xm.x)
        {
            // how much of the kernel fits into the image ?
            int x0, y0, x1, y1;

            y0 = (y < klr.y) ? -y : -klr.y;
            y1 = (h-y-1 < -kul.y) ? h-y-1 : -kul.y;
            x0 = (x < klr.x) ? -x : -klr.x;
            x1 = (w-x-1 < -kul.x) ? w-x-1 : -kul.x;

            bool first = true;
            // init the sum
            SumType sum;
            KSumType ksum;

            SrcIterator yys = xs + Diff2D(x0, y0);
            MaskIterator yym = xm + Diff2D(x0, y0);
            KernelIterator yk = ki - Diff2D(x0, y0);

            int xx, kernel_width, kernel_height;
            kernel_width = x1 - x0 + 1;
            kernel_height = y1 - y0 + 1;
            for(yy = 0; yy < kernel_height; ++yy, ++yys.y, --yk.y, ++yym.y)
            {
                typename SrcIterator::row_iterator xxs = yys.rowIterator();
                typename SrcIterator::row_iterator xxend = xxs + kernel_width;
                typename MaskIterator::row_iterator xxm = yym.rowIterator();
                typename KernelIterator::row_iterator xk = yk.rowIterator();

                for(xx = 0; xxs < xxend; ++xxs, --xk, ++xxm)
                {
                    if(!am(xxm)) continue;

                    if(first)
                    {
                        sum = ak(xk) * src_acc(xxs);
                        ksum = ak(xk);
                        first = false;
                    }
                    else
                    {
                        sum += ak(xk) * src_acc(xxs);
                        ksum += ak(xk);
                    }
                }
            }
            // store average in destination pixel
            if(!first &&
               ksum != NumericTraits<KSumType>::zero())
            {
                dest_acc.set(DestTraits::fromRealPromote((norm / ksum) * sum), xd);
            }
        }
    }
}


template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class MaskIterator, class MaskAccessor,
          class KernelIterator, class KernelAccessor>
inline
void normalizedConvolveImage(
    triple<SrcIterator, SrcIterator, SrcAccessor> src,
    pair<MaskIterator, MaskAccessor> mask,
    pair<DestIterator, DestAccessor> dest,
    tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
           BorderTreatmentMode> kernel)
{
    normalizedConvolveImage(src.first, src.second, src.third,
                            mask.first, mask.second,
                            dest.first, dest.second,
                            kernel.first, kernel.second, kernel.third,
                            kernel.fourth, kernel.fifth);
}

/** \brief Deprecated name of 2-dimensional normalized convolution, i.e. convolution with a mask image.

    See \ref normalizedConvolveImage() for documentation.

    <b> Declarations:</b>

    pass arguments explicitly:
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class MaskIterator, class MaskAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void
        convolveImageWithMask(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                              MaskIterator mul, MaskAccessor am,
                              DestIterator dest_ul, DestAccessor dest_acc,
                              KernelIterator ki, KernelAccessor ak,
                              Diff2D kul, Diff2D klr, BorderTreatmentMode border);
    }
    \endcode


    use argument objects in conjunction with \ref ArgumentObjectFactories :
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class MaskIterator, class MaskAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void convolveImageWithMask(triple<SrcIterator, SrcIterator, SrcAccessor> src,
                                   pair<MaskIterator, MaskAccessor> mask,
                                   pair<DestIterator, DestAccessor> dest,
                                   tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
                                          BorderTreatmentMode> kernel);
    }
    \endcode
*/
doxygen_overloaded_function(template <...> void convolveImageWithMask)

template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class MaskIterator, class MaskAccessor,
          class KernelIterator, class KernelAccessor>
inline void
convolveImageWithMask(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                      MaskIterator mul, MaskAccessor am,
                      DestIterator dest_ul, DestAccessor dest_acc,
                      KernelIterator ki, KernelAccessor ak,
                      Diff2D kul, Diff2D klr, BorderTreatmentMode border)
{
    normalizedConvolveImage(src_ul, src_lr, src_acc,
                            mul, am,
                            dest_ul, dest_acc,
                            ki, ak, kul, klr, border);
}

template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class MaskIterator, class MaskAccessor,
          class KernelIterator, class KernelAccessor>
inline
void convolveImageWithMask(
    triple<SrcIterator, SrcIterator, SrcAccessor> src,
    pair<MaskIterator, MaskAccessor> mask,
    pair<DestIterator, DestAccessor> dest,
    tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
           BorderTreatmentMode> kernel)
{
    normalizedConvolveImage(src.first, src.second, src.third,
                            mask.first, mask.second,
                            dest.first, dest.second,
                            kernel.first, kernel.second, kernel.third,
                            kernel.fourth, kernel.fifth);
}

//@}

/********************************************************/
/*                                                      */
/*                       Kernel2D                       */
/*                                                      */
/********************************************************/

/** \brief Generic 2 dimensional convolution kernel.

    This kernel may be used for convolution of 2 dimensional signals.

    Convolution functions access the kernel via an ImageIterator
    which they get by calling \ref center(). This iterator
    points to the center of the kernel. The kernel's size is given by its upperLeft()
    (upperLeft().x <= 0, upperLeft().y <= 0)
    and lowerRight() (lowerRight().x >= 0, lowerRight().y >= 0) methods.
    The desired border treatment mode is returned by borderTreatment().
    (Note that the \ref StandardConvolution "2D convolution functions" don't currently
    support all modes.)

    The different init functions create a kernel with the specified
    properties. The requirements for the kernel's value_type depend
    on the init function used. At least NumericTraits must be defined.

    The kernel defines a factory function kernel2d() to create an argument object
    (see \ref KernelArgumentObjectFactories).

    <b> Usage:</b>

    <b>\#include</b> <<a href="stdconvolution_8hxx-source.html">vigra/stdconvolution.hxx</a>><br>
    Namespace: vigra

    \code
    vigra::FImage src(w,h), dest(w,h);
    ...

    // define horizontal Sobel filter
    vigra::Kernel2D<float> sobel;

    sobel.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) =  // upper left and lower right
        0.125, 0.0, -0.125,
        0.25,  0.0, -0.25,
        0.125, 0.0, -0.125;

    vigra::convolveImage(srcImageRange(src), destImage(dest), kernel2d(sobel));
    \endcode

    <b> Required Interface:</b>

    \code
    value_type v = NumericTraits<value_type>::one();
    \endcode

    See also the init functions.
*/
template <class ARITHTYPE>
class Kernel2D
{
  public:
    /** the kernel's value type
    */
    typedef ARITHTYPE value_type;

    /** 2D random access iterator over the kernel's values
    */
    typedef typename BasicImage<value_type>::traverser Iterator;

    /** const 2D random access iterator over the kernel's values
    */
    typedef typename BasicImage<value_type>::const_traverser ConstIterator;

    /** the kernel's accessor
    */
    typedef typename BasicImage<value_type>::Accessor Accessor;

    /** the kernel's const accessor
    */
    typedef typename BasicImage<value_type>::ConstAccessor ConstAccessor;

    struct InitProxy
    {
        typedef typename
            BasicImage<value_type>::ScanOrderIterator Iterator;

        InitProxy(Iterator i, int count, value_type & norm)
        : iter_(i), base_(i),
          count_(count), sum_(count),
          norm_(norm)
        {}

        ~InitProxy()
        {
            vigra_precondition(count_ == 1 || count_ == sum_,
                               "Kernel2D::initExplicitly(): "
                               "Too few init values.");
        }

        InitProxy & operator,(value_type const & v)
        {
            if(count_ == sum_) norm_ = *iter_;

            --count_;
            vigra_precondition(count_ > 0,
                               "Kernel2D::initExplicitly(): "
                               "Too many init values.");

            norm_ += v;

            ++iter_;
            *iter_ = v;

            return *this;
        }

        Iterator iter_, base_;
        int count_, sum_;
        value_type & norm_;
    };

    static value_type one() { return NumericTraits<value_type>::one(); }

    /** Default constructor.
        Creates a kernel of size 1x1 which would copy the signal
        unchanged.
    */
    Kernel2D()
    : kernel_(1, 1, one()),
      left_(0, 0),
      right_(0, 0),
      norm_(one()),
      border_treatment_(BORDER_TREATMENT_CLIP)
    {}

    /** Copy constructor.
    */
    Kernel2D(Kernel2D const & k)
    : kernel_(k.kernel_),
      left_(k.left_),
      right_(k.right_),
      norm_(k.norm_),
      border_treatment_(k.border_treatment_)
    {}

    /** Copy assignment.
    */
    Kernel2D & operator=(Kernel2D const & k)
    {
        if(this != &k)
        {
            kernel_ = k.kernel_;
            left_ = k.left_;
            right_ = k.right_;
            norm_ = k.norm_;
            border_treatment_ = k.border_treatment_;
        }
        return *this;
    }

    /** Initialization.
        This initializes the kernel with the given constant. The norm becomes
        v*width()*height().

        Instead of a single value an initializer list of length width()*height()
        can be used like this:

        \code
        vigra::Kernel2D<float> binom;

        binom.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) =
            0.0625, 0.125, 0.0625,
            0.125,  0.25,  0.125,
            0.0625, 0.125, 0.0625;
        \endcode

        In this case, the norm will be set to the sum of the init values.
        An initializer list of wrong length will result in a run-time error.
    */
    InitProxy operator=(value_type const & v)
    {
        int size = (right_.x - left_.x + 1) *
                   (right_.y - left_.y + 1);
        kernel_ = v;
        norm_ = (double)size*v;

        return InitProxy(kernel_.begin(), size, norm_);
    }

    /** Destructor.
    */
    ~Kernel2D()
    {}

    /** Init the 2D kernel as the cartesian product of two 1D kernels
        of type \ref Kernel1D. The norm becomes the product of the two original
        norms.

        <b> Required Interface:</b>

        The kernel's value_type must be a linear algebra.

        \code
        vigra::Kernel2D<...>::value_type v;
        v = v * v;
        \endcode
    */
    void initSeparable(Kernel1D<value_type> & kx,
                       Kernel1D<value_type> & ky)
    {
        left_ = Diff2D(kx.left(), ky.left());
        right_ = Diff2D(kx.right(), ky.right());
        int w = right_.x - left_.x + 1;
        int h = right_.y - left_.y + 1;
        kernel_.resize(w, h);

        norm_ = kx.norm() * ky.norm();

        typedef typename Kernel1D<value_type>::Iterator KIter;
        typename Kernel1D<value_type>::Accessor ka;

        KIter kiy = ky.center() + left_.y;
        Iterator iy = center() + left_;

        for(int y = left_.y; y <= right_.y; ++y, ++kiy, ++iy.y)
        {
            KIter kix = kx.center() + left_.x;
            Iterator ix = iy;
            for(int x = left_.x; x <= right_.x; ++x, ++kix, ++ix.x)
            {
                *ix = ka(kix) * ka(kiy);
            }
        }
    }

    /** Init the 2D kernel as the cartesian product of two 1D kernels
        given explicitly by iterators and sizes. The norm becomes the
        sum of the resulting kernel values.

        <b> Required Interface:</b>

        The kernel's value_type must be a linear algebra.

        \code
        vigra::Kernel2D<...>::value_type v;
        v = v * v;
        v += v;
        \endcode

        <b> Preconditions:</b>

        \code
        xleft <= 0;
        xright >= 0;
        yleft <= 0;
        yright >= 0;
        \endcode
    */
    template <class KernelIterator>
    void initSeparable(KernelIterator kxcenter, int xleft, int xright,
                       KernelIterator kycenter, int yleft, int yright)
    {
        vigra_precondition(xleft <= 0 && yleft <= 0,
                           "Kernel2D::initSeparable(): left borders must be <= 0.");
        vigra_precondition(xright >= 0 && yright >= 0,
                           "Kernel2D::initSeparable(): right borders must be >= 0.");

        left_ = Point2D(xleft, yleft);
        right_ = Point2D(xright, yright);

        int w = right_.x - left_.x + 1;
        int h = right_.y - left_.y + 1;
        kernel_.resize(w, h);

        KernelIterator kiy = kycenter + left_.y;
        Iterator iy = center() + left_;

        for(int y = left_.y; y <= right_.y; ++y, ++kiy, ++iy.y)
        {
            KernelIterator kix = kxcenter + left_.x;
            Iterator ix = iy;
            for(int x = left_.x; x <= right_.x; ++x, ++kix, ++ix.x)
            {
                *ix = *kix * *kiy;
            }
        }

        typename BasicImage<value_type>::iterator i = kernel_.begin();
        typename BasicImage<value_type>::iterator iend = kernel_.end();
        norm_ = *i;
        ++i;

        for(; i != iend; ++i)
        {
            norm_ += *i;
        }
    }

    /** Init the 2D kernel as a circular averaging filter. The norm will be
        calculated as
        <TT>NumericTraits<value_type>::one() / (number of non-zero kernel values)</TT>.
        The kernel's value_type must be a linear space.

        <b> Required Interface:</b>

        \code
        value_type v = vigra::NumericTraits<value_type>::one();

        double d;
        v = d * v;
        \endcode

        <b> Precondition:</b>

        \code
        radius > 0;
        \endcode
    */
    void initDisk(int radius)
    {
        vigra_precondition(radius > 0,
                           "Kernel2D::initDisk(): radius must be > 0.");

        left_ = Point2D(-radius, -radius);
        right_ = Point2D(radius, radius);
        int w = right_.x - left_.x + 1;
        int h = right_.y - left_.y + 1;
        kernel_.resize(w, h);
        norm_ = NumericTraits<value_type>::one();

        kernel_ = NumericTraits<value_type>::zero();
        double count = 0.0;

        Iterator k = center();
        double r2 = (double)radius*radius;

        int i;
        for(i = 0; i <= radius; ++i)
        {
            double r = (double) i - 0.5;
            int w = (int)(VIGRA_CSTD::sqrt(r2 - r*r) + 0.5);
            for(int j = -w; j <= w; ++j)
            {
                k(j, i) = NumericTraits<value_type>::one();
                k(j, -i) = NumericTraits<value_type>::one();
                count += (i != 0) ? 2.0 : 1.0;
            }
        }

        count = 1.0 / count;

        for(int y = -radius; y <= radius; ++y)
        {
            for(int x = -radius; x <= radius; ++x)
            {
                k(x,y) = count * k(x,y);
            }
        }
    }

    /** Init the kernel by an explicit initializer list.
        The upper left and lower right corners of the kernel must be passed.
        A comma-separated initializer list is given after the assignment operator.
        This function is used like this:

        \code
        // define horizontal Sobel filter
        vigra::Kernel2D<float> sobel;

        sobel.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) =
            0.125, 0.0, -0.125,
            0.25,  0.0, -0.25,
            0.125, 0.0, -0.125;
        \endcode

        The norm is set to the sum of the initializer values. If the wrong number of
        values is given, a run-time error results. It is, however, possible to give
        just one initializer. This creates an averaging filter with the given constant:

        \code
        vigra::Kernel2D<float> average3x3;

        average3x3.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) = 1.0/9.0;
        \endcode

        Here, the norm is set to value*width()*height().

        <b> Preconditions:</b>

        \code
        1. upperleft.x <= 0;
        2. upperleft.y <= 0;
        3. lowerright.x >= 0;
        4. lowerright.y >= 0;
        5. the number of values in the initializer list
           is 1 or equals the size of the kernel.
        \endcode
    */
    Kernel2D & initExplicitly(Diff2D upperleft, Diff2D lowerright)
    {
        vigra_precondition(upperleft.x <= 0 && upperleft.y <= 0,
                           "Kernel2D::initExplicitly(): left borders must be <= 0.");
        vigra_precondition(lowerright.x >= 0 && lowerright.y >= 0,
                           "Kernel2D::initExplicitly(): right borders must be >= 0.");

        left_ = Point2D(upperleft);
        right_ = Point2D(lowerright);

        int w = right_.x - left_.x + 1;
        int h = right_.y - left_.y + 1;
        kernel_.resize(w, h);

        return *this;
    }

    /** Coordinates of the upper left corner of the kernel.
    */
    Point2D upperLeft() const { return left_; }

    /** Coordinates of the lower right corner of the kernel.
    */
    Point2D lowerRight() const { return right_; }

    /** Width of the kernel.
    */
    int width() const { return right_.x - left_.x + 1; }

    /** Height of the kernel.
    */
    int height() const { return right_.y - left_.y + 1; }

    /** ImageIterator that points to the center of the kernel (coordinate (0,0)).
    */
    Iterator center() { return kernel_.upperLeft() - left_; }

    /** ImageIterator that points to the center of the kernel (coordinate (0,0)).
    */
    ConstIterator center() const { return kernel_.upperLeft() - left_; }

    /** Access kernel entry at given position.
    */
    value_type & operator()(int x, int y)
    { return kernel_[Diff2D(x,y) - left_]; }

    /** Read kernel entry at given position.
    */
    value_type operator()(int x, int y) const
    { return kernel_[Diff2D(x,y) - left_]; }

    /** Access kernel entry at given position.
    */
    value_type & operator[](Diff2D const & d)
    { return kernel_[d - left_]; }

    /** Read kernel entry at given position.
    */
    value_type operator[](Diff2D const & d) const
    { return kernel_[d - left_]; }

    /** Norm of the kernel (i.e. sum of its elements).
    */
    value_type norm() const { return norm_; }

    /** The kernel's default accessor.
    */
    Accessor accessor() { return Accessor(); }

    /** The kernel's default const accessor.
    */
    ConstAccessor accessor() const { return ConstAccessor(); }

    /** Normalize the kernel to the given value. (The norm is the sum of all kernel
        elements.) The kernel's value_type must be a division algebra or
        algebraic field.
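
        A minimal usage sketch (assuming a kernel <tt>k</tt> of type
        vigra::Kernel2D<float> whose entries have already been initialized):

        \code
        k.normalize(1.0f);   // rescale all entries so that they sum to 1.0
        // equivalent shorthand: k.normalize();
        \endcode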

        <b> Required Interface:</b>

        \code
        value_type v = vigra::NumericTraits<value_type>::one(); // if norm is not
                                                                // given explicitly

        v += v;
        v = v * v;
        v = v / v;
        \endcode
    */
    void normalize(value_type norm)
    {
        typename BasicImage<value_type>::iterator i = kernel_.begin();
        typename BasicImage<value_type>::iterator iend = kernel_.end();
        typename NumericTraits<value_type>::RealPromote sum = *i;
        ++i;

        for(; i != iend; ++i)
        {
            sum += *i;
        }

        sum = norm / sum;
        i = kernel_.begin();
        for(; i != iend; ++i)
        {
            *i = *i * sum;
        }

        norm_ = norm;
    }

    /** Normalize the kernel to norm 1.
    */
    void normalize()
    {
        normalize(one());
    }

    /** current border treatment mode
    */
    BorderTreatmentMode borderTreatment() const
    { return border_treatment_; }

    /** Set border treatment mode.
        <TT>BORDER_TREATMENT_CLIP</TT>, <TT>BORDER_TREATMENT_AVOID</TT>,
        <TT>BORDER_TREATMENT_REFLECT</TT>, <TT>BORDER_TREATMENT_REPEAT</TT>, and
        <TT>BORDER_TREATMENT_WRAP</TT> are currently allowed.
    */
    void setBorderTreatment( BorderTreatmentMode new_mode)
    {
        vigra_precondition((new_mode == BORDER_TREATMENT_CLIP    ||
                            new_mode == BORDER_TREATMENT_AVOID   ||
                            new_mode == BORDER_TREATMENT_REFLECT ||
                            new_mode == BORDER_TREATMENT_REPEAT  ||
                            new_mode == BORDER_TREATMENT_WRAP),
                           "Kernel2D::setBorderTreatment():\n"
                           "  Border treatment must be one of the following:\n"
                           "  - BORDER_TREATMENT_CLIP\n"
                           "  - BORDER_TREATMENT_AVOID\n"
                           "  - BORDER_TREATMENT_REFLECT\n"
                           "  - BORDER_TREATMENT_REPEAT\n"
                           "  - BORDER_TREATMENT_WRAP\n");

        border_treatment_ = new_mode;
    }


  private:
    BasicImage<value_type> kernel_;
    Point2D left_, right_;
    value_type norm_;
    BorderTreatmentMode border_treatment_;
};

/**************************************************************/
/*                                                            */
/*          Argument object factories for Kernel2D            */
/*                                                            */
/*        (documentation: see vigra/convolution.hxx)          */
/*                                                            */
/**************************************************************/

template <class KernelIterator, class KernelAccessor>
inline
tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D, BorderTreatmentMode>
kernel2d(KernelIterator ik, KernelAccessor ak, Diff2D kul, Diff2D klr,
         BorderTreatmentMode border)

{
    return
        tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D, BorderTreatmentMode> (
            ik, ak, kul, klr, border);
}

template <class T>
inline
tuple5<typename Kernel2D<T>::ConstIterator,
       typename Kernel2D<T>::ConstAccessor,
       Diff2D, Diff2D, BorderTreatmentMode>
kernel2d(Kernel2D<T> const & k)

{
    return
        tuple5<typename Kernel2D<T>::ConstIterator,
               typename Kernel2D<T>::ConstAccessor,
               Diff2D, Diff2D, BorderTreatmentMode>(
            k.center(),
            k.accessor(),
            k.upperLeft(), k.lowerRight(),
            k.borderTreatment());
}

template <class T>
inline
tuple5<typename Kernel2D<T>::ConstIterator,
       typename Kernel2D<T>::ConstAccessor,
       Diff2D, Diff2D, BorderTreatmentMode>
kernel2d(Kernel2D<T> const & k, BorderTreatmentMode border)

{
    return
        tuple5<typename Kernel2D<T>::ConstIterator,
               typename Kernel2D<T>::ConstAccessor,
               Diff2D, Diff2D, BorderTreatmentMode>(
            k.center(),
            k.accessor(),
            k.upperLeft(), k.lowerRight(),
            border);
}


} // namespace vigra

#endif // VIGRA_STDCONVOLUTION_HXX