00001
00002 #include "vil_gauss_reduce.h"
00003
00004
00005
00006
00007
00008 #include <vcl_cmath.h>
00009 #include <vcl_cassert.h>
00010 #include <vxl_config.h>
00011 #include <vnl/vnl_erf.h>
00012 #include <vnl/vnl_math.h>
00013
00014
00015
00016
00017
00018 VCL_DEFINE_SPECIALIZATION
00019 void vil_gauss_reduce_1plane(const vxl_byte* src_im,
00020 unsigned src_ni, unsigned src_nj,
00021 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
00022 vxl_byte* dest_im,
00023 vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
00024 {
00025 vxl_byte* d_row = dest_im;
00026 const vxl_byte* s_row = src_im;
00027 vcl_ptrdiff_t sxs2 = s_x_step*2;
00028 unsigned ni2 = (src_ni-3)/2;
00029 for (unsigned y=0;y<src_nj;++y)
00030 {
00031
00032 *d_row = static_cast<vxl_byte>(
00033 vnl_math_rnd(0.071f * s_row[sxs2]
00034 + 0.357f * s_row[s_x_step]
00035 + 0.572f * s_row[0]));
00036
00037 vxl_byte * d = d_row + d_x_step;
00038 const vxl_byte* s = s_row + sxs2;
00039 for (unsigned x=0;x<ni2;++x)
00040 {
00041 *d = static_cast<vxl_byte>(
00042 vnl_math_rnd(0.05*s[-sxs2] + 0.05*s[sxs2]
00043 + 0.25*s[-s_x_step]+ 0.25*s[s_x_step]
00044 + 0.4*s[0]));
00045
00046 d += d_x_step;
00047 s += sxs2;
00048 }
00049
00050 *d = static_cast<vxl_byte>(
00051 vnl_math_rnd(0.071f * s[-sxs2]
00052 + 0.357f * s[-s_x_step]
00053 + 0.572f * s[0]));
00054
00055 d_row += d_y_step;
00056 s_row += s_y_step;
00057 }
00058 }
00059
00060
00061
00062
00063 VCL_DEFINE_SPECIALIZATION
00064 void vil_gauss_reduce_1plane(const float* src_im,
00065 unsigned src_ni, unsigned src_nj,
00066 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
00067 float* dest_im, vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
00068 {
00069 float* d_row = dest_im;
00070 const float* s_row = src_im;
00071 vcl_ptrdiff_t sxs2 = s_x_step*2;
00072 unsigned ni2 = (src_ni-3)/2;
00073 for (unsigned y=0;y<src_nj;++y)
00074 {
00075
00076 *d_row = 0.071f * s_row[sxs2]
00077 + 0.357f * s_row[s_x_step]
00078 + 0.572f * s_row[0];
00079 float * d = d_row + d_x_step;
00080 const float* s = s_row + sxs2;
00081 for (unsigned x=0;x<ni2;++x)
00082 {
00083 *d = 0.05f*(s[-sxs2] + s[sxs2])
00084 + 0.25f*(s[-s_x_step]+ s[s_x_step])
00085 + 0.40f*s[0];
00086
00087 d += d_x_step;
00088 s += sxs2;
00089 }
00090
00091 *d = 0.071f * s[-sxs2]
00092 + 0.357f * s[-s_x_step]
00093 + 0.572f * s[0];
00094
00095 d_row += d_y_step;
00096 s_row += s_y_step;
00097 }
00098 }
00099
00100
00101
00102
00103
00104 VCL_DEFINE_SPECIALIZATION
00105 void vil_gauss_reduce_1plane(const double* src_im,
00106 unsigned src_ni, unsigned src_nj,
00107 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
00108 double* dest_im, vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
00109 {
00110 double* d_row = dest_im;
00111 const double* s_row = src_im;
00112 vcl_ptrdiff_t sxs2 = s_x_step*2;
00113 unsigned ni2 = (src_ni-3)/2;
00114 for (unsigned y=0;y<src_nj;++y)
00115 {
00116
00117 *d_row = 0.071 * s_row[sxs2]
00118 + 0.357 * s_row[s_x_step]
00119 + 0.572 * s_row[0];
00120 double * d = d_row + d_x_step;
00121 const double* s = s_row + sxs2;
00122 for (unsigned x=0;x<ni2;++x)
00123 {
00124 *d = 0.05f*(s[-sxs2] + s[sxs2])
00125 + 0.25f*(s[-s_x_step]+ s[s_x_step])
00126 + 0.40f*s[0];
00127
00128 d += d_x_step;
00129 s += sxs2;
00130 }
00131
00132 *d = 0.071f * s[-sxs2]
00133 + 0.357f * s[-s_x_step]
00134 + 0.572f * s[0];
00135
00136 d_row += d_y_step;
00137 s_row += s_y_step;
00138 }
00139 }
00140
00141
00142
00143
00144 VCL_DEFINE_SPECIALIZATION
00145 void vil_gauss_reduce_1plane(const int* src_im,
00146 unsigned src_ni, unsigned src_nj,
00147 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
00148 int* dest_im,
00149 vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
00150 {
00151 int* d_row = dest_im;
00152 const int* s_row = src_im;
00153 vcl_ptrdiff_t sxs2 = s_x_step*2;
00154 unsigned ni2 = (src_ni-3)/2;
00155 for (unsigned y=0;y<src_nj;++y)
00156 {
00157
00158 *d_row = vnl_math_rnd(0.071f * s_row[sxs2]
00159 + 0.357f * s_row[s_x_step]
00160 + 0.572f * s_row[0]);
00161
00162 int * d = d_row + d_x_step;
00163 const int* s = s_row + sxs2;
00164 for (unsigned x=0;x<ni2;++x)
00165 {
00166 *d = vnl_math_rnd(0.05*s[-sxs2] + 0.25*s[-s_x_step] +
00167 0.05*s[ sxs2] + 0.25*s[ s_x_step] +
00168 0.4 *s[0]);
00169
00170 d += d_x_step;
00171 s += sxs2;
00172 }
00173
00174 *d = vnl_math_rnd(0.071f * s[-sxs2]
00175 + 0.357f * s[-s_x_step]
00176 + 0.572f * s[0]);
00177
00178 d_row += d_y_step;
00179 s_row += s_y_step;
00180 }
00181 }
00182
00183
00184
00185
00186 VCL_DEFINE_SPECIALIZATION
00187 void vil_gauss_reduce_1plane(const vxl_int_16* src_im,
00188 unsigned src_ni, unsigned src_nj,
00189 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
00190 vxl_int_16* dest_im,
00191 vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
00192 {
00193 vxl_int_16* d_row = dest_im;
00194 const vxl_int_16* s_row = src_im;
00195 vcl_ptrdiff_t sxs2 = s_x_step*2;
00196 unsigned ni2 = (src_ni-3)/2;
00197 for (unsigned y=0;y<src_nj;++y)
00198 {
00199
00200 *d_row = static_cast<vxl_int_16>(
00201 vnl_math_rnd(0.071f * s_row[sxs2]
00202 + 0.357f * s_row[s_x_step]
00203 + 0.572f * s_row[0]));
00204
00205 vxl_int_16 * d = d_row + d_x_step;
00206 const vxl_int_16* s = s_row + sxs2;
00207 for (unsigned x=0;x<ni2;++x)
00208 {
00209
00210 *d = vxl_int_16(0.5 + 0.05*s[-sxs2] + 0.25*s[-s_x_step]
00211 + 0.05*s[ sxs2] + 0.25*s[ s_x_step]
00212 + 0.4 *s[0]);
00213
00214 d += d_x_step;
00215 s += sxs2;
00216 }
00217
00218 *d = static_cast<vxl_int_16>(
00219 vnl_math_rnd(0.071f * s[-sxs2]
00220 + 0.357f * s[-s_x_step]
00221 + 0.572f * s[0]));
00222
00223 d_row += d_y_step;
00224 s_row += s_y_step;
00225 }
00226 }
00227
00228
00229
00230
00231
00232
00233 VCL_DEFINE_SPECIALIZATION
00234 void vil_gauss_reduce_2_3_1plane(const float* src_im,
00235 unsigned src_ni, unsigned src_nj,
00236 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
00237 float* dest_im, vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
00238 {
00239 float* d_row = dest_im;
00240 const float* s_row = src_im;
00241 vcl_ptrdiff_t sxs2 = s_x_step*2,sxs3 = s_x_step*3;
00242 unsigned d_ni = (2*src_ni+1)/3;
00243 unsigned d_ni2 = d_ni/2;
00244 for (unsigned y=0;y<src_nj;++y)
00245 {
00246
00247 d_row[0] = 0.75f*s_row[0] + 0.25f*s_row[s_x_step];
00248 d_row[d_x_step] = 0.5f*s_row[s_x_step] + 0.5f*s_row[sxs2];
00249 float * d = d_row + 2*d_x_step;
00250 const float* s = s_row + sxs3;
00251 for (unsigned x=1;x<d_ni2;++x)
00252 {
00253 *d = 0.2f*(s[-s_x_step] + s[s_x_step])+0.6f*s[0];
00254 d += d_x_step;
00255 *d = 0.5f*(s[s_x_step] + s[sxs2]);
00256 d += d_x_step;
00257 s += sxs3;
00258 }
00259
00260 if (src_ni%3==1) *d=0.75f*s[-s_x_step] + 0.25f*s[0];
00261 else
00262 if (src_ni%3==2) *d=0.2f*(s[-s_x_step] + s[s_x_step])+0.6f*s[0];
00263
00264 d_row += d_y_step;
00265 s_row += s_y_step;
00266 }
00267 }
00268
00269
00270
00271
00272
00273
00274 VCL_DEFINE_SPECIALIZATION
00275 void vil_gauss_reduce_2_3_1plane(const double* src_im,
00276 unsigned src_ni, unsigned src_nj,
00277 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
00278 double* dest_im, vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
00279 {
00280 double* d_row = dest_im;
00281 const double* s_row = src_im;
00282 vcl_ptrdiff_t sxs2 = s_x_step*2,sxs3 = s_x_step*3;
00283 unsigned d_ni = (2*src_ni+1)/3;
00284 unsigned d_ni2 = d_ni/2;
00285 for (unsigned y=0;y<src_nj;++y)
00286 {
00287
00288 d_row[0] = 0.75*s_row[0] + 0.25*s_row[s_x_step];
00289 d_row[d_x_step] = 0.5*s_row[s_x_step] + 0.5*s_row[sxs2];
00290 double * d = d_row + 2*d_x_step;
00291 const double* s = s_row + sxs3;
00292 for (unsigned x=1;x<d_ni2;++x)
00293 {
00294 *d = 0.2*(s[-s_x_step] + s[s_x_step])+0.6*s[0];
00295 d += d_x_step;
00296 *d = 0.5*(s[s_x_step] + s[sxs2]);
00297 d += d_x_step;
00298 s += sxs3;
00299 }
00300
00301 if (src_ni%3==1) *d=0.75*s[-s_x_step] + 0.25*s[0];
00302 else
00303 if (src_ni%3==2) *d=0.2*(s[-s_x_step] + s[s_x_step])+0.6*s[0];
00304
00305 d_row += d_y_step;
00306 s_row += s_y_step;
00307 }
00308 }
00309
00310
00311
00312
00313
00314
//: Smooth and subsample one byte plane along x to 2/3 of its width.
//  Per three source pixels two outputs are produced: one from the
//  (0.2,0.6,0.2) kernel centred on a source pixel, one from the (0.5,0.5)
//  average across the gap.  Output width is (2*src_ni+1)/3.
VCL_DEFINE_SPECIALIZATION
void vil_gauss_reduce_2_3_1plane(const vxl_byte* src_im,
                                 unsigned src_ni, unsigned src_nj,
                                 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
                                 vxl_byte* dest_im, vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
{
  vxl_byte* d_row = dest_im;
  const vxl_byte* s_row = src_im;
  vcl_ptrdiff_t sxs2 = s_x_step*2,sxs3 = s_x_step*3;
  unsigned d_ni = (2*src_ni+1)/3;   // output row width
  unsigned d_ni2 = d_ni/2;          // number of output pixel pairs
  for (unsigned y=0;y<src_nj;++y)
  {
    // First pair uses one-sided kernels anchored at the row start.
    // Adding 0.5f then truncating rounds to nearest (byte values are unsigned).
    d_row[0] = vxl_byte(0.5f + 0.75f*s_row[0] + 0.25f*s_row[s_x_step]);
    d_row[d_x_step] = vxl_byte(0.5f + 0.5f*s_row[s_x_step] + 0.5f*s_row[sxs2]);
    vxl_byte * d = d_row + 2*d_x_step;
    const vxl_byte* s = s_row + sxs3;
    for (unsigned x=1;x<d_ni2;++x)
    {
      *d = vxl_byte(0.5f + 0.2f*(s[-s_x_step] + s[s_x_step]) + 0.6f*s[0]);
      d += d_x_step;
      *d = vxl_byte(0.5f + 0.5f*(s[s_x_step] + s[sxs2]));
      d += d_x_step;
      s += sxs3;
    }

    // Trailing output pixel depends on how many source pixels remain.
    if (src_ni%3==1)
      *d = vxl_byte(0.5f + 0.75f*s[-s_x_step] + 0.25f*s[0]);
    else if (src_ni%3==2)
      *d = vxl_byte(0.5f + 0.2f*(s[-s_x_step] + s[s_x_step]) + 0.6f*s[0]);

    d_row += d_y_step;
    s_row += s_y_step;
  }
}
00352
00353
00354
00355
00356
00357
00358
//: Smooth and subsample one int plane along x to 2/3 of its width.
//  Per three source pixels two outputs are produced: one from the
//  (0.2,0.6,0.2) kernel centred on a source pixel, one from the (0.5,0.5)
//  average across the gap.  Output width is (2*src_ni+1)/3.
VCL_DEFINE_SPECIALIZATION
void vil_gauss_reduce_2_3_1plane(const int* src_im,
                                 unsigned src_ni, unsigned src_nj,
                                 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
                                 int* dest_im, vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
{
  int* d_row = dest_im;
  const int* s_row = src_im;
  vcl_ptrdiff_t sxs2 = s_x_step*2,sxs3 = s_x_step*3;
  unsigned d_ni = (2*src_ni+1)/3;   // output row width
  unsigned d_ni2 = d_ni/2;          // number of output pixel pairs
  for (unsigned y=0;y<src_nj;++y)
  {
    // First pair uses one-sided kernels anchored at the row start.
    // NOTE(review): int pixels may be negative; 0.5f + truncation rounds
    // toward zero for negative sums, not to nearest — confirm intended.
    d_row[0] = int(0.5f + 0.75f*s_row[0] + 0.25f*s_row[s_x_step]);
    d_row[d_x_step] = int(0.5f + 0.5f*s_row[s_x_step] + 0.5f*s_row[sxs2]);
    int * d = d_row + 2*d_x_step;
    const int* s = s_row + sxs3;
    for (unsigned x=1;x<d_ni2;++x)
    {
      *d = int(0.5f + 0.2f*(s[-s_x_step] + s[s_x_step]) + 0.6f*s[0]);
      d += d_x_step;
      *d = int(0.5f + 0.5f*(s[s_x_step] + s[sxs2]));
      d += d_x_step;
      s += sxs3;
    }

    // Trailing output pixel depends on how many source pixels remain.
    if (src_ni%3==1)
      *d = int(0.5f + 0.75f*s[-s_x_step] + 0.25f*s[0]);
    else if (src_ni%3==2)
      *d = int(0.5f + 0.2f*(s[-s_x_step] + s[s_x_step]) + 0.6f*s[0]);

    d_row += d_y_step;
    s_row += s_y_step;
  }
}
00396
00397
00398
00399
00400
00401
//: Smooth and subsample one vxl_int_16 plane along x to 2/3 of its width.
//  Per three source pixels two outputs are produced: one from the
//  (0.2,0.6,0.2) kernel centred on a source pixel, one from the (0.5,0.5)
//  average across the gap.  Output width is (2*src_ni+1)/3.
VCL_DEFINE_SPECIALIZATION
void vil_gauss_reduce_2_3_1plane(const vxl_int_16* src_im,
                                 unsigned src_ni, unsigned src_nj,
                                 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
                                 vxl_int_16* dest_im, vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
{
  vxl_int_16* d_row = dest_im;
  const vxl_int_16* s_row = src_im;
  vcl_ptrdiff_t sxs2 = s_x_step*2,sxs3 = s_x_step*3;
  unsigned d_ni = (2*src_ni+1)/3;   // output row width
  unsigned d_ni2 = d_ni/2;          // number of output pixel pairs
  for (unsigned y=0;y<src_nj;++y)
  {
    // First pair uses one-sided kernels anchored at the row start.
    // NOTE(review): vxl_int_16 is signed; 0.5f + truncation rounds toward
    // zero for negative sums, not to nearest — confirm intended.
    d_row[0] = vxl_int_16(0.5f + 0.75f*s_row[0] + 0.25f*s_row[s_x_step]);
    d_row[d_x_step] = vxl_int_16(0.5f + 0.5f*s_row[s_x_step] + 0.5f*s_row[sxs2]);
    vxl_int_16 * d = d_row + 2*d_x_step;
    const vxl_int_16* s = s_row + sxs3;
    for (unsigned x=1;x<d_ni2;++x)
    {
      *d = vxl_int_16(0.5f + 0.2f*(s[-s_x_step] + s[s_x_step]) + 0.6f*s[0]);
      d += d_x_step;
      *d = vxl_int_16(0.5f + 0.5f*(s[s_x_step] + s[sxs2]));
      d += d_x_step;
      s += sxs3;
    }

    // Trailing output pixel depends on how many source pixels remain.
    if (src_ni%3==1)
      *d = vxl_int_16(0.5f + 0.75f*s[-s_x_step] + 0.25f*s[0]);
    else if (src_ni%3==2)
      *d = vxl_int_16(0.5f + 0.2f*(s[-s_x_step] + s[s_x_step]) + 0.6f*s[0]);

    d_row += d_y_step;
    s_row += s_y_step;
  }
}
00439
00440
00441
//: Smooth and subsample one byte plane in both x and y with a 121 kernel.
//  Interior output pixels are the separable 3x3 kernel
//  (1 2 1; 2 4 2; 1 2 1)/16 applied at every second source pixel/row;
//  the first row/column (and last, when the size is odd) are copied
//  unsmoothed from the source.  Output is ((src_ni+1)/2) x ((src_nj+1)/2).
VCL_DEFINE_SPECIALIZATION
void vil_gauss_reduce_121_1plane(const vxl_byte* src_im,
                                 unsigned src_ni, unsigned src_nj,
                                 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
                                 vxl_byte* dest_im, vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
{
  vcl_ptrdiff_t sxs2 = s_x_step*2;   // two source pixels in x
  vcl_ptrdiff_t sys2 = s_y_step*2;   // two source rows in y
  vxl_byte* d_row = dest_im+d_y_step;          // start on the second output row
  const vxl_byte* s_row1 = src_im + s_y_step;  // row above the kernel centre
  const vxl_byte* s_row2 = s_row1 + s_y_step;  // centre row
  const vxl_byte* s_row3 = s_row2 + s_y_step;  // row below
  unsigned ni2 = (src_ni-2)/2;   // interior output pixels per row
  unsigned nj2 = (src_nj-2)/2;   // interior output rows
  for (unsigned y=0;y<nj2;++y)
  {
    // First pixel of the row: copied from the centre source row, unsmoothed.
    *d_row = *s_row2;
    vxl_byte * d = d_row + d_x_step;
    const vxl_byte* s1 = s_row1 + sxs2;
    const vxl_byte* s2 = s_row2 + sxs2;
    const vxl_byte* s3 = s_row3 + sxs2;
    for (unsigned x=0;x<ni2;++x)
    {
      // 3x3 kernel (1 2 1; 2 4 2; 1 2 1)/16; +0.5 then truncation rounds
      // to nearest (byte values are unsigned).
      *d = vxl_byte( 0.0625f * s1[-s_x_step] + 0.125f * s1[0] + 0.0625f * s1[s_x_step]
                   + 0.1250f * s2[-s_x_step] + 0.250f * s2[0] + 0.1250f * s2[s_x_step]
                   + 0.0625f * s3[-s_x_step] + 0.125f * s3[0] + 0.0625f * s3[s_x_step] +0.5);

      d += d_x_step;
      s1 += sxs2;
      s2 += sxs2;
      s3 += sxs2;
    }
    // Odd width: last output column is copied from the centre row.
    if (src_ni&1)
      *d = *s2;

    d_row += d_y_step;
    s_row1 += sys2;
    s_row2 += sys2;
    s_row3 += sys2;
  }

  // First output row: copy every second pixel of source row 0.
  // NOTE(review): indexes dest as dest_im[i] rather than i*d_x_step — this
  // appears to assume d_x_step==1; confirm against callers.
  const vxl_byte* s0 = src_im;
  unsigned ni=(src_ni+1)/2;
  for (unsigned i=0;i<ni;++i)
  {
    dest_im[i]= *s0;
    s0+=sxs2;
  }

  // Odd height: fill the last output row by copying every second pixel of
  // the last even-indexed source row.
  if (src_nj&1)
  {
    unsigned yhi = (src_nj-1)/2;
    vxl_byte* dest_last_row = dest_im + yhi*d_y_step;
    const vxl_byte* s_last = src_im + yhi*sys2;
    for (unsigned i=0;i<ni;++i)
    {
      dest_last_row[i]= *s_last;
      s_last+=sxs2;
    }
  }
}
00510
00511
00512
00513
00514 VCL_DEFINE_SPECIALIZATION
00515 void vil_gauss_reduce_121_1plane(const float* src_im,
00516 unsigned src_ni, unsigned src_nj,
00517 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
00518 float* dest_im,
00519 vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
00520 {
00521 vcl_ptrdiff_t sxs2 = s_x_step*2;
00522 vcl_ptrdiff_t sys2 = s_y_step*2;
00523 float* d_row = dest_im+d_y_step;
00524 const float* s_row1 = src_im + s_y_step;
00525 const float* s_row2 = s_row1 + s_y_step;
00526 const float* s_row3 = s_row2 + s_y_step;
00527 unsigned ni2 = (src_ni-2)/2;
00528 unsigned nj2 = (src_nj-2)/2;
00529 for (unsigned y=0;y<nj2;++y)
00530 {
00531
00532 *d_row = *s_row2;
00533 float * d = d_row + d_x_step;
00534 const float* s1 = s_row1 + sxs2;
00535 const float* s2 = s_row2 + sxs2;
00536 const float* s3 = s_row3 + sxs2;
00537 for (unsigned x=0;x<ni2;++x)
00538 {
00539
00540 *d = 0.0625f * s1[-s_x_step] + 0.125f * s1[0] + 0.0625f * s1[s_x_step]
00541 + 0.1250f * s2[-s_x_step] + 0.250f * s2[0] + 0.1250f * s2[s_x_step]
00542 + 0.0625f * s3[-s_x_step] + 0.125f * s3[0] + 0.0625f * s3[s_x_step];
00543
00544 d += d_x_step;
00545 s1 += sxs2;
00546 s2 += sxs2;
00547 s3 += sxs2;
00548 }
00549
00550 if (src_ni&1)
00551 *d = *s2;
00552
00553 d_row += d_y_step;
00554 s_row1 += sys2;
00555 s_row2 += sys2;
00556 s_row3 += sys2;
00557 }
00558
00559
00560
00561
00562 const float* s0 = src_im;
00563 unsigned ni=(src_ni+1)/2;
00564 for (unsigned i=0;i<ni;++i)
00565 {
00566 dest_im[i]= *s0;
00567 s0+=sxs2;
00568 }
00569
00570 if (src_nj&1)
00571 {
00572 unsigned yhi = (src_nj-1)/2;
00573 float* dest_last_row = dest_im + yhi*d_y_step;
00574 const float* s_last = src_im + yhi*sys2;
00575 for (unsigned i=0;i<ni;++i)
00576 {
00577 dest_last_row[i]= *s_last;
00578 s_last+=sxs2;
00579 }
00580 }
00581 }
00582
00583
00584
00585
//: Smooth and subsample one double plane in both x and y with a 121 kernel.
//  Interior output pixels apply the separable 3x3 kernel
//  (1 2 1; 2 4 2; 1 2 1)/16 at every second source pixel/row; border
//  rows/columns are copied unsmoothed.  Output is
//  ((src_ni+1)/2) x ((src_nj+1)/2).
VCL_DEFINE_SPECIALIZATION
void vil_gauss_reduce_121_1plane(const double* src_im,
                                 unsigned src_ni, unsigned src_nj,
                                 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
                                 double* dest_im,
                                 vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
{
  vcl_ptrdiff_t sxs2 = s_x_step*2;   // two source pixels in x
  vcl_ptrdiff_t sys2 = s_y_step*2;   // two source rows in y
  double* d_row = dest_im+d_y_step;           // start on the second output row
  const double* s_row1 = src_im + s_y_step;   // row above the kernel centre
  const double* s_row2 = s_row1 + s_y_step;   // centre row
  const double* s_row3 = s_row2 + s_y_step;   // row below
  unsigned ni2 = (src_ni-2)/2;   // interior output pixels per row
  unsigned nj2 = (src_nj-2)/2;   // interior output rows
  for (unsigned y=0;y<nj2;++y)
  {
    // First pixel of the row: copied from the centre source row.
    *d_row = *s_row2;
    double * d = d_row + d_x_step;
    const double* s1 = s_row1 + sxs2;
    const double* s2 = s_row2 + sxs2;
    const double* s3 = s_row3 + sxs2;
    for (unsigned x=0;x<ni2;++x)
    {
      // 3x3 kernel (1 2 1; 2 4 2; 1 2 1)/16.
      *d = 0.0625 * s1[-s_x_step] + 0.125 * s1[0] + 0.0625 * s1[s_x_step]
         + 0.1250 * s2[-s_x_step] + 0.250 * s2[0] + 0.1250 * s2[s_x_step]
         + 0.0625 * s3[-s_x_step] + 0.125 * s3[0] + 0.0625 * s3[s_x_step];

      d += d_x_step;
      s1 += sxs2;
      s2 += sxs2;
      s3 += sxs2;
    }
    // Odd width: last output column is copied from the centre row.
    if (src_ni&1)
      *d = *s2;

    d_row += d_y_step;
    s_row1 += sys2;
    s_row2 += sys2;
    s_row3 += sys2;
  }

  // First output row: copy every second pixel of source row 0.
  // NOTE(review): indexes dest as dest_im[i] rather than i*d_x_step — this
  // appears to assume d_x_step==1; confirm against callers.
  const double* s0 = src_im;
  unsigned ni=(src_ni+1)/2;
  for (unsigned i=0;i<ni;++i)
  {
    dest_im[i]= *s0;
    s0+=sxs2;
  }

  // Odd height: fill the last output row by copying every second pixel of
  // the last even-indexed source row.
  if (src_nj&1)
  {
    unsigned yhi = (src_nj-1)/2;
    double* dest_last_row = dest_im + yhi*d_y_step;
    const double* s_last = src_im + yhi*sys2;
    for (unsigned i=0;i<ni;++i)
    {
      dest_last_row[i]= *s_last;
      s_last+=sxs2;
    }
  }
}
00654
00655
00656
00657
//: Smooth and subsample one int plane in both x and y with a 121 kernel.
//  Interior output pixels apply the separable 3x3 kernel
//  (1 2 1; 2 4 2; 1 2 1)/16 at every second source pixel/row; border
//  rows/columns are copied unsmoothed.  Output is
//  ((src_ni+1)/2) x ((src_nj+1)/2).
VCL_DEFINE_SPECIALIZATION
void vil_gauss_reduce_121_1plane(const int* src_im,
                                 unsigned src_ni, unsigned src_nj,
                                 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
                                 int* dest_im,
                                 vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
{
  vcl_ptrdiff_t sxs2 = s_x_step*2;   // two source pixels in x
  vcl_ptrdiff_t sys2 = s_y_step*2;   // two source rows in y
  int* d_row = dest_im+d_y_step;           // start on the second output row
  const int* s_row1 = src_im + s_y_step;   // row above the kernel centre
  const int* s_row2 = s_row1 + s_y_step;   // centre row
  const int* s_row3 = s_row2 + s_y_step;   // row below
  unsigned ni2 = (src_ni-2)/2;   // interior output pixels per row
  unsigned nj2 = (src_nj-2)/2;   // interior output rows
  for (unsigned y=0;y<nj2;++y)
  {
    // First pixel of the row: copied from the centre source row.
    *d_row = *s_row2;
    int * d = d_row + d_x_step;
    const int* s1 = s_row1 + sxs2;
    const int* s2 = s_row2 + sxs2;
    const int* s3 = s_row3 + sxs2;
    for (unsigned x=0;x<ni2;++x)
    {
      // 3x3 kernel (1 2 1; 2 4 2; 1 2 1)/16.
      // NOTE(review): int pixels may be negative; +0.5 then truncation rounds
      // toward zero for negative sums, not to nearest — confirm intended.
      *d = int( 0.0625f * s1[-s_x_step] + 0.125f * s1[0] + 0.0625f * s1[s_x_step]
              + 0.1250f * s2[-s_x_step] + 0.250f * s2[0] + 0.1250f * s2[s_x_step]
              + 0.0625f * s3[-s_x_step] + 0.125f * s3[0] + 0.0625f * s3[s_x_step] +0.5);

      d += d_x_step;
      s1 += sxs2;
      s2 += sxs2;
      s3 += sxs2;
    }
    // Odd width: last output column is copied from the centre row.
    if (src_ni&1)
      *d = *s2;

    d_row += d_y_step;
    s_row1 += sys2;
    s_row2 += sys2;
    s_row3 += sys2;
  }

  // First output row: copy every second pixel of source row 0.
  // NOTE(review): indexes dest as dest_im[i] rather than i*d_x_step — this
  // appears to assume d_x_step==1; confirm against callers.
  const int* s0 = src_im;
  unsigned ni=(src_ni+1)/2;
  for (unsigned i=0;i<ni;++i)
  {
    dest_im[i]= *s0;
    s0+=sxs2;
  }

  // Odd height: fill the last output row by copying every second pixel of
  // the last even-indexed source row.
  if (src_nj&1)
  {
    unsigned yhi = (src_nj-1)/2;
    int* dest_last_row = dest_im + yhi*d_y_step;
    const int* s_last = src_im + yhi*sys2;
    for (unsigned i=0;i<ni;++i)
    {
      dest_last_row[i]= *s_last;
      s_last+=sxs2;
    }
  }
}
00727
00728
00729
//: Smooth and subsample one vxl_int_16 plane in both x and y with a 121 kernel.
//  Interior output pixels apply the separable 3x3 kernel
//  (1 2 1; 2 4 2; 1 2 1)/16 at every second source pixel/row; border
//  rows/columns are copied unsmoothed.  Output is
//  ((src_ni+1)/2) x ((src_nj+1)/2).
VCL_DEFINE_SPECIALIZATION
void vil_gauss_reduce_121_1plane(const vxl_int_16* src_im,
                                 unsigned src_ni, unsigned src_nj,
                                 vcl_ptrdiff_t s_x_step, vcl_ptrdiff_t s_y_step,
                                 vxl_int_16* dest_im,
                                 vcl_ptrdiff_t d_x_step, vcl_ptrdiff_t d_y_step)
{
  vcl_ptrdiff_t sxs2 = s_x_step*2;   // two source pixels in x
  vcl_ptrdiff_t sys2 = s_y_step*2;   // two source rows in y
  vxl_int_16* d_row = dest_im+d_y_step;           // start on the second output row
  const vxl_int_16* s_row1 = src_im + s_y_step;   // row above the kernel centre
  const vxl_int_16* s_row2 = s_row1 + s_y_step;   // centre row
  const vxl_int_16* s_row3 = s_row2 + s_y_step;   // row below
  unsigned ni2 = (src_ni-2)/2;   // interior output pixels per row
  unsigned nj2 = (src_nj-2)/2;   // interior output rows
  for (unsigned y=0;y<nj2;++y)
  {
    // First pixel of the row: copied from the centre source row.
    *d_row = *s_row2;
    vxl_int_16 * d = d_row + d_x_step;
    const vxl_int_16* s1 = s_row1 + sxs2;
    const vxl_int_16* s2 = s_row2 + sxs2;
    const vxl_int_16* s3 = s_row3 + sxs2;
    for (unsigned x=0;x<ni2;++x)
    {
      // 3x3 kernel (1 2 1; 2 4 2; 1 2 1)/16.
      // NOTE(review): vxl_int_16 is signed; +0.5 then truncation rounds
      // toward zero for negative sums, not to nearest — confirm intended.
      *d = vxl_int_16( 0.0625f * s1[-s_x_step] + 0.125f * s1[0] + 0.0625f * s1[s_x_step]
                     + 0.1250f * s2[-s_x_step] + 0.250f * s2[0] + 0.1250f * s2[s_x_step]
                     + 0.0625f * s3[-s_x_step] + 0.125f * s3[0] + 0.0625f * s3[s_x_step] +0.5);

      d += d_x_step;
      s1 += sxs2;
      s2 += sxs2;
      s3 += sxs2;
    }
    // Odd width: last output column is copied from the centre row.
    if (src_ni&1)
      *d = *s2;

    d_row += d_y_step;
    s_row1 += sys2;
    s_row2 += sys2;
    s_row3 += sys2;
  }

  // First output row: copy every second pixel of source row 0.
  // NOTE(review): indexes dest as dest_im[i] rather than i*d_x_step — this
  // appears to assume d_x_step==1; confirm against callers.
  const vxl_int_16* s0 = src_im;
  unsigned ni=(src_ni+1)/2;
  for (unsigned i=0;i<ni;++i)
  {
    dest_im[i]= *s0;
    s0+=sxs2;
  }

  // Odd height: fill the last output row by copying every second pixel of
  // the last even-indexed source row.
  if (src_nj&1)
  {
    unsigned yhi = (src_nj-1)/2;
    vxl_int_16* dest_last_row = dest_im + yhi*d_y_step;
    const vxl_int_16* s_last = src_im + yhi*sys2;
    for (unsigned i=0;i<ni;++i)
    {
      dest_last_row[i]= *s_last;
      s_last+=sxs2;
    }
  }
}
00799
00800
//: Construct filter parameters for a Gaussian reduction by scaleStep.
//  \param scaleStep  reduction factor, required to be in (1.0, 2.0].
//  The five filter taps (filt2_,filt1_,filt0_,filt1_,filt2_) are the areas
//  under a Gaussian over unit-wide bins, computed via erf differences, and
//  normalised to unit sum.  Edge and penultimate-edge variants are subsets
//  of the same taps divided by the full five-tap total.
vil_gauss_reduce_params::vil_gauss_reduce_params(double scaleStep)
{
  assert(scaleStep> 1.0 && scaleStep<=2.0);
  scale_step_ = scaleStep;

  // z scales the bin edges so the Gaussian's width depends on the scale
  // step; each erf difference below is the Gaussian mass over one bin.
  double z = 1/vcl_sqrt(2.0*(scaleStep-1.0));
  filt0_ = vnl_erf(0.5 * z) - vnl_erf(-0.5 * z);   // central tap
  filt1_ = vnl_erf(1.5 * z) - vnl_erf(0.5 * z);    // +/-1 tap
  filt2_ = vnl_erf(2.5 * z) - vnl_erf(1.5 * z);    // +/-2 tap

  double five_tap_total = 2*(filt2_ + filt1_) + filt0_;
#if 0
  double four_tap_total = filt2_ + 2*(filt1_) + filt0_;
  double three_tap_total = filt2_ + filt1_ + filt0_;
#endif

  // Taps for the image edge: the mass of the three taps that would fall
  // outside the image is folded into the edge pixel's weight.
  // NOTE(review): all variants are divided by the five-tap total rather
  // than their own sums (the #if 0 alternatives suggest this was tuned) —
  // confirm the normalisation is intentional.
  filt_edge0_ = (filt0_ + filt1_ + filt2_) / five_tap_total;
  filt_edge1_ = filt1_ / five_tap_total;
  filt_edge2_ = filt2_ / five_tap_total;
#if 0
  filt_edge0_ = 1.0;
  filt_edge1_ = 0.0;
  filt_edge2_ = 0.0;
#endif

  // Taps used one pixel in from the edge (only one tap falls outside).
  filt_pen_edge_n1_ = (filt1_+filt2_) / five_tap_total;
  filt_pen_edge0_ = filt0_ / five_tap_total;
  filt_pen_edge1_ = filt1_ / five_tap_total;
  filt_pen_edge2_ = filt2_ / five_tap_total;

  // Normalise the interior five-tap kernel to unit sum.  Must happen after
  // the edge variants above, which use the unnormalised values.
  filt0_ = filt0_ / five_tap_total;
  filt1_ = filt1_ / five_tap_total;
  filt2_ = filt2_ / five_tap_total;

  // Sanity check: taps must decrease away from the kernel centre.
  assert(filt_edge0_ > filt_edge1_);
  assert(filt_edge1_ > filt_edge2_);
}